repo_name (string, 5–114 chars) | repo_url (string, 24–133 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | directory_id (string, 40 chars) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k–683M, nullable) | star_events_count (int64, 0–22.6k) | fork_events_count (int64, 0–4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, 1–13.2k items) | num_files (int64, 1–13.2k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
spoatacus/useful-scripts | https://github.com/spoatacus/useful-scripts | 54c99905fcc76b608c17d740fde8756a41f3b68f | 2c07bf657de2f4ff798969e0c6ae5c4399c34fe3 | 9889b7095e5a2cefcdfc77dd707e61ee81919a86 | refs/heads/master | 2021-01-19T20:16:17.202961 | 2012-12-04T18:14:48 | 2012-12-04T18:14:48 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6546275615692139,
"alphanum_fraction": 0.6719337701797485,
"avg_line_length": 24.55769157409668,
"blob_id": "6de353e1f82bc63ec92fd57b56cce63806169602",
"content_id": "e7df5b89160d209f818ff8b561ae0d5f1df97516",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 52,
"path": "/csv-diff.py",
"repo_name": "spoatacus/useful-scripts",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport argparse\nimport csv\n\nparser = argparse.ArgumentParser(description=\"Removes items in 'other' from 'set' based on a 'key' column.\")\nparser.add_argument('set', type=str, help='File that contains primary set')\nparser.add_argument('other', type=str, help='File that contains set to be removed')\nparser.add_argument('set_key', type=int, help='Column to use as key in primary set')\nparser.add_argument('other_key', type=int, help='Column to use as key in other set')\nparser.add_argument('output', type=str, help='File to put results in')\nargs = parser.parse_args()\n\nwith open( args.set, 'r' ) as f1, open( args.other, 'r' ) as f2:\n\tcsv1 = csv.reader( f1 )\n\tcsv2 = csv.reader( f2 )\n\n\t# skip header row\n\theader = csv1.next()\n\tcsv2.next()\n\n\tdict1 = {}\n\tdict2 = {}\n\n\tfor row in csv1:\n\t\ttry:\n\t\t\tkey = row[args.set_key]\n\t\t\tdict1[key] = row\n\t\texcept IndexError:\n\t\t\tprint( row )\n\n\tfor row in csv2:\n\t\tdict2[row[args.other_key]] = row\n\n\n\tprint( 'len set: ', len(dict1) )\n\tprint( 'len other: ', len(dict2) )\n\n\n\tkey1 = dict1.viewkeys()\n\tkey2 = dict2.viewkeys()\n\tdiff = key1 - key2\n\n\tprint( 'len diff: ', len(diff) )\n\n\t\n\twith open( args.output, 'w' ) as fout:\n\t\tcsvWriter = csv.writer( fout, quoting=csv.QUOTE_ALL )\n\n\t\tcsvWriter.writerow( header )\n\n\t\tfor key in diff:\n\t\t\tcsvWriter.writerow( dict1[key] )\n"
},
{
"alpha_fraction": 0.6440678238868713,
"alphanum_fraction": 0.6440678238868713,
"avg_line_length": 14,
"blob_id": "1033badfc86ebf343048b2a7bbcbac47176300fc",
"content_id": "87c5d1ca8f84adb44f377089340f799b97a858ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 59,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 4,
"path": "/README.md",
"repo_name": "spoatacus/useful-scripts",
"src_encoding": "UTF-8",
"text": "useful-scripts\n==============\n\nCollection of random scripts"
},
{
"alpha_fraction": 0.695770800113678,
"alphanum_fraction": 0.695770800113678,
"avg_line_length": 28.31999969482422,
"blob_id": "d9182d102b09e842742515fb3af608d42e89aa19",
"content_id": "20b96f1a18826280e87e798e26e6392aa53c3eb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 733,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 25,
"path": "/col2list.py",
"repo_name": "spoatacus/useful-scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\nimport argparse\nimport csv\n\nparser = argparse.ArgumentParser(description='Convert a column of data to a comma separated list.')\nparser.add_argument('input', type=str, help='File to get data from')\nparser.add_argument('col', type=int, help='Column number to convert')\nparser.add_argument('--output', type=str, help='File to dump list to')\nparser.add_argument('--sep', type=str, default='\\t', help='Separater to split columns on')\nargs = parser.parse_args()\n\nwith open(args.input, 'rb') as f:\n\treader = csv.reader( f, delimiter=args.sep )\n\n\tdata = []\n\n\tfor row in reader:\n\t\tdata.append( row[args.col].strip() )\n\n\n\tprint( ','.join(data) )\n\n\tif args.output:\n\t\twith open(args.output, 'w') as fout:\n\t\t\tfout.write( data )\n"
}
] | 3 |
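Each row, like the `spoatacus/useful-scripts` record above, pairs repo-level metadata with a `files` list holding one record per blob, including the full source in `text`. Below is a minimal sketch of consuming rows with this schema via the Hugging Face `datasets` library; the dataset identifier is a placeholder assumption, not the real name of this dump.

```python
# Sketch only: "some-org/some-code-dataset" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in ds:
    # Row-level metadata mirrors the header columns above.
    print(row["repo_name"], row["star_events_count"], row["num_files"])
    # `files` holds one record per blob, with the full source in `text`.
    for f in row["files"]:
        if f["language"] == "Python" and not f["is_vendor"]:
            print(" ", f["path"], f["length_bytes"], "bytes")
    break
```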
CSAIL-LivingLab/location-reporter | https://github.com/CSAIL-LivingLab/location-reporter | 30df10817b51fdd88d4667f928e01cc0e2440d63 | 36d54ffdb7c2ca987461d12ae627499b3f49a73e | 08473779f3bca2e4fcd6a790b8c5eabccf41fd77 | refs/heads/master | 2021-01-12T13:20:43.385163 | 2016-05-04T22:08:46 | 2016-05-04T22:08:46 | 58,085,989 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6330556869506836,
"alphanum_fraction": 0.6359784007072449,
"avg_line_length": 35.790321350097656,
"blob_id": "38ca43837a10fe64a7cf30f373f2a864188cf8b5",
"content_id": "24f63d45ee2b3ea9f3552304bb3130f0de1c3ca7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6843,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 186,
"path": "/location_app/views.py",
"repo_name": "CSAIL-LivingLab/location-reporter",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse, \\\n HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.template.context import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.http import require_POST\n\nimport requests\nimport re\n\n\napi_base_url = 'https://datahub.csail.mit.edu'\nour_base_url = 'http://localhost:8000'\nclient_id = 'client_id'\nclient_secret = ('foo'\n 'bar'\n 'baz')\nowner_username = 'jander'\nowner_password = 'foo'\n\n\ndef _authorization_url():\n url_format = ('{base}/oauth2/authorize?'\n 'response_type=code&'\n 'scope=read+write&'\n 'client_id={client_id}&'\n 'redirect_uri={redirect_uri}')\n return url_format.format(\n base=api_base_url,\n client_id=client_id,\n redirect_uri=our_base_url)\n\n\ndef _exchange_code_for_token(code):\n token_url = '{base}/oauth2/token/'.format(base=api_base_url)\n\n response = requests.post(token_url, data={\n 'code': code,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': our_base_url,\n 'grant_type': 'authorization_code',\n })\n\n return response.json()['access_token']\n\n\ndef _update_username_for_current_session(request):\n access_token = request.session['access_token']\n user_url = '{base}/api/v1/user'.format(base=api_base_url)\n # Act as the user to find out what their username is.\n headers = {\n 'Authorization': 'Bearer {token}'.format(token=access_token)}\n response = requests.get(user_url, headers=headers)\n if response.status_code == 200 and 'username' in response.json():\n username = response.json()['username']\n request.session['username'] = username\n return username\n\n\ndef _post_query(request, query):\n user_url = '{base}/api/v1/query/{repo_base}'.format(\n base=api_base_url, repo_base=owner_username)\n # Run queries as the app owner\n headers = {\n 'Authorization': 'Bearer {token}'.format(\n token=request.session['owner_access_token'])}\n requests.post(user_url, headers=headers, data={'query': query})\n\n\ndef _form_input_value(text, name):\n regexp = \"name='{}' value='([^']+)'\"\n result = re.search(regexp.format(name), text)\n if not result:\n regexp = \"name=\\\"{}\\\" type=\\\"hidden\\\" value=\\\"([^\\\"]+)\\\"\"\n result = re.search(regexp.format(name), text)\n if not result:\n regexp = \"name=\\\"{}\\\" value=\\\"([^\\\"]+)\\\"\"\n result = re.search(regexp.format(name), text)\n return result.groups()[0]\n\n\ndef _repo_owner_access_token(request):\n # Use existing owner token if one already exists.\n # if 'owner_access_token' in request.session:\n # return request.session['owner_access_token']\n # Otherwise, do the authorization dance server-side.\n session = requests.Session()\n # Go to the authorization page, get redirected to log in.\n response = session.get(_authorization_url())\n # Pick out the login form's values and post them along with the owner's\n # credentials.\n csrf_token = _form_input_value(response.text, 'csrfmiddlewaretoken')\n next_url = _form_input_value(response.text, 'next')\n\n headers = {'Referer': '{base}/'.format(base=api_base_url)}\n form_url = '{base}/account/login'.format(base=api_base_url)\n response = session.post(form_url, headers=headers, data={\n 'csrfmiddlewaretoken': csrf_token,\n 'next': next_url,\n 'username': owner_username,\n 'password': owner_password,\n })\n\n # Get redirected to the authorization form, pick out its values and post\n # those to approve self.\n csrf_token = _form_input_value(response.text, 'csrfmiddlewaretoken')\n redirect_uri = 
_form_input_value(response.text, 'redirect_uri')\n scope = _form_input_value(response.text, 'scope')\n cid = _form_input_value(response.text, 'client_id')\n response_type = _form_input_value(response.text, 'response_type')\n\n form_url = \"{base}/oauth2/authorize/\".format(base=api_base_url)\n response = session.post(form_url, headers=headers, data={\n 'csrfmiddlewaretoken': csrf_token,\n 'redirect_uri': redirect_uri,\n 'scope': scope,\n 'client_id': cid,\n 'state': \"\",\n 'response_type': response_type,\n 'allow': \"Authorize\",\n }, allow_redirects=False)\n # Grab the code from the redirect URL\n # e.g. 'http://localhost:8000?code=VubcXxXEJrdtrycMUU1J0IWJbTHJfG'\n code = response.headers['location'].partition(\"?code=\")[-1]\n # Get the owner access token\n token = _exchange_code_for_token(code)\n request.session['owner_access_token'] = token\n return token\n\n\ndef home(request):\n context = RequestContext(request, {\n 'authorize_url': _authorization_url(),\n })\n\n # If this is a redirect from DataHub after being authorized by the user,\n # use the given code to get an access token from DataHub.\n if 'code' in request.GET:\n code = request.GET['code']\n access_token = _exchange_code_for_token(code)\n # Save the token in this session for later reuse.\n request.session['access_token'] = access_token\n # Redirect to this same page, minus the OAuth query parameters.\n return HttpResponseRedirect(reverse('home-page'))\n\n if 'access_token' in request.session:\n username = _update_username_for_current_session(request)\n _give_user_access_to_table(request, username)\n context.update({'username': username})\n\n return render_to_response('index.html', context)\n\n\ndef logout(request):\n request.session.flush()\n return HttpResponseRedirect(reverse('home-page'))\n\n\n@require_POST\ndef mark(request):\n # Matches a table created with:\n # CREATE TABLE location.locations (username varchar, latitude float,\n # longitude float, timestamp timestamp);\n query = (\"INSERT INTO {repo}.{table} (username, latitude, \"\n \"longitude, timestamp) \"\n \"VALUES ('{username}', '{latitude}', \"\n \"'{longitude}', '{timestamp}')\").format(\n repo=\"location\",\n table=\"locations\",\n username=request.session['username'],\n latitude=request.POST['latitude'],\n longitude=request.POST['longitude'],\n timestamp=request.POST['timestamp'])\n print(query)\n _post_query(request, query)\n return HttpResponse(\"Thanks!\")\n\n\ndef _give_user_access_to_table(request, username):\n # Get the access token that lets us modify policies.\n owner_access_token = _repo_owner_access_token(request)\n print(owner_access_token)\n # Add a policy for this user if they don't already have one.\n # query = \"INSERT INTO repo.policy_table VALUES (policy)\"\n # _post_query(request, query)\n"
},
{
"alpha_fraction": 0.46666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14,
"blob_id": "0e4ba0c515562d2c45003b549c318004d4527e79",
"content_id": "a37f25c3f1bacf243dbdeff63e245926bcce4714",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 30,
"license_type": "permissive",
"max_line_length": 15,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "CSAIL-LivingLab/location-reporter",
"src_encoding": "UTF-8",
"text": "Django==1.8.2\nrequests==2.7.0\n"
}
] | 2 |
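Because every file record carries `path`, `text`, and `src_encoding`, a row can be materialized back into a working tree. The sketch below assumes `row` is one parsed record in the schema above; note that `path` values are stored with a leading slash (e.g. `/requirements.txt`), so it is stripped before joining.

```python
from pathlib import Path

def checkout_row(row: dict, dest: str) -> None:
    """Write each file record of one dataset row to disk under dest/."""
    root = Path(dest) / row["repo_name"].replace("/", "__")
    for f in row["files"]:
        # `path` is repo-relative but stored with a leading slash.
        target = root / f["path"].lstrip("/")
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(f["text"], encoding=f["src_encoding"])
```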
lilianluong/multitask-card-games | https://github.com/lilianluong/multitask-card-games | 61793d05cdda1c8d7fdc0d4f2fc3aa5c79194a7e | ae32e85583c61cc27a44946a6b5fa7c1e2c152ff | 92ecc03ca56dbed91e7b803b41bb2b9625503ca8 | refs/heads/main | 2023-01-29T20:34:25.123311 | 2020-12-02T09:18:05 | 2020-12-08T10:27:05 | 302,153,908 | 1 | 0 | MIT | 2020-10-07T20:35:16 | 2020-12-01T13:12:52 | 2020-12-03T04:51:58 | Python | [
{
"alpha_fraction": 0.5313479900360107,
"alphanum_fraction": 0.565047025680542,
"avg_line_length": 28.674419403076172,
"blob_id": "dbf4e8bfe4730f3f4d6de1b4bcf524277a9c8df5",
"content_id": "d6cdad8928a4ddf6f8bf81d921de4f8d402eedc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1276,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 43,
"path": "/test/test_spades.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\n# Created by Patrick Kao\nimport unittest\nfrom typing import Tuple\n\nfrom agents.random_agent import RandomAgent\nfrom environments.spades import Spades\nfrom game import Game\nfrom util import Suit\n\n\nclass MiniSpades(Spades):\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 2, 2, 2, 2\n\n\nclass OhHellTest(unittest.TestCase):\n def test_game(self):\n game = Game(MiniSpades, [RandomAgent] * 4)\n result = game.run()\n print(result)\n self.assertTrue(result is not None)\n\n def test_simple_game(self):\n game = MiniSpades()\n state = [0, 1, 2, 3, 0, 1, 2, 3, ] # cards\n state.extend([-1 for _ in range(4)]) # in play\n state.extend([0 for _ in range(4)]) # score\n state.extend([Suit.SPADES, -1, 0]) # trump plus leading + players\n\n game.reset(state)\n\n plays = [1, 1, 0, 1,\n 0, 1, 2, 3, 5, 6, 7, 4]\n for turn in range(12):\n next_player = game.next_player\n play = plays[turn]\n observations, rewards, done, info = game.step((next_player, play))\n if turn != 11:\n self.assertTrue(rewards == tuple([0] * 4))\n else:\n self.assertEqual(rewards, (0, 0, 10, 10))\n"
},
{
"alpha_fraction": 0.6088996529579163,
"alphanum_fraction": 0.6142727136611938,
"avg_line_length": 40.42319869995117,
"blob_id": "73a5bb9645ca677accd0f9c1e35d6285e5fd534f",
"content_id": "6bfbbf22b04d6468d19e369a9c2fbc43ceaa19ce",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13214,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 319,
"path": "/src/agents/models/multitask_models.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nimport torch\nfrom torch import nn\n\nfrom agents.belief_agent import BeliefBasedAgent\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import polynomial_transform\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass MultitaskTransitionModel(nn.Module):\n \"\"\"\n Multilayered perceptron to approximate T: (b, a) -> b'\n \"\"\"\n def __init__(self, layer_sizes: List[int] = None, shared_layers: int = 2, polynomial: bool = True):\n \"\"\"\n :param layer_sizes: sizes of the hidden layers in the network (there will be len(layer_sizes) + 1 Linear layers)\n :param shared_layers: number of layers to maintain as a shared backbone\n :param polynomial: whether or not to use the polynomial basis\n \"\"\"\n super().__init__()\n\n if layer_sizes is None:\n # Default layer sizes\n # layer_sizes = [400, 250, 150]\n layer_sizes = [1200, 600, 220]\n self._layer_sizes = layer_sizes\n self._num_shared_layers = shared_layers\n assert 0 < shared_layers <= len(self._layer_sizes)\n\n self._polynomial = polynomial\n self._input_size = None\n self._belief_size = None\n self._num_players = None\n self._parameters_returned = False\n self.models = {}\n self.backbone = None\n\n def setup(self, task_instance: TrickTakingGame):\n \"\"\"\n Set parameters for task, and create shared backbone for model\n :param task_instance: instance of task to sample parameters from\n :return: None\n \"\"\"\n num_actions = task_instance.num_cards\n num_players = task_instance.num_players\n belief_size = BeliefBasedAgent(task_instance, 0).get_belief_size()\n d = belief_size + num_actions\n self._input_size = d * (d + 1) if self._polynomial else d\n self._belief_size = belief_size\n self._num_players = num_players\n\n # Create shared backbone\n layers = []\n layer_input = self._input_size\n for layer_output in self._layer_sizes[:self._num_shared_layers]:\n layers.append(nn.Linear(layer_input, layer_output))\n layer_input = layer_output\n self.backbone = nn.Sequential(*layers).to(device)\n\n def get_parameters(self) -> List:\n \"\"\"\n :return: list of the parameters of all the models for use by an optimizer\n \"\"\"\n self._parameters_returned = True\n params = list(self.backbone.parameters())\n for model in self.models.values():\n params.extend(list(model.parameters()))\n return params\n\n def make_model(self, task: TrickTakingGame.__class__):\n \"\"\"\n Creates a model for a task.\n :param task: class of the task for which a model should be created\n :returns: None\n \"\"\"\n assert not self._parameters_returned, \"Optimizer has already been initialized, this model would not train\"\n game_instance = task()\n if self._input_size:\n assert BeliefBasedAgent(game_instance, 0).get_belief_size() == self._belief_size\n assert game_instance.num_players == self._num_players\n else:\n self.setup(game_instance)\n layers = []\n input_size = ([self._input_size] + self._layer_sizes)[self._num_shared_layers]\n for layer_size in self._layer_sizes[self._num_shared_layers:]:\n layers.append(nn.Linear(input_size, layer_size))\n layers.append(nn.ReLU(inplace=True))\n input_size = layer_size\n layers.append(nn.Linear(input_size, self._belief_size))\n self.models[task.name] = nn.Sequential(*layers).to(device)\n\n def forward(self, x: torch.FloatTensor, task: str) -> torch.FloatTensor:\n \"\"\"\n Forward pass of the model\n :param x: a shape (batch_size, belief_size + num_actions) torch Float tensor, beliefs concatenated with actions\n :param task: the name of the 
task of which the model should be used\n :return: a shape (batch_size, belief_size) torch Float tensor, the predicted next belief\n \"\"\"\n if self._polynomial:\n x = polynomial_transform(x)\n fc_out = self.models[task](self.backbone(x))\n fc_out[:, :-self._num_players] = nn.Sigmoid()(fc_out[:, :-self._num_players])\n return fc_out\n\n def loss(self, pred: torch.FloatTensor, y: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Calculate the loss of a batch of predictions against the true labels\n :param pred: (batch_size, belief_size) predicted next beliefs\n :param y: (batch_size, belief_size) actual next beliefs\n :return: mean loss as a torch Float scalar\n \"\"\"\n bce_loss = nn.BCELoss()(pred[:, :-self._num_players], y[:, :-self._num_players])\n mse_loss = nn.MSELoss()(pred[:, -self._num_players:], y[:, -self._num_players:])\n return bce_loss + mse_loss\n\n\nclass MultitaskRewardModel(nn.Module):\n \"\"\"\n Multilayered perceptron to approximate T: (b, a) -> r\n \"\"\"\n\n def __init__(self, layer_sizes: List[int] = None, shared_layers: int = 2, polynomial: bool = True):\n \"\"\"\n :param layer_sizes: sizes of the hidden layers in the network (there will be len(layer_sizes) + 1 Linear layers)\n :param shared_layers: number of layers to maintain as a shared backbone\n :param polynomial: whether or not to use the polynomial basis\n \"\"\"\n super().__init__()\n\n if layer_sizes is None:\n # Default layer sizes\n # layer_sizes = [100, 50, 20]\n layer_sizes = [200, 40]\n self._layer_sizes = layer_sizes\n self._num_shared_layers = shared_layers\n assert 0 < shared_layers <= len(self._layer_sizes)\n\n self._polynomial = polynomial\n self._input_size = None\n self._belief_size = None\n self._parameters_returned = False\n self.models = {}\n self.backbone = None\n\n def setup(self, task_instance: TrickTakingGame):\n \"\"\"\n Set parameters for task\n :param task_instance: instance of task to sample parameters from\n :return: None\n \"\"\"\n num_actions = task_instance.num_cards\n belief_size = BeliefBasedAgent(task_instance, 0).get_belief_size()\n d = belief_size + num_actions\n self._input_size = d * (d + 1) if self._polynomial else d\n self._belief_size = belief_size\n\n # Create shared backbone\n layers = []\n layer_input = self._input_size\n for layer_output in self._layer_sizes[:self._num_shared_layers]:\n layers.append(nn.Linear(layer_input, layer_output))\n layer_input = layer_output\n self.backbone = nn.Sequential(*layers).to(device)\n\n def get_parameters(self) -> List:\n \"\"\"\n :return: list of the parameters of all the models for use by an optimizer\n \"\"\"\n self._parameters_returned = True\n params = list(self.backbone.parameters())\n for model in self.models.values():\n params.extend(list(model.parameters()))\n return params\n\n def make_model(self, task: TrickTakingGame.__class__):\n \"\"\"\n Creates a model for a task.\n :param task: class of the task for which a model should be created\n :returns: None\n \"\"\"\n assert not self._parameters_returned, \"Optimizer has already been initialized, this model would not train\"\n game_instance = task()\n if self._input_size:\n assert BeliefBasedAgent(game_instance, 0).get_belief_size() == self._belief_size\n else:\n self.setup(game_instance)\n layers = []\n input_size = ([self._input_size] + self._layer_sizes)[self._num_shared_layers]\n for layer_size in self._layer_sizes[self._num_shared_layers:]:\n layers.append(nn.Linear(input_size, layer_size))\n layers.append(nn.ReLU(inplace=True))\n input_size = layer_size\n 
layers.append(nn.Linear(input_size, 1))\n self.models[task.name] = nn.Sequential(*layers).to(device)\n\n def forward(self, x: torch.FloatTensor, task: str) -> torch.FloatTensor:\n \"\"\"\n Forward pass of the model\n :param x: a shape (batch_size, belief_size + num_actions) torch Float tensor, beliefs concatenated with actions\n :param task: the name of the task of which the model should be used\n :return: a shape (batch_size, 1) torch Float tensor, the predicted reward\n \"\"\"\n if self._polynomial:\n x = polynomial_transform(x)\n return self.models[task](self.backbone(x))\n\n def loss(self, pred: torch.FloatTensor, y: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Calculate the loss of a batch of predictions against the true labels\n :param pred: (batch_size, 1) predicted rewards\n :param y: (batch_size, 1) actual rewards\n :return: mean loss as a torch Float scalar\n \"\"\"\n mse_loss = nn.MSELoss()(pred, y)\n return mse_loss\n\n\nclass MultitaskApprenticeModel(nn.Module):\n \"\"\"\n Multilayered perceptron to approximate PI: b -> a according to the expert MCTS policy\n \"\"\"\n\n def __init__(self, layer_sizes: List[int] = None, shared_layers: int = 1, polynomial: bool = True):\n \"\"\"\n :param layer_sizes: sizes of the hidden layers in the network (there will be len(layer_sizes) + 1 Linear layers)\n :param shared_layers: number of layers to maintain as a shared backbone\n :param polynomial: whether or not to use the polynomial basis\n \"\"\"\n super().__init__()\n\n if layer_sizes is None:\n # Default layer sizes\n # layer_sizes = [140, 80, 50]\n layer_sizes = [600, 300, 110]\n self._layer_sizes = layer_sizes\n self._num_shared_layers = shared_layers\n assert 0 < shared_layers <= len(self._layer_sizes)\n\n self._polynomial = polynomial\n self._input_size = None\n self._belief_size = None\n self._parameters_returned = False\n self.models = {}\n self.backbone = None\n\n def setup(self, task_instance: TrickTakingGame):\n \"\"\"\n Set parameters for task\n :param task_instance: instance of task to sample parameters from\n :return: None\n \"\"\"\n num_actions = task_instance.num_cards\n belief_size = BeliefBasedAgent(task_instance, 0).get_belief_size()\n d = belief_size\n self._input_size = d * (d + 1) if self._polynomial else d\n self._belief_size = belief_size\n\n # Create shared backbone\n layers = []\n layer_input = self._input_size\n for layer_output in self._layer_sizes[:self._num_shared_layers]:\n layers.append(nn.Linear(layer_input, layer_output))\n layer_input = layer_output\n self.backbone = nn.Sequential(*layers).to(device)\n\n def get_parameters(self) -> List:\n \"\"\"\n :return: list of the parameters of all the models for use by an optimizer\n \"\"\"\n self._parameters_returned = True\n params = list(self.backbone.parameters())\n for model in self.models.values():\n params.extend(list(model.parameters()))\n return params\n\n def make_model(self, task: TrickTakingGame.__class__):\n \"\"\"\n Creates a model for a task.\n :param task: class of the task for which a model should be created\n :returns: None\n \"\"\"\n assert not self._parameters_returned, \"Optimizer has already been initialized, this model would not train\"\n game_instance = task()\n if self._input_size:\n assert BeliefBasedAgent(game_instance, 0).get_belief_size() == self._belief_size\n else:\n self.setup(game_instance)\n layers = []\n input_size = ([self._input_size] + self._layer_sizes)[self._num_shared_layers]\n for layer_size in self._layer_sizes[self._num_shared_layers:]:\n 
layers.append(nn.Linear(input_size, layer_size))\n layers.append(nn.ReLU(inplace=True))\n input_size = layer_size\n layers.append(nn.Linear(input_size, game_instance.num_cards))\n self.models[task.name] = nn.Sequential(*layers).to(device)\n\n def forward(self, x: torch.FloatTensor, task: str) -> torch.FloatTensor:\n \"\"\"\n Forward pass of the model\n :param x: a shape (batch_size, belief_size) torch Float tensor, beliefs\n :param task: the name of the task of which the model should be used\n :return: a shape (batch_size, num_actions) torch Float tensor, the predicted action scores\n \"\"\"\n if self._polynomial:\n x = polynomial_transform(x)\n return self.models[task](self.backbone(x))\n\n def loss(self, pred: torch.FloatTensor, y: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Calculate the loss of a batch of predictions against the true labels\n :param pred: (batch_size, num_actions) predicted action scores\n :param y: (batch_size, 1) expert action selections\n :return: cross entropy loss as a torch Float scalar\n \"\"\"\n loss = nn.CrossEntropyLoss()(pred, y)\n return loss\n"
},
{
"alpha_fraction": 0.5714530944824219,
"alphanum_fraction": 0.5764874219894409,
"avg_line_length": 38.63718795776367,
"blob_id": "caabf7d26bc1d0a25be21bec4b2a4a1670b62d37",
"content_id": "17f94e37e0d89887b50d9e1a6592f5af4547e09a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17480,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 441,
"path": "/src/environments/trick_taking_game.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import random\nfrom typing import Dict, List, Tuple, Union\n\nimport torch\n\nfrom util import Card, OutOfTurnException, Suit\n\n\nclass TrickTakingGame:\n \"\"\"\n Base gym-inspired environment for a trick taking card game.\n\n TrickTakingGame maintains a _state that encodes the position of every card and game\n information such\n as cards in play, the score of each player, and whose turn it is.\n\n All players interact with the same interface.\n An action is defined by (id, i) where id is the player's index and i is the index of the card\n they intend to play.\n - if id != next player to move, no transition happens and zero reward is returned\n - if card i is not held by player id, then a random card is played and a large negative\n reward is added\n - otherwise, player id plays card i\n - if the trick finishes, the score changes and the next player is set\n - if the trick is ongoing, card i is marked as being played and the turn moves to the\n next player\n\n Each reset() and step(action) returns a tuple of observations specific to each player,\n which comprises of only\n the information visible to that player. The reward is similarly a tuple of integers for each\n player.\n\n Trick taking games should be implemented as classes that inherit TrickTakingGame and\n implement its abstract methods.\n They may also want to override some methods that define properties (e.g. cards_per_suit) or\n rules for each game.\n\n The state representation is defined in reset(), while the observations are defined in\n _get_observations().\n \"\"\"\n name = \"Trick Taking Game\"\n\n def __init__(self):\n self._num_cards = sum(self.cards_per_suit)\n self._state = None\n self._index_card_map = None\n self._num_cards = sum(self.cards_per_suit)\n self._index_card_map = self._compute_index_card_map()\n\n def reset(self, state: List[int] = None, assert_size: bool = True) -> Tuple[List[int], ...]:\n \"\"\"\n Reset the state for a new game, and return initial observations.\n\n State representation:\n [index of player holding card i or -1 if discarded| 0 <= i < self.num_cards] +\n [index of card in play or -1 if no card yet played by player j | 0 <= j <\n self.num_players] +\n [score of player j | 0 <= j < self.num_players] +\n [trump suit number or -1, trick leading card index or -1, index of player to move next]\n\n :param assert_size: Enable checking of state size\n :param state: the state to force the game to start at\n :return: Tuple[List[int], ...] of observations\n \"\"\"\n if state is None:\n card_distribution = self._deal()\n self._state = (\n card_distribution +\n [-1 for _ in range(self.num_players)] +\n [0 for _ in range(self.num_players)] +\n [self._get_trump_suit(), -1, self._get_first_player(card_distribution)]\n )\n else:\n self._state = state\n\n if assert_size:\n assert len(\n self._state) == self.num_cards + 2 * self.num_players + 3, \"state was reset to \" \\\n \"the \" \\\n \"wrong size\"\n return self._get_observations()\n\n def step(self, action: Tuple[int, int]) -> Tuple[\n Tuple[List[int], ...], Tuple[int, ...], bool, Dict]:\n \"\"\"\n Execute action according to the rules defined in the docstring of TrickTakingGame.\n\n :param action: Tuple[int, int], (id, i) representing player id playing card i\n :return: Tuple of the following:\n - observation, Tuple[List[int], ...] of observations\n - reward, Tuple[int, ...] 
of rewards\n - done, bool that is True if the game has ended, otherwise False\n - info, Dict of diagnostic information, currently empty\n \"\"\"\n assert len(action) == 2, \"invalid action\"\n player, card_index = action\n assert card_index < self.num_cards, \"Trying to pick card with index higher than allowed\"\n num_cards = self.num_cards\n num_players = self.num_players\n\n rewards = [0 for _ in range(num_players)]\n\n # Check if it is this player's turn\n if player != self.next_player:\n return self._get_observations(), tuple(rewards), False, {\"error\": OutOfTurnException}\n\n # Check if the card is a valid play\n invalid_plays = {}\n if not self.is_valid_play(player, card_index):\n valid_cards = [i for i in range(num_cards) if self.is_valid_play(player, i)]\n rewards[player] -= 50 # Huge penalty for picking an invalid card!\n card_index = random.choice(valid_cards) # Choose random valid move to play\n invalid_plays[player] = \"invalid\"\n else:\n pass\n # possible to reward player for making good choice here\n\n # Play the card\n self._state[card_index] = -1\n assert self._state[num_cards + player] == -1, \"Trying to play in a trick already played in\"\n self._state[num_cards + player] = card_index\n if self._state[-2] == -1:\n # Trick leader\n self._state[-2] = card_index\n # update next player\n self._state[-1] = (player + 1) % num_players\n\n # Check if trick completed\n played_cards = self._state[num_cards: num_cards + num_players]\n if -1 not in played_cards:\n # Handle rewards\n trick_rewards, next_leader = self._end_trick()\n total_rewards = sum(trick_rewards)\n rewards = [rewards[i] + 1.3 * trick_rewards[i] - 0.3 * total_rewards for i in\n range(num_players)]\n for i in range(num_players):\n offset = num_cards + num_players # index into state correctly\n self._state[offset + i] += trick_rewards[i] # update current scores\n\n # Reset trick\n for i in range(num_cards, num_cards + num_players):\n self._state[i] = -1\n self._state[-2] = -1\n self._state[-1] = next_leader\n\n # Check if game ended\n if self._game_has_ended():\n done = True\n # apply score bonuses\n bonus_rewards = self._end_game_bonuses()\n rewards = [rewards[i] + bonus_rewards[i] for i in range(num_players)]\n for i in range(num_players):\n offset = num_cards + num_players\n self._state[offset + i] += bonus_rewards[i]\n else:\n done = False\n\n return self._get_observations(), tuple(rewards), done, invalid_plays\n\n def render(self, mode: str = \"human\", view: int = -1):\n \"\"\"\n Render the state of the game.\n\n :param mode: str\n :param view: int, render whole state if -1, otherwise render observation of agent view\n :return: None\n \"\"\"\n assert mode == \"human\", \"invalid mode\"\n if view == -1 or not 0 <= view < self.num_players:\n print(self._state)\n else:\n print(self._get_observations()[view])\n\n def _compute_index_card_map(self):\n index_map = {}\n for card_index in range(self.num_cards):\n suit, total = 0, 0\n while total + self.cards_per_suit[suit] <= card_index:\n total += self.cards_per_suit[suit]\n suit += 1\n num = card_index - total\n assert 0 <= num < self.cards_per_suit[suit], \"card value is invalid\"\n index_map[card_index] = Card(suit=Suit(suit), value=num)\n return index_map\n\n def _deal(self) -> List[int]:\n \"\"\"\n Deal cards evenly to each player, and return the positions of the cards as included in\n the state\n :return: List[int], where the i^th element is the index of the player who holds card i\n \"\"\"\n assert self.num_cards % self.num_players == 0, \"cards cannot be 
evenly divided among the \" \\\n \"players\"\n cards = []\n for i in range(self.num_players):\n cards += [i for _ in range(self.num_cards // self.num_players)]\n random.shuffle(cards)\n assert len(cards) == self.num_cards, \"wrong number of cards dealt\"\n return cards\n\n def _get_hands(self) -> Dict[int, List[int]]:\n \"\"\"\n Return the cards possessed by each player.\n :return: Dict, mapping i to a list of the sorted card indices in player i's hand\n \"\"\"\n hands = {i: [] for i in range(self.num_players)}\n for card_index in range(self.num_cards):\n holding_player = self._state[card_index]\n if holding_player != -1:\n hands[holding_player].append(card_index)\n return hands\n\n def _get_observations(self) -> Tuple[List[int], ...]:\n \"\"\"\n Extract visible information for each player to create vector of observations.\n\n An observation is structured as follows:\n [0 if card i not in hand, 1 if in hand, -1 if discarded | 0 <= i < self.num_cards] +\n [index of card in play or -1 if no card yet played by player j | 0 <= j <\n self.num_players] +\n [score of player j | 0 <= j < self.num_players] +\n [index of player to move next]\n\n :return: Tuple, where the i^th element is a fixed-length list, the observation for player i\n \"\"\"\n cards, public_info = self._state[:self.num_cards], self._state[self.num_cards:]\n observations = tuple([-1 if x == -1 else 0 for x in cards] for _ in range(self.num_players))\n\n for card_index, card_position in enumerate(cards):\n if card_position != -1:\n observations[card_position][card_index] = 1\n\n for i in range(self.num_players):\n observations[i].extend(public_info[:])\n\n return observations\n\n # Rule-related methods\n\n def _end_game_bonuses(self) -> List[int]:\n \"\"\"\n Computes additional reward assigned to each player at the end of a game.\n May be overwritten by child classes.\n :return: vector of bonus rewards for each player\n \"\"\"\n rewards = [0 for _ in range(self.num_players)]\n return rewards\n\n def _end_trick(self) -> Tuple[List[int], int]:\n \"\"\"\n Determine the rewards of a completed trick, and choose the player to start the next trick.\n Should probably be overwritten by a child class.\n :return: Tuple, of a vector of rewards for the current trick and the index of the next\n player to start\n \"\"\"\n winning_player, winning_card = self._get_trick_winner()\n rewards = [0 for _ in range(self.num_players)]\n rewards[winning_player] = 1\n return rewards, winning_player\n\n def _game_has_ended(self) -> bool:\n \"\"\"\n :return: True if the game has ended\n \"\"\"\n return sum(self._state[:self.num_cards]) == -self.num_cards\n\n def _get_trick_winner(self) -> Tuple[int, Card]:\n \"\"\"\n Determine the winning player and card of a completed trick\n :return: Tuple[int, Card], the index of the winning player and their played Card\n \"\"\"\n trump_suit = self.trump_suit\n starting_card = self.trick_leader\n played_cards = [self.index_to_card(self._state[self.num_cards + i]) for i in\n range(self.num_players)]\n\n winning_index = -1\n winning_card = starting_card\n for player_index, card in enumerate(played_cards):\n if (card.suit == winning_card.suit and card.value >= winning_card.value) or \\\n (card.suit == trump_suit and winning_card.suit != trump_suit):\n winning_index = player_index\n winning_card = card\n\n return winning_index, winning_card\n\n # noinspection PyMethodMayBeStatic\n def _get_trump_suit(self) -> int:\n \"\"\"\n Determine the trump suit.\n :return: int, -1 if there is no trump else the numerical index of the suit\n 
\"\"\"\n return random.randint(0, 3)\n\n # noinspection PyMethodMayBeStatic\n def _get_first_player(self, card_distribution: List[int]) -> int:\n \"\"\"\n :param card_distribution: part of the state that shows the card positions, also the first\n output of _deal()\n :return: int, index of the player who gets the first turn\n \"\"\"\n return 0\n\n def is_valid_play(self, player_index, card_index) -> bool:\n \"\"\"\n Determines if a proposed card play is valid\n :param player_index: player making the move\n :param card_index: proposed card to be played\n :return: True if the card play is valid, else False\n \"\"\"\n if self._state[card_index] != player_index:\n return False\n\n # Check if player is empty of the starting suit if different suit was played\n played_card = self.index_to_card(card_index)\n starting_card = self.trick_leader\n if starting_card is None:\n return True\n if played_card.suit != starting_card.suit:\n for i in range(self.num_cards):\n if self._state[i] == player_index and self.index_to_card(\n i).suit == starting_card.suit:\n return False\n\n return True\n\n def valid_play_from_belief(self, belief, card_index):\n belief_col = torch.squeeze(belief)\n if belief_col[card_index] <= 0.5:\n return False\n # Check if player is empty of the starting suit if different suit was played\n # see if agent thinks no starting card\n starting_card_section = belief_col[3 * self.num_cards:3 * self.num_cards + self.num_cards]\n # no starting card\n if torch.all(starting_card_section < 0.5):\n return True\n # is starting card\n starting_card = self.index_to_card((torch.argmax(\n starting_card_section)).item())\n played_card = self.index_to_card(card_index)\n if played_card.suit != starting_card.suit:\n for i in range(self.num_cards):\n if belief_col[i] >= 0.5 and self.index_to_card(\n i).suit == starting_card.suit:\n return False\n\n return True\n\n\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n \"\"\"\n Defines the number of cards for each suit the game is played with.\n Override for children classes.\n :return: Tuple[int], where the i^th element is the number of cards for suit i\n \"\"\"\n return 6, 6, 6, 6\n\n @property\n def num_cards(self) -> int:\n \"\"\"\n :return: int, the total number of cards in the game based on cards_per_suit()\n \"\"\"\n return self._num_cards\n\n @property\n def num_players(self) -> int:\n \"\"\"\n :return: int, number of players in the game\n \"\"\"\n return 4\n\n @property\n def next_player(self) -> int:\n \"\"\"\n :return: index of player who is allowed to play the next card\n \"\"\"\n return self._state[-1]\n\n @property\n def scores(self) -> List[int]:\n \"\"\"\n :return: list where the i^th integer is the score of player i\n \"\"\"\n return self._state[self.num_cards + self.num_players: self.num_cards + 2 * self.num_players]\n\n @property\n def trump_suit(self) -> Suit:\n \"\"\"\n :return: the trump suit\n \"\"\"\n return Suit(self._state[-3])\n\n @property\n def trick_leader(self) -> Union[Card, None]:\n \"\"\"\n :return: the card that was first played in the trick, or None if one hasn't yet been played\n \"\"\"\n card_index = self._state[-2]\n if card_index == -1:\n return None\n else:\n return self.index_to_card(card_index)\n\n def current_trick(self) -> Dict[int, Card]:\n \"\"\"\n\n :return: dictionary mapping player ids to card they have played in this trick\n \"\"\"\n card_list = self._state[self.num_cards:self.num_cards + self.num_players]\n trick_dict = {i: self.index_to_card(el) for i, el in enumerate(card_list) if el != -1}\n 
return trick_dict\n\n def index_to_card(self, card_index: int) -> Card:\n \"\"\"\n Converts a card index to a suit and number representing the relative strength of the card.\n\n :param card_index: int, 0 <= card_index < self.num_cards\n :return: Card\n \"\"\"\n # TODO: run once in map and lookup in future\n suit, total = 0, 0\n while total + self.cards_per_suit[suit] <= card_index:\n total += self.cards_per_suit[suit]\n suit += 1\n num = card_index - total\n assert 0 <= num < self.cards_per_suit[suit], \"card value is invalid\"\n return Card(suit=Suit(suit), value=num)\n\n def card_to_index(self, card: Card) -> int:\n \"\"\"\n Converts a card to a numerical index.\n\n :param card: Card\n :return: int index, 0 <= index < self.num_cards\n \"\"\"\n target_suit = card.suit.value\n cards_before_suit = sum(self.cards_per_suit[:target_suit])\n index = cards_before_suit + card.value\n assert 0 <= index < self.num_cards, \"card index is invalid\"\n return index\n"
},
{
"alpha_fraction": 0.5658507943153381,
"alphanum_fraction": 0.5710955858230591,
"avg_line_length": 39.85714340209961,
"blob_id": "67d5877cdd835af68d0d0826743a14c07f93189e",
"content_id": "30ee51c68ac798e9ddacb29a3db08d858e74cbc6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8580,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 210,
"path": "/src/agents/expert_iteration_agent.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import math\nimport random\nimport time\nfrom collections import defaultdict, Counter\nfrom typing import Tuple, List, Union\n\nimport numpy as np\nimport torch\n\nfrom agents.belief_agent import BeliefBasedAgent\nfrom agents.models.model_based_models import ApprenticeModel\nfrom agents.models.multitask_models import MultitaskApprenticeModel\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\naction_tensor_cache = {}\n\n\ndef mcts(executor, num_workers, belief, game, transition_model, reward_model, task_name,\n timeout: float = 0.5,\n horizon: int = 3,\n inverse_discount=1.2) -> int:\n \"\"\"\n Given models and state, outputs action\n :param executor:\n :param game:\n :param timeout:\n :param horizon:\n :param inverse_discount:\n :return:\n \"\"\"\n mcts_helper = _MCTSRunner(game, transition_model, reward_model, task_name, timeout, horizon,\n inverse_discount)\n thread_results = executor.map(mcts_helper, [belief.detach()] * num_workers)\n # thread_results = [mcts_helper(belief) for _ in range(num_workers)]\n thread_scores, thread_plays = list(map(list, zip(*thread_results)))\n # combine scores lists\n scores_counter = Counter()\n for d in thread_scores:\n scores_counter.update(d)\n scores = dict(scores_counter)\n # combine plays lists\n plays_counter = Counter()\n for d in thread_plays:\n plays_counter.update(d)\n plays = dict(plays_counter)\n # compute best move\n card_index = _get_final_action(belief, game, scores, plays)\n return card_index\n\n\ndef _get_final_action(belief, game, scores, plays):\n action_list = range(game.num_cards - 1)\n valid_actions = [action for action in action_list if\n game.valid_play_from_belief(belief, action)]\n action_values = {}\n for a in (valid_actions if len(valid_actions) else action_list):\n if (a,) in plays and (a,) in scores and plays[(a,)]:\n action_values[a] = scores[(a,)] / plays[(a,)]\n else:\n action_values[a] = -float('inf')\n\n # get key associated with biggest val\n return max(action_values, key=action_values.get)\n\n\nclass _MCTSRunner:\n \"\"\"\n Helper class for mcts()\n \"\"\"\n\n def __init__(self, game, transition_model, reward_model, task_name,\n timeout: float = 0.5,\n horizon: int = 4,\n inverse_discount=1.2):\n self._game = game\n self._transition_model = transition_model\n self._reward_model = reward_model\n self._task_name = task_name\n self._timeout = timeout\n self._horizon = horizon\n self._inverse_discount = inverse_discount\n\n def __call__(self, belief):\n return self._mcts_helper(belief)\n\n def get_transition_reward(self, current, selected_action, reward_cache, nodes, actions):\n new_node = current + (selected_action,)\n if new_node in reward_cache:\n reward = reward_cache[new_node]\n else:\n if current in nodes:\n belief = nodes[current]\n else:\n a = current[-1]\n ba = torch.cat([nodes[current[:-1]], actions[a:a + 1]], dim=1)\n belief = self._transition_model.forward(ba, self._task_name)\n nodes[current] = belief\n belief_action = torch.cat([belief, actions[selected_action:selected_action + 1]], dim=1)\n reward = self._reward_model.forward(belief_action, self._task_name)\n reward_cache[new_node] = reward\n return reward\n\n @staticmethod\n def ucb(score, plays, parent_plays, lowest_score, c=1.4):\n exploitation = score / plays if plays else 0\n exploitation /= abs(lowest_score) / 5 # normalization\n exploration = c * math.sqrt(math.log(parent_plays) / plays) if plays else float('inf')\n return 
exploitation + exploration\n\n def _mcts_helper(self, belief):\n # Monte Carlo\n t0 = time.time()\n timeout = self._timeout\n horizon = self._horizon\n inverse_discount = self._inverse_discount\n start_belief = belief # torch.FloatTensor([belief]).to(device)\n actions = torch.eye(self._game.num_cards).float().to(device)\n num_actions = self._game.num_cards\n list_actions = list(range(num_actions))\n nodes = {tuple(): start_belief}\n plays = defaultdict(int)\n reward_cache = {}\n scores = defaultdict(float)\n lowest_score = 1\n while time.time() - t0 < timeout:\n current = tuple()\n plays[current] += 1\n total_reward = 0\n first_selection = True\n\n # Selection\n while len(current) < horizon and current + (0,) in plays:\n action_values = [_MCTSRunner.ucb(scores[current + (a,)],\n plays[current + (a,)],\n plays[current],\n lowest_score)\n for a in list_actions]\n\n # on first selection, only choose from valid moves\n if first_selection:\n first_selection = False\n valid_list = [action for action in range(num_actions - 1) if\n self._game.valid_play_from_belief(belief, action)]\n if len(valid_list) > 0:\n selected_action = max(valid_list, key=lambda a: action_values[a])\n else:\n selected_action = max(list_actions, key=lambda a: action_values[a])\n else:\n selected_action = max(list_actions, key=lambda a: action_values[a])\n\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n plays[current] += 1\n\n # Expansion\n if len(current) < horizon and current + (0,) not in plays:\n plays[current + (0,)] = 0\n selected_action = random.randint(0, num_actions - 1)\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n plays[current] += 1\n final_current = current\n\n # Simulation\n while len(current) < horizon:\n selected_action = random.randint(0, num_actions - 1)\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n\n # Backpropagation\n for i in range(horizon + 1):\n scores[final_current[:i]] += total_reward.item()\n lowest_score = min(lowest_score, total_reward)\n\n # detach tensors\n return scores, plays\n\n\nclass ExpertIterationAgent(BeliefBasedAgent):\n def __init__(self, game: TrickTakingGame,\n player_number: int,\n apprentice_model: Union[ApprenticeModel, MultitaskApprenticeModel]):\n super().__init__(game, player_number)\n self._task_name = game.name\n self._apprentice_model = apprentice_model\n self._current_observation = None\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n super().observe(action, observation, reward)\n self._current_observation = observation\n\n def act(self, epsilon: float = 0) -> Card:\n if np.random.rand() <= epsilon:\n return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))\n # valid_cards = self._get_hand(self._current_observation, valid_only=True)\n # return random.sample(valid_cards, 1)[0]\n\n action_values = self._apprentice_model.forward(torch.FloatTensor([self._belief]).to(device),\n self._task_name)\n best_action = torch.argmax(action_values).item()\n return self._game.index_to_card(best_action)\n"
},
{
"alpha_fraction": 0.5722772479057312,
"alphanum_fraction": 0.5728865265846252,
"avg_line_length": 36.947975158691406,
"blob_id": "9a23c4576a864efe4d1ed442d0e5253ffdc66ddd",
"content_id": "6e2c2761b82ead0bfd626c7f096298adb6392286",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6565,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 173,
"path": "/src/game.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "from typing import Any, Dict, List, Tuple, Union\n\nfrom agents.base import Agent\nfrom agents.belief_agent import BeliefBasedAgent\nfrom agents.human import Human\nfrom agents.random_agent import RandomAgent\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\n\nclass Game:\n \"\"\"\n A class that acts as an interface for the game.\n\n Initialize a Game with a TrickTakingGame (or child class). Upon calling run(), the game\n will query the player for an action or run a policy.\n \"\"\"\n\n def __init__(self, game: TrickTakingGame.__class__,\n player_setting: List[Agent.__class__] = None,\n agent_params: List[Dict[str, Any]] = None,\n game_params: Dict[str, Any] = None):\n \"\"\"\n Initialize a new game.\n\n :param game: either TrickTakingGame, or a class that inherits from it, the game to be played\n :param player_setting: List[Player], defining the type of player in order of their seated\n :param agent_params: list containing parameter to be passed to agent i\n position\n \"\"\"\n\n self._game = game()\n\n if player_setting is None:\n player_setting = [RandomAgent] * self._game.num_players\n assert len(\n player_setting) == self._game.num_players, \"number of players doesn't fit the game \" \\\n \"requirements\"\n\n if agent_params is None:\n agent_params = [{}] * len(player_setting)\n\n if game_params is None:\n game_params = {}\n\n self._agent_list = [player_class(self._game, i, **agent_params[i]) for i, player_class in\n enumerate(player_setting)]\n\n self._observations = None\n self._barbs = []\n self._game_params = game_params\n self._info = []\n\n def run(self, state: List[int] = None) -> Tuple[List[int], List[int]]:\n \"\"\"\n Start and play the game. Can only be called once per instance of Game.\n :param state: initial state to force the game to\n :return: final score of the game, its initial state\n \"\"\"\n assert self._observations is None, \"game has already been played\"\n self._observations = self._game.reset(state=state)\n initial_state = self._game._state[:]\n if state: assert state == initial_state\n self._update_observations(-1, None, self._observations,\n [None] * len(self._agent_list)) # set initial observations\n done = False\n\n round_number = 0\n # Play the game\n while not done:\n next_player = self._game.next_player\n selected_card = self._choose_action(next_player)\n card_index = self._game.card_to_index(selected_card)\n observations, rewards, done, info = self._game.step((next_player, card_index))\n self._info.append(info)\n if self._game_params.get(\"verbose\", True):\n self._print_report(round_number, next_player, selected_card, observations, rewards)\n self._observations = observations\n self._update_observations(next_player, selected_card, observations, rewards)\n self._collect_barbs()\n round_number += 1\n\n # Game has finished\n self._game_ended()\n\n return self._game.scores, initial_state\n\n def _print_report(self,\n round_number: int,\n player_who_went: int,\n card_played: Card,\n observations: Tuple[List[int], ...],\n rewards: Tuple[int, ...],\n ):\n \"\"\"\n Print out any information for the user about what just happened in a human-readable way.\n Do not reveal hidden information.\n\n TODO: Implement!\n\n :param player_who_went: index of the player who took the last move\n :param card_played: the card that was played\n :param observations: a vector where the i^th element is the observation of player i\n :param rewards: a vector where the i^th element is the reward given to playre i\n NOTE: rewards are 
distinct from game scores\n :return: None\n \"\"\"\n # TODO: improve printout\n print(\n f\"Round {round_number}: Player {player_who_went} just played {card_played}. \"\n f\"Observations of players: \"\n f\"{observations}. Player rewards: {rewards}\")\n\n def _game_ended(self):\n \"\"\"\n Do things related to the end of the game, if necessary, e.g. printing things\n\n :return: None\n \"\"\"\n # TODO: improve\n pass\n\n def _choose_action(self, player: int) -> Card:\n \"\"\"\n Retrieve the selected action of a player.\n\n :param player: index of the player who should be taking a turn\n :return: the Card that they are playing\n \"\"\"\n if isinstance(self._agent_list[player], Human):\n return self._choose_action_human(player)\n\n agent = self._agent_list[player]\n return agent.act(self._game_params.get(\"epsilon\", 0))\n\n def _choose_action_human(self, player: int) -> Card:\n \"\"\"\n Display the necessary information for a human player and then prompt them to take their\n turn.\n TODO: Implement!\n :param player: index of the player who should be taking a turn\n :return: the Card the human player selects to play\n \"\"\"\n raise NotImplementedError\n\n def _update_observations(self, player_who_went: int,\n card_played: Union[Card, None],\n observations: Tuple[List[int], ...],\n rewards: Union[Tuple[int, ...], Tuple[None, ...]],\n ):\n \"\"\"\n Updates the observations of each player\n\n :param observations:\n \"\"\"\n for i, agent in enumerate(self._agent_list):\n card_idx = self._game.card_to_index(card_played) if card_played is not None else None\n agent.observe((player_who_went, card_idx),\n observations[i],\n rewards[i])\n\n def _collect_barbs(self):\n for agent in self._agent_list:\n if issubclass(agent.__class__, BeliefBasedAgent):\n barb = agent.barb()\n if barb is not None:\n self._barbs.append(barb)\n\n def get_barbs(self):\n return self._barbs\n\n def get_info(self):\n return self._info\n"
},
{
"alpha_fraction": 0.6169871687889099,
"alphanum_fraction": 0.6241987347602844,
"avg_line_length": 31.0256404876709,
"blob_id": "483ad04934a22c8daecb14719d5aa69769c4f68f",
"content_id": "8d55b316b606afe2c83737c122b94102383e3d8a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1248,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 39,
"path": "/src/environments/oh_hell.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom typing import List, Tuple\n\nfrom environments.bidding_base import BiddingBase\nfrom util import Suit\n\n\nclass OhHell(BiddingBase):\n name = \"Oh Hell\"\n\n def _valid_bids(self, proposed_bids: List) -> bool:\n num_tricks = self.num_cards / self.num_players\n return sum(proposed_bids) != num_tricks\n\n @property\n # TODO: deal more or less cards depending on round\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 8, 8, 8, 8\n\n # TODO: randomize trump suit at beginning of each game\n def _get_trump_suit(self) -> int:\n return Suit.SPADES\n\n def _end_trick(self) -> Tuple[List[int], int]:\n winning_player, winning_card = self._get_trick_winner()\n rewards = [0 for _ in range(self.num_players)]\n # TODO: reward intermediate as getting closer\n self._tricks_won[winning_player] += 1\n\n return rewards, winning_player\n\n def _end_game_bonuses(self) -> List[int]:\n scores = [0 for _ in range(self.num_players)]\n for player in range(self.num_players):\n scores[player] += self._tricks_won[player]\n if self._tricks_won[player] == self._player_bids[player]:\n scores[player] += 10\n\n return scores"
},
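As a quick illustration of the two Oh Hell rules encoded above, here is a standalone sketch; `bids_are_valid` and `final_scores` are hypothetical helper names, not functions from the repo.

```python
# The bid-validity and end-game scoring rules from OhHell, written standalone.

def bids_are_valid(proposed_bids, num_tricks):
    # The bids may not sum to the number of tricks, so at least one player
    # is always guaranteed to miss their bid.
    return sum(proposed_bids) != num_tricks

def final_scores(tricks_won, bids):
    # One point per trick taken, plus a 10-point bonus for hitting the bid exactly.
    return [won + (10 if won == bid else 0) for won, bid in zip(tricks_won, bids)]

assert not bids_are_valid([2, 2, 2, 2], 8)     # sums to the trick count: rejected
assert bids_are_valid([2, 2, 2, 3], 8)
assert final_scores([3, 2], [3, 1]) == [13, 2]
```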
{
"alpha_fraction": 0.73102867603302,
"alphanum_fraction": 0.7428330779075623,
"avg_line_length": 37.290321350097656,
"blob_id": "7d3479bafa0a2074c89d00a3224cb01f7d291056",
"content_id": "a4940a6a9423f22058f4b3bc3ef067d08a80d3b8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1186,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 31,
"path": "/src/train_dqn.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nimport torch\n\nfrom agents.dqn_agent import DQNLearner, DQNAgent, DQN, calculate_action_observation_size\nfrom agents.random_agent import RandomAgent\nfrom environments.hearts import SimpleHearts\nfrom environments.test_hearts import TestSimpleHearts\nfrom evaluators import evaluate_random\nfrom game import Game\n\n\ndef train(save_path):\n # old_model_state_dict = torch.load(\"saved_model.pt\")\n # learner = DQNLearner(resume_state=old_model_state_dict)\n learner = DQNLearner()\n trained_model = learner.train([TestSimpleHearts])\n torch.save(trained_model.state_dict(), save_path)\n\n\ndef eval_filename(load_path):\n model_state_dict = torch.load(load_path)\n action_size, observation_size = calculate_action_observation_size(TestSimpleHearts)\n model = DQN(observation_size, action_size).to(\"cuda:0\")\n model.load_state_dict(model_state_dict)\n winrate, avg_score, percent_invalid, scores = evaluate_random(DQNAgent, model, num_trials=1000)\n print(f\"winrate: {winrate}\\npercent invalid: {percent_invalid}\")\n\n\nif __name__ == \"__main__\":\n # train(\"saved_model.pt\")\n eval_filename(\"/home/dolphonie/Desktop/MIT/6.883/saved_models/agent_49048.pt\")"
},
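The script above relies on the usual PyTorch state-dict round trip: `train` saves `state_dict()`, and `eval_filename` rebuilds an identically shaped network before restoring the weights. A minimal sketch of that pattern follows; the two-layer net and its sizes are stand-ins, not the repo's `DQN` class.

```python
import torch
import torch.nn as nn

# training side: persist only the weights, not the module object
model = nn.Sequential(nn.Linear(48, 64), nn.ReLU(), nn.Linear(64, 24))
torch.save(model.state_dict(), "saved_model.pt")

# evaluation side: rebuild the same architecture, then load the weights
fresh = nn.Sequential(nn.Linear(48, 64), nn.ReLU(), nn.Linear(64, 24))
fresh.load_state_dict(torch.load("saved_model.pt"))
fresh.eval()  # disable training-only behavior before evaluation
```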
{
"alpha_fraction": 0.7327935099601746,
"alphanum_fraction": 0.7327935099601746,
"avg_line_length": 29.875,
"blob_id": "c5d0b39629bc204584bc33ec659a4eae0919bf10",
"content_id": "76e6b597b17bf378ff2eed7df75f37e2cbd5957f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 247,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 8,
"path": "/README.md",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# multitask-card-games\n\nReminder to mark src as a \"Sources Root\" if you're using PyCharm so that the imports won't scream at you.\n\n\n## Instructions\n\n**Model-based**: Run `src/train_model_based.py`. Tensorboard results can be found in `src\\runs\\`. "
},
{
"alpha_fraction": 0.6165840029716492,
"alphanum_fraction": 0.6307583451271057,
"avg_line_length": 42.41538619995117,
"blob_id": "4a6e707378f2b083b194f1dba518cde60db6b2ad",
"content_id": "7f78a9cbab75fd0eb439c261094cd7d32a25c0f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2822,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 65,
"path": "/src/train_model_based.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import torch\n\nfrom agents.expert_iteration_agent import ExpertIterationAgent\nfrom agents.expert_iteration_learner import ExpertIterationLearner\nfrom agents.model_based_agent import MonteCarloAgent\nfrom agents.model_based_learner import ModelBasedLearner, ModelBasedAgent\nfrom agents.random_agent import RandomAgent\nfrom environments.test_hearts import TestSimpleHearts\nfrom environments.trick_taking_game import TrickTakingGame\nfrom environments.hearts import SimpleHearts\nfrom environments.twentyfive import TwentyFive\nfrom environments.hattrick import HatTrick\nfrom environments.basic_trick import BasicTrick\nfrom evaluators import evaluate_random\n\n\nMODEL_PARAMS = {\n \"Trick Taking Game\": [104, 24, 4],\n \"Test Simple Hearts\": [104, 24, 4],\n \"Simple Hearts\": [136, 32, 4], \n \"Test TwentyFive\": [136, 32, 4],\n \"Test HatTrick\": [136, 32, 4], \n \"Test Basic Trick\": [136, 32, 4]\n}\n\n\ndef train(tasks, load_model_names, save_model_names, multitask, learner_name):\n # Set up learner\n if load_model_names:\n resume = {\"transition\": {}, \"reward\": {}, \"apprentice\": {}}\n for task in tasks:\n transition_state = torch.load(\"models/transition_model_temp_{}.pt\".format(load_model_names[task.name]))\n reward_state = torch.load(\"models/reward_model_temp_{}.pt\".format(load_model_names[task.name]))\n apprentice_state = torch.load(\"models/apprentice_model_temp_{}.pt\".format(load_model_names[task.name]))\n resume[\"transition\"][task.name] = {\"state\": transition_state, \"task\": task}\n resume[\"reward\"][task.name] = {\"state\": reward_state, \"task\": task}\n resume[\"apprentice\"][task.name] = {\"state\": apprentice_state, \"task\": task}\n else:\n resume = None\n learner = ExpertIterationLearner(agent=ExpertIterationAgent, model_names=save_model_names,\n resume_model=resume, multitask=multitask,\n learner_name=learner_name)\n\n # # Evaluate\n # evaluate = evaluate_random(tasks,\n # ModelBasedAgent,\n # {task.name: learner.get_models(task) for task in tasks},\n # num_trials=100)\n # print(evaluate)\n\n learner.train(tasks)\n\n\nif __name__ == \"__main__\":\n for i in range(5):\n train([TestSimpleHearts, TrickTakingGame],\n None,\n {\"Test Simple Hearts\": f\"multitask_tsh_{i}\", \"Trick Taking Game\": f\"multitask_ttg_{i}\"},\n multitask=True,\n learner_name=f\"exit-{i}\")\n train([TestSimpleHearts, TrickTakingGame],\n None,\n {\"Test Simple Hearts\": f\"singletask_tsh_{i}\", \"Trick Taking Game\": f\"singletask_ttg_{i}\"},\n multitask=False,\n learner_name=f\"exit-{i}\")\n"
},
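For reference, this is the shape of the `resume` dictionary that `train` builds when `load_model_names` is supplied, extracted into a standalone sketch. It assumes checkpoint files from a previous run exist under `models/`; the model name is an example.

```python
import torch

from environments.test_hearts import TestSimpleHearts

tasks = [TestSimpleHearts]
load_model_names = {TestSimpleHearts.name: "multitask_tsh_0"}  # example name

resume = {"transition": {}, "reward": {}, "apprentice": {}}
for task in tasks:
    name = load_model_names[task.name]
    for kind in ("transition", "reward", "apprentice"):
        # file names follow the "models/<kind>_model_temp_<name>.pt" pattern above
        state = torch.load(f"models/{kind}_model_temp_{name}.pt")
        resume[kind][task.name] = {"state": state, "task": task}
```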
{
"alpha_fraction": 0.5621740221977234,
"alphanum_fraction": 0.568853497505188,
"avg_line_length": 41.858821868896484,
"blob_id": "105ea6a805d2b5a9f9909d916279b90fa5b43815",
"content_id": "770010bc5f528c252184672db719ce45fa3c146e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10929,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 255,
"path": "/src/agents/model_based_agent.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import math\nimport random\nimport time\nfrom collections import deque, defaultdict, Counter\nfrom typing import Tuple, List, Union\n\nimport numpy as np\nimport torch\n\nfrom agents.belief_agent import BeliefBasedAgent\nfrom agents.models.model_based_models import RewardModel, TransitionModel\nfrom agents.models.multitask_models import MultitaskRewardModel, MultitaskTransitionModel\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\naction_tensor_cache = {}\n\n\ndef mcts(executor, num_workers, belief, game, transition_model, reward_model, task_name,\n timeout: float = 0.5,\n horizon: int = 4,\n inverse_discount=1.2) -> int:\n \"\"\"\n Given models and state, outputs action\n :param executor:\n :param game:\n :param timeout:\n :param horizon:\n :param inverse_discount:\n :return:\n \"\"\"\n mcts_helper = _MCTSRunner(game, transition_model, reward_model, task_name, timeout, horizon,\n inverse_discount)\n # thread_results = executor.map(mcts_helper, [belief] * num_workers)\n thread_results = [mcts_helper(belief) for _ in range(num_workers)]\n thread_scores, thread_plays = list(map(list, zip(*thread_results)))\n # combine scores lists\n scores_counter = Counter()\n for d in thread_scores:\n scores_counter.update(d)\n scores = dict(scores_counter)\n # combine plays lists\n plays_counter = Counter()\n for d in thread_plays:\n plays_counter.update(d)\n plays = dict(plays_counter)\n # compute best move\n list_actions = list(range(game.num_cards))\n card_index = max(list_actions,\n key=lambda a: scores[(a,)] / plays[(a,)] if plays[(a,)] else -float('inf'))\n return card_index\n\n\nclass _MCTSRunner:\n \"\"\"\n Helper class for mcts()\n \"\"\"\n\n def __init__(self, game, transition_model, reward_model, task_name,\n timeout: float = 0.5,\n horizon: int = 4,\n inverse_discount=1.2):\n self._game = game\n self._transition_model = transition_model\n self._reward_model = reward_model\n self._task_name = task_name\n self._timeout = timeout\n self._horizon = horizon\n self._inverse_discount = inverse_discount\n\n def __call__(self, belief):\n return self._mcts_helper(belief)\n\n def get_transition_reward(self, current, selected_action, reward_cache, nodes, actions):\n new_node = current + (selected_action,)\n if new_node in reward_cache:\n reward = reward_cache[new_node]\n else:\n if current in nodes:\n belief = nodes[current]\n else:\n a = current[-1]\n ba = torch.cat([nodes[current[:-1]], actions[a:a + 1]], dim=1)\n belief = self._transition_model.forward(ba, self._task_name)\n nodes[current] = belief\n belief_action = torch.cat([belief, actions[selected_action:selected_action + 1]], dim=1)\n reward = self._reward_model.forward(belief_action, self._task_name)\n reward_cache[new_node] = reward\n return reward\n\n def _mcts_helper(self, belief):\n # Monte Carlo\n t0 = time.time()\n timeout = self._timeout\n horizon = self._horizon\n inverse_discount = self._inverse_discount\n start_belief = torch.FloatTensor([belief]).to(device)\n actions = torch.eye(self._game.num_cards).float().to(device)\n num_actions = self._game.num_cards\n list_actions = list(range(num_actions))\n nodes = {tuple(): start_belief}\n plays = defaultdict(int)\n reward_cache = {}\n scores = defaultdict(float)\n lowest_score = 1\n while time.time() - t0 < timeout:\n current = tuple()\n plays[current] += 1\n total_reward = 0\n\n # Selection\n while len(current) < horizon and current + (0,) in plays:\n action_values = 
[MonteCarloAgent.ucb(scores[current + (a,)],\n plays[current + (a,)],\n plays[current],\n lowest_score)\n for a in list_actions]\n selected_action = max(list_actions, key=lambda a: action_values[a])\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n plays[current] += 1\n\n # Expansion\n if len(current) < horizon and current + (0,) not in plays:\n plays[current + (0,)] = 0\n selected_action = random.randint(0, num_actions - 1) # TODO: only expand legally\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n plays[current] += 1\n final_current = current\n\n # Simulation\n while len(current) < horizon:\n selected_action = random.randint(0, num_actions - 1) # TODO: only expand legally\n reward = self.get_transition_reward(current, selected_action, reward_cache, nodes,\n actions)\n total_reward = inverse_discount * total_reward + reward\n current = current + (selected_action,)\n\n # Backpropagation\n for i in range(horizon + 1):\n scores[final_current[:i]] += total_reward.item()\n lowest_score = min(lowest_score, total_reward)\n\n # detach tensors\n return scores, plays\n\n\nclass ModelBasedAgent(BeliefBasedAgent):\n def __init__(self, game: TrickTakingGame,\n player_number: int,\n transition_model: Union[TransitionModel, MultitaskTransitionModel],\n reward_model: Union[RewardModel, MultitaskRewardModel]):\n super().__init__(game, player_number)\n self._task_name = game.name\n self._transition_model = transition_model\n self._reward_model = reward_model\n self._current_observation = None\n if self._game.num_cards not in action_tensor_cache:\n action_tensor_cache[self._game.num_cards] = torch.eye(self._game.num_cards).to(device)\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n super().observe(action, observation, reward)\n self._current_observation = observation\n\n def act(self, epsilon: float = 0) -> Card:\n if np.random.rand() <= epsilon:\n return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))\n # valid_cards = self._get_hand(self._current_observation, valid_only=True)\n # return random.sample(valid_cards, 1)[0]\n\n # search\n horizon = 1\n inverse_discount = 1.1\n actions = self._game.num_cards\n nodes = deque()\n nodes.append((torch.FloatTensor([self._belief]).to(device), None, 0,\n 0)) # belief, first_action, reward, steps\n best_first_action = 0\n best_score = -float('inf')\n while len(nodes):\n belief, first_action, reward, steps = nodes.popleft()\n if steps == horizon: break\n x = torch.cat([belief.repeat(actions, 1), action_tensor_cache[actions]], dim=1)\n action_values = self._reward_model.forward(x, self._task_name)\n next_beliefs = None\n if steps < horizon - 1:\n next_beliefs = self._transition_model.forward(x, self._task_name)\n for i in range(actions):\n new_reward = inverse_discount * reward + action_values[i].item()\n if steps < horizon - 1:\n nodes.append((next_beliefs[i:i + 1],\n first_action if first_action else i,\n new_reward,\n steps + 1))\n elif steps == horizon - 1:\n if new_reward > best_score:\n best_score = new_reward\n best_first_action = i\n return self._game.index_to_card(best_first_action)\n\n\nclass MonteCarloAgent(ModelBasedAgent):\n def __init__(self, game: TrickTakingGame,\n player_number: int,\n transition_model: 
Union[TransitionModel, MultitaskTransitionModel],\n reward_model: Union[RewardModel, MultitaskRewardModel],\n timeout: float = 0.5,\n horizon: int = 4,\n inverse_discount=1.2):\n super().__init__(game, player_number, transition_model, reward_model)\n self._timeout = timeout\n self._horizon = horizon\n self._inverse_discount = inverse_discount\n\n @staticmethod\n def ucb(score, plays, parent_plays, lowest_score, c=1.4):\n exploitation = score / plays if plays else 0\n exploitation /= abs(lowest_score) / 5 # normalization\n exploration = c * math.sqrt(math.log(parent_plays) / plays) if plays else float('inf')\n return exploitation + exploration\n\n def get_transition_reward(self, current, selected_action, reward_cache, nodes, actions):\n new_node = current + (selected_action,)\n if new_node in reward_cache:\n reward = reward_cache[new_node]\n else:\n if current in nodes:\n belief = nodes[current]\n else:\n a = current[-1]\n ba = torch.cat([nodes[current[:-1]], actions[a:a + 1]], dim=1)\n belief = self._transition_model.forward(ba, self._task_name)\n nodes[current] = belief\n belief_action = torch.cat([belief, actions[selected_action:selected_action + 1]], dim=1)\n reward = self._reward_model.forward(belief_action, self._task_name)\n reward_cache[new_node] = reward\n return reward\n\n def act(self, epsilon: float = 0) -> Card:\n if np.random.rand() <= epsilon:\n return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))\n # valid_cards = self._get_hand(self._current_observation, valid_only=True)\n # return random.sample(valid_cards, 1)[0]\n\n # Monte Carlo\n card_index = mcts(torch.multiprocessing.Pool(2), 2, self._belief, self._game,\n self._transition_model, self._reward_model, self._task_name)\n return self._game.index_to_card(card_index)\n"
},
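The selection step above hinges on `MonteCarloAgent.ucb`. The snippet below is a self-contained UCB1 selection, simplified by dropping the `lowest_score` normalization the agent applies; note how unvisited children score infinity, so every action is expanded once before exploitation begins.

```python
import math

def ucb(score, plays, parent_plays, c=1.4):
    if plays == 0:
        return float("inf")  # force each child to be tried at least once
    return score / plays + c * math.sqrt(math.log(parent_plays) / plays)

scores = {0: 9.0, 1: 4.0, 2: 0.0}
plays = {0: 6, 1: 5, 2: 0}
parent_plays = sum(plays.values())

best = max(scores, key=lambda a: ucb(scores[a], plays[a], parent_plays))
assert best == 2  # the unvisited action wins selection first
```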
{
"alpha_fraction": 0.4912280738353729,
"alphanum_fraction": 0.719298243522644,
"avg_line_length": 13.5,
"blob_id": "b9e389e7f8aaebd7eec4d248d14cfd80769ce6b5",
"content_id": "90e0e7b3f12e21a96e071352411f9c3fac89425a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 57,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "Flask~=1.1.2\ntensorboard\ntorch~=1.6.0+cu101\nnumpy~=1.19.2"
},
{
"alpha_fraction": 0.5776492357254028,
"alphanum_fraction": 0.5806942582130432,
"avg_line_length": 32.520408630371094,
"blob_id": "413bfe8fec5d03b6cddb68ed6342f68bdd31f9ad",
"content_id": "587d79c083962f7bf6631f90cabe71c2b311b444",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3284,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 98,
"path": "/src/environments/bidding_base.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\n# Created by Patrick Kao\nimport copy\nfrom enum import Enum\nfrom typing import List, Tuple, Dict\n\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Suit, OutOfTurnException\n\n\nclass Phase(Enum):\n BIDDING = 0\n PLAY = 1\n\n\nclass BiddingBase(TrickTakingGame):\n \"\"\"\n Oh Hell environment\n\n The first action the player gives\n\n Modifications:\n - trump suit is always spades\n - play is with a fixed # of cards instead of varying between rounds\n \"\"\"\n name = \"Bidding ADT\"\n\n def __init__(self):\n super().__init__()\n self._player_bids = None\n self._phase = None\n self._tricks_won = None\n\n def reset(self, state: List[int] = None) -> Tuple[List[int], ...]:\n self._phase = Phase.BIDDING\n self._player_bids = [None] * self.num_players\n self._tricks_won = [0] * self.num_players\n return super().reset(state)\n\n def _get_observations(self) -> Tuple[List[int], ...]:\n \"\"\"\n Same as in superclass, but with player bids and game phase appended to end of observation\n\n :return: [super observation] + [player bids: int x players] + [game phase: int]\n \"\"\"\n new_observations = super()._get_observations()\n for i in range(self.num_players):\n new_observations[i].extend(self._player_bids)\n new_observations[i].append(self._phase)\n\n return new_observations\n\n def step(self, action: Tuple[int, int]) -> Tuple[\n Tuple[List[int], ...], Tuple[int, ...], bool, Dict]:\n \"\"\"\n Execute action according to the rules defined in the docstring of TrickTakingGame.\n\n :param action: Tuple[int, int], (id, i) representing player id playing card i\n :return: Tuple of the following:\n - observation, Tuple[List[int], ...] of observations\n - reward, Tuple[int, ...] of rewards\n - done, bool that is True if the game has ended, otherwise False\n - info, Dict of diagnostic information, currently empty\n \"\"\"\n if self._phase == Phase.BIDDING:\n num_players = self.num_players\n rewards = [0 for _ in range(num_players)]\n player, bid = action\n invalid_plays = {}\n\n # check for invalid\n if player != self.next_player:\n return self._get_observations(), tuple(rewards), False, {\n \"error\": OutOfTurnException}\n\n new_player_bids = copy.deepcopy(self._player_bids)\n new_player_bids[player] = bid\n if None not in new_player_bids:\n # invalid\n if not self._valid_bids(new_player_bids):\n rewards[player] = -100\n invalid_plays[player] = \"invalid\"\n new_player_bids[player] += 1\n\n self._phase = Phase.PLAY\n\n self._player_bids = new_player_bids\n\n self._state[-1] = self._get_next_player(player)\n return self._get_observations(), tuple(rewards), False, invalid_plays\n else:\n return super().step(action)\n\n def _valid_bids(self, proposed_bids: List) -> bool:\n return True\n\n def _get_next_player(self, player):\n return (player + 1) % self.num_players"
},
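Since `_get_observations` appends the bids and then the phase after the base observation, an agent can recover the three pieces by slicing from the end. A sketch of that follows; the phase is shown as a plain int for simplicity, whereas the class itself appends the `Phase` member.

```python
def split_bidding_observation(observation, num_players):
    phase = observation[-1]                    # Phase.BIDDING / Phase.PLAY
    bids = observation[-1 - num_players:-1]    # one slot per player, None until bid
    base = observation[:-1 - num_players]      # the TrickTakingGame observation
    return base, bids, phase

obs = list(range(10)) + [None, None, None, None] + [0]
base, bids, phase = split_bidding_observation(obs, num_players=4)
assert base == list(range(10)) and bids == [None] * 4 and phase == 0
```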
{
"alpha_fraction": 0.7075209021568298,
"alphanum_fraction": 0.7103064060211182,
"avg_line_length": 22.933332443237305,
"blob_id": "3c14dfc0b107eef4fda2b6eecac0296f42312031",
"content_id": "07343506d762e223176cd40fecf57c722387f438",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 359,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 15,
"path": "/test/human_vs_random.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom agents.human import Human\nfrom agents.random_agent import RandomAgent\nfrom flask_game import FlaskGame\nfrom environments.hearts import SimpleHearts\n\n\ndef test_hearts():\n game = FlaskGame.getInstance(SimpleHearts, [RandomAgent] * 3 + [Human])\n result = game.run()\n print(result)\n\n\nif __name__ == \"__main__\":\n test_hearts()\n"
},
{
"alpha_fraction": 0.5286168456077576,
"alphanum_fraction": 0.5635930299758911,
"avg_line_length": 28.952381134033203,
"blob_id": "f1fc29fef229a25fbdfce993861bd173a605b3cb",
"content_id": "e8a632fbfd6bac8707455a9372733d186361ea93",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1258,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 42,
"path": "/test/test_oh_hell.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nimport unittest\nfrom typing import Tuple\n\nfrom agents.random_agent import RandomAgent\nfrom environments.oh_hell import OhHell\nfrom game import Game\nfrom util import Suit\n\n\nclass MiniOhHell(OhHell):\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 2, 2, 2, 2\n\n\nclass OhHellTest(unittest.TestCase):\n def test_game(self):\n game = Game(MiniOhHell, [RandomAgent] * 4)\n result = game.run()\n print(result)\n self.assertTrue(result is not None)\n\n def test_simple_game(self):\n game = MiniOhHell()\n state = [0, 1, 2, 3, 0, 1, 2, 3, ] # cards\n state.extend([-1 for _ in range(4)]) # in play\n state.extend([0 for _ in range(4)]) # score\n state.extend([Suit.SPADES, -1, 0]) # trump plus leading + players\n\n game.reset(state)\n\n plays = [0, 1, 0, 2,\n 0, 1, 2, 3, 5, 6, 7, 4]\n for turn in range(12):\n next_player = game.next_player\n play = plays[turn]\n observations, rewards, done, info = game.step((next_player, play))\n if turn != 11:\n self.assertTrue(rewards == tuple([0] * 4))\n else:\n self.assertEqual(rewards, (10, 11, 10, 1))\n"
},
{
"alpha_fraction": 0.6668797731399536,
"alphanum_fraction": 0.6743392944335938,
"avg_line_length": 31.143835067749023,
"blob_id": "c73263730054a07b170a189652f70fc8aadf44f4",
"content_id": "0ca7575c65a7b6118046f84d359db4db3fe72d63",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4692,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 146,
"path": "/src/environments/basic_trick.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import random\nfrom typing import Dict, List, Tuple, Union\n\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card, Suit\n\n\nclass BasicTrick(TrickTakingGame):\n\n\t'''\n\tEnvironment for simplest value-based card game. \n\n\tThis game is played with 8 cards of each suit, for a total of 32. Players receive 5 points \n\tfor every round, which can be won by playing the highest value, regardless of suit.\n\tA trump card can be played at any time and the hierarchy for the trump suit is 5, J, Highest Card of Hearts, \n\tfollowed by the remaining cards in the trump suit in the traditional numerical order. \n\n\tThe game is won by the player with the highest score once all cards have been played (AKA 5 rounds). \n\n\t'''\n\n\tname = 'Basic Trick'\n\n\n\tdef step(self, action: Tuple[int, int]) -> Tuple[\n\tTuple[List[int], ...], Tuple[int, ...], bool, Dict]:\n\t\t'''\n\t\tOverridden to add card redistribution under \"reset trick\". \n\t\t'''\n\t\tassert len(action) == 2, \"invalid action\"\n\t\tplayer, card_index = action\n\t\tassert card_index < self.num_cards, \"Trying to pick card with index higher than allowed\"\n\t\tnum_cards = self.num_cards\n\t\tnum_players = self.num_players\n\n\t\trewards = [0 for _ in range(num_players)]\n\n\t\t# Check if it is this player's turn\n\t\tif player != self.next_player:\n\t\t\treturn self._get_observations(), tuple(rewards), False, {\"error\": OutOfTurnException}\n\n\t\t# Check if the card is a valid play\n\t\tinvalid_plays = {}\n\t\tif not self.is_valid_play(player, card_index):\n\t\t\tvalid_cards = [i for i in range(num_cards) if self.is_valid_play(player, i)]\n\t\t\tif self._state[card_index] == player:\n\t\t\t\trewards[player] -= 10\n\t\t\telse:\n\t\t\t\trewards[player] -= 100\t# Huge penalty for picking an invalid card!\n\t\t\tcard_index = random.choice(valid_cards) # Choose random valid move to play\n\t\t\tinvalid_plays[player] = \"invalid\"\n\t\telse:\n\t\t\tpass\n\t\t # possible to reward player for making good choice here\n\n\t\t# Play the card\n\t\tself._state[card_index] = -1\n\t\tassert self._state[num_cards + player] == -1, \"Trying to play in a trick already played in\"\n\t\tself._state[num_cards + player] = card_index\n\t\tif self._state[-2] == -1:\n\t\t # Trick leader\n\t\t self._state[-2] = card_index\n\t\t# update next player\n\t\tself._state[-1] = (player + 1) % num_players\n\n\t\t# Check if trick completed\n\t\tplayed_cards = self._state[num_cards: num_cards + num_players]\n\t\tif -1 not in played_cards:\n\t\t # Handle rewards\n\t\t\ttrick_rewards, next_leader = self._end_trick()\n\t\t\trewards = [rewards[i] + trick_rewards[i] for i in range(num_players)]\n\t\t\tfor i in range(num_players):\n\t\t\t\toffset = num_cards + num_players # index into state correctly\n\t\t\t\tself._state[offset + i] += trick_rewards[i] # update current scores\n\n\t\t # Reset trick\n\t\t\tfor i in range(num_cards, num_cards + num_players):\n\t\t\t\tself._state[i] = -1\n\t\t\tself._state[-2] = -1\n\t\t\tself._state[-1] = next_leader\n\n\t\t# Check if game ended\n\t\tif self._game_has_ended():\n\t\t\tdone = True\n\t\t # apply score bonuses\n\t\t\tbonus_rewards = self._end_game_bonuses()\n\t\t\trewards = [rewards[i] + bonus_rewards[i] for i in range(num_players)]\n\t\t\tfor i in range(num_players):\n\t\t\t\toffset = num_cards + num_players\n\t\t\t\tself._state[offset + i] += bonus_rewards[i]\n\t\telse:\n\t\t\tdone = False\n\n\t\treturn self._get_observations(), tuple(rewards), done, invalid_plays\n\n\tdef _deal(self) -> 
List[int]:\n\t\t'''\n\t\tOverridden to deal five cards to each player. \n\t\t'''\n\t\tcards = []\n\t\tfor i in range(self.num_players):\n\t\t\tcards += [i for _ in range(5)] #revisit, is 5 the best number for a reduced deck?\n\t\tcards += [-1]*(self.num_cards - len(cards))\n\t\trandom.shuffle(cards)\n\t\treturn cards\n\n\tdef _end_trick(self) -> Tuple[List[int], int]:\n\n\t\twinning_player, winning_card = self._get_trick_winner()\n\t\trewards = [0 for _ in range(self.num_players)]\n\t\trewards[winning_player] = 5\n\t\treturn rewards, winning_player\n\n\tdef _get_trick_winner(self) -> Tuple[int, Card]:\n\n\t\tstarting_card = self.trick_leader\n\t\tplayed_cards = [self.index_to_card(self._state[self.num_cards + i]) for i in\n\t\t\t\t\t\trange(self.num_players)]\n\n\t\twinning_index = -1\n\t\twinning_card = None\n\t\tfor player_index, card in enumerate(played_cards):\n\t\t\tif winning_card is None:\n\t\t\t\twinning_card = card\n\t\t\t\twinning_index = player_index\n\t\t\telif card.value > winning_card.value:\n\t\t\t\twinning_index = player_index\n\t\t\t\twinning_card = card\n\t\t\telif card.value == winning_card.value:\n\t\t\t\tif card.suit.value > winning_card.suit.value:\n\t\t\t\t\twinning_index = player_index\n\t\t\t\t\twinning_card = card\n\t\t\telse:\n\t\t\t\tpass\n\n\t\treturn winning_index, winning_card\n\n\tdef is_valid_play(self, player_index, card_index) -> bool:\n\n\t\tif self._state[card_index] != player_index:\n\t\t\treturn False\n\t\treturn True\n\n\t@property\n\tdef cards_per_suit(self) -> Tuple[int, ...]:\n\t\treturn 8, 8, 8, 8"
},
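The winner rule in `_get_trick_winner` above reduces to "highest value wins, suit rank breaks ties". A standalone equivalent using the repo's `Card` and `Suit`, assuming `src` is on the import path:

```python
from util import Card, Suit

def trick_winner(played_cards):
    # max over (value, suit rank): suit only matters when values tie
    return max(range(len(played_cards)),
               key=lambda i: (played_cards[i].value, played_cards[i].suit.value))

cards = [Card(Suit.CLUBS, 7), Card(Suit.HEARTS, 7), Card(Suit.DIAMONDS, 3)]
assert trick_winner(cards) == 1  # values tie at 7; HEARTS outranks CLUBS
```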
{
"alpha_fraction": 0.5799086689949036,
"alphanum_fraction": 0.5898021459579468,
"avg_line_length": 21.65517234802246,
"blob_id": "44dea4d19efc2bf77a79e31ca0b676607d9bd4c1",
"content_id": "23854349f6fe9183ee101d35588ae9cafaf85fea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1314,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 58,
"path": "/src/util.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import torch\n\nfrom enum import Enum\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Suit(Enum):\n \"\"\"Card suits.\"\"\"\n NO_TRUMP = -1\n CLUBS = 0\n DIAMONDS = 1\n HEARTS = 2\n SPADES = 3\n\n\nclass Card:\n \"\"\"An immutable card.\"\"\"\n\n def __init__(self, suit: Suit, value: int):\n self._suit = suit\n self._value = value\n\n @property\n def suit(self) -> Suit:\n return self._suit\n\n @property\n def value(self) -> int:\n return self._value\n\n def __eq__(self, other):\n return self.suit == other.suit and self.value == other.value\n\n def __hash__(self):\n return hash((self.suit, self.value))\n\n def __repr__(self):\n return \"Card<{}, {}>\".format(self.suit, self.value)\n\n def __str__(self):\n return \"{} {}\".format(self.suit, self.value)\n\n\nclass OutOfTurnException(Exception):\n pass\n\n\ndef polynomial_transform(x: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Return x raised to the second order polynomial basis, used for transforming an NN input.\n :param x: tensor to transform\n :returns: transformed tensor\n \"\"\"\n n, d = x.shape\n x1 = torch.unsqueeze(torch.cat([torch.ones((n, 1)).to(device), x], dim=1), 1)\n x = torch.unsqueeze(x, 2) * x1\n return x.reshape(n, d * (d + 1))\n"
},
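`polynomial_transform` flattens the outer product of each row `x` with `[1, x]`, so every d-dimensional row becomes the d·(d+1) features `x_i` and `x_i · x_j`. A quick check of the shape and values, moving the input to the module's `device` so it also runs on CUDA setups:

```python
import torch

from util import device, polynomial_transform

x = torch.tensor([[2.0, 3.0]]).to(device)  # n=1, d=2
out = polynomial_transform(x)

assert out.shape == (1, 2 * 3)  # d * (d + 1) = 6 features
# flattened rows of x outer [1, x]: [x1, x1*x1, x1*x2, x2, x2*x1, x2*x2]
assert out.cpu().tolist() == [[2.0, 4.0, 6.0, 3.0, 6.0, 9.0]]
```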
{
"alpha_fraction": 0.6265193223953247,
"alphanum_fraction": 0.640331506729126,
"avg_line_length": 32.51852035522461,
"blob_id": "38ed4399dc9be914f86a8db1fd53d4a0ebd2bb91",
"content_id": "3a0f4f6a687b831e184eac013b762ffe6b214971",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1810,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 54,
"path": "/src/environments/hattrick.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple\n\nfrom environments.twentyfive import TwentyFive\n\n\nclass HatTrick(TwentyFive):\n '''\n Environment for modified version of twenty-five, a 4-player trump trick taking game.\n\n This variation plays with 8 cards of each suit, for a total of 32. Players receive 5 points\n for every trick, which can be won by playing the highest of the suit that was led or a trump\n card.\n A trump card can be played at any time and the hierarchy for the trump suit is 5, J,\n Highest Card of Hearts,\n followed by the remaining cards in the trump suit in the traditional numerical order. Winning\n three\n tricks leads to an additional bonus of 25 points, but crossing three tricks results in a 50\n point penalty.\n\n The game consists of 5 rounds and is won by the player with the most points.\n\n '''\n\n name = 'Hat Trick'\n\n def __init__(self):\n super().__init__()\n self._tricks_played = 0\n\n def _end_trick(self) -> Tuple[List[int], int]:\n self._tricks_played += 1\n return super()._end_trick()\n\n def _game_has_ended(self) -> bool:\n return self.tricks_played == 5\n\n def _end_game_bonuses(self) -> List[int]:\n \"\"\"\n Computes additional reward assigned to each player at the end of a game.\n May be overwritten by child classes.\n :return: vector of bonus rewards for each player\n \"\"\"\n rewards = [0 for _ in range(self.num_players)]\n for player in range(self.num_players):\n if self.scores[player] == 10:\n rewards[player] += 25 # 25pt bonus\n elif self.scores[player] >= 40: # aka crossing three tricks\n rewards[player] -= 50\n\n return rewards\n\n @property\n def tricks_played(self):\n return self._tricks_played\n"
},
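The end-game rule reduces to a function of tricks won (at 5 points per trick, exactly three tricks is a score of 15). A standalone sketch of the bonus logic; `hat_trick_bonus` is a hypothetical helper, not a repo function:

```python
def hat_trick_bonus(tricks_won):
    # exactly three tricks earns the hat trick bonus
    if tricks_won == 3:
        return 25
    # winning more than three tricks is penalized
    if tricks_won > 3:
        return -50
    return 0

assert [hat_trick_bonus(t) for t in (0, 3, 4)] == [0, 25, -50]
```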
{
"alpha_fraction": 0.6562707424163818,
"alphanum_fraction": 0.6572660803794861,
"avg_line_length": 36.67499923706055,
"blob_id": "05bc155079232059327f0cfc5ae1368993fa1c8d",
"content_id": "0e4bcf87a0927e93a3c0c5b156b8d1e4968ce7f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3014,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 80,
"path": "/src/agents/base.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import abc\nimport multiprocessing\nimport sys\nfrom pathlib import Path\nfrom typing import List, Set, Tuple\n\nimport torch\n\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\n\nclass ExecutorManager:\n executor = None\n num_threads = None\n\n @staticmethod\n def get_executor(use_thread):\n if ExecutorManager.executor is None:\n is_linux = sys.platform == \"linux\" or sys.platform == \"linux2\"\n use_thread = use_thread if use_thread is not None else is_linux\n if use_thread:\n torch.multiprocessing.set_start_method('spawn') # allow CUDA in multiprocessing\n\n num_cpus = multiprocessing.cpu_count()\n ExecutorManager.num_threads = int(num_cpus / 2) # can use more or less CPUs\n ExecutorManager.executor = multiprocessing.Pool(ExecutorManager.num_threads)\n\n return ExecutorManager.num_threads, ExecutorManager.executor\n\n\nclass Agent:\n \"\"\"Abstract base class for an AI agent that plays a trick taking game.\"\"\"\n\n def __init__(self, game: TrickTakingGame, player_number: int, executor: bool = None):\n self._game = game\n self._player = player_number\n\n @abc.abstractmethod\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n \"\"\"\n Handle an observation from the environment, and update any personal records/current\n belief/etc.\n\n :param action: tuple of the player who moved and the index of the card they played\n :param observation: the observation corresponding to this player as returned by the env\n :param reward: an integral reward corresponding to this player as returned by the env\n :return: None\n \"\"\"\n pass\n\n @abc.abstractmethod\n def act(self, epsilon: float = 0) -> int:\n \"\"\"\n Based on the current observation/belief/known state, select a Card to play.\n :return: the card to play\n \"\"\"\n pass\n\n def _get_hand(self, observation: List[int], valid_only: bool = False) -> Set[Card]:\n \"\"\"\n Get the hand of an agent based on an observation.\n :param observation: observation corresponding to this player as returned by the env\n :param valid_only: True if only valid card plays should be returned, False if entire hand\n should be returned\n :return: the set of cards in the player's hand\n \"\"\"\n cards = observation[:self._game.num_cards]\n return set(self._game.index_to_card(i) for i, in_hand in enumerate(cards)\n if in_hand and (not valid_only or self._game.is_valid_play(self._player, i)))\n\n\nclass Learner:\n \"\"\"Abstract base class for an AI that learns to play trick taking games.\"\"\"\n\n @abc.abstractmethod\n def __init__(self, use_thread: bool = None):\n self._use_thread = use_thread\n self.num_threads, self.executor = ExecutorManager.get_executor(use_thread)\n Path(\"models\").mkdir(parents=True, exist_ok=True)\n"
},
{
"alpha_fraction": 0.6238244771957397,
"alphanum_fraction": 0.6238244771957397,
"avg_line_length": 21.785715103149414,
"blob_id": "196133e2fff005964df2b43d0d7c6f5f86d2c406",
"content_id": "351353d40f79ed1759d06ba383b64a9cf1bd1b7d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 14,
"path": "/test/test_flask.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom flask import Flask, render_template\n\napp = Flask(__name__, template_folder=\"../templates\", static_folder=\"../static\")\napp.config['DEBUG'] = True\n\n\[email protected](\"/\", methods=[\"POST\", \"GET\"])\ndef run():\n return render_template('main.html', )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.5302850604057312,
"alphanum_fraction": 0.5383016467094421,
"avg_line_length": 43.31578826904297,
"blob_id": "cd50d80d1d4bfcd426ddc53697e9ca0bfaf8dd68",
"content_id": "3edd694611b3d5a4b4df3cd95753deb25f0e4cb4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3368,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 76,
"path": "/src/evaluators.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nimport numpy as np\nimport torch\n\nfrom agents.random_agent import RandomAgent\nfrom game import Game\n\n\ndef evaluate_random(tasks, agent_type, models, num_trials=50, compare_agent=None):\n \"\"\"\n Evaluate an agent against 3 random agents on a list of tasks\n :param tasks: list of task classes to evaluate on\n :param agent_type: class of the agent to evaluate\n :param models: dictionary accepted by Game of models for this agent\n :param num_trials: int, number of trial games to run per task\n :param compare_agent: optional other agent to compare agent_type to, default will use a random agent\n :return: (win_rate, avg_score, percent_invalid, scores)\n win_rate: percentage of time agent_type scores strictly better than compare_agent would on the same initial deal\n match_rate: percentage of time agent_type scores at least as well\n avg_score: average score that agent_type beats compare_agent by on the same initial deal\n percent_invalid: percentage of time agent_type plays an invalid card\n scores: list of score vectors\n \"\"\"\n with torch.no_grad():\n scores = []\n random_scores = []\n num_invalid = 0\n total_cards_played = 0\n for task in tasks:\n for trial_num in range(num_trials):\n # print(trial_num)\n # Evaluate agent\n game = Game(task,\n [agent_type] + [RandomAgent] * 3,\n [models, {}, {}, {}],\n # [{\"model\": model}, {}, {}, {}],\n {\"epsilon\": 0, \"verbose\": False})\n score, state = game.run()\n infos = game.get_info()\n scores.append(score)\n\n # Evaluate current agent on same starting state\n if compare_agent and not isinstance(compare_agent, RandomAgent):\n game = Game(task,\n [compare_agent] + [RandomAgent] * 3,\n [models, {}, {}, {}],\n # [{\"model\": model}, {}, {}, {}],\n {\"epsilon\": 0, \"verbose\": False})\n else:\n game = Game(task, [RandomAgent] * 4, [{}] * 4, {\"epsilon\": 0, \"verbose\": False})\n random_score, _ = game.run(state)\n random_scores.append(random_score)\n\n for info in infos:\n if 0 in info and info[0] == \"invalid\":\n num_invalid += 1\n constant_game = task()\n total_cards_played += constant_game.num_cards / constant_game.num_players * num_trials\n\n wins = 0\n matches = 0\n for i, score in enumerate(scores):\n if score[0] > random_scores[i][0]:\n wins += 1\n matches += 1\n elif score[0] >= random_scores[i][0]:\n matches += 1\n # record.append(True if np.argmax(score) == 0 else False)\n winrate = wins / len(scores)\n matchrate = matches / len(scores)\n avg_score_margin = (np.asarray(scores)[:, 0] - np.asarray(random_scores)[:, 0]).mean()\n\n # calculate invalid\n percent_invalid = num_invalid / total_cards_played\n\n return winrate, matchrate, avg_score_margin, percent_invalid, scores\n"
},
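To make the metric definitions concrete, here is how the paired score lists turn into win rate, match rate, and average margin; index 0 is always the agent under evaluation, and the numbers are made up for illustration.

```python
import numpy as np

scores = [(5, 0, 0, 0), (2, 3, 1, 1), (4, 4, 0, 0)]         # agent's games
random_scores = [(1, 2, 2, 2), (3, 1, 1, 1), (4, 2, 1, 1)]  # same deals, baseline

wins = sum(s[0] > r[0] for s, r in zip(scores, random_scores))     # strictly better
matches = sum(s[0] >= r[0] for s, r in zip(scores, random_scores)) # at least as good
margin = (np.asarray(scores)[:, 0] - np.asarray(random_scores)[:, 0]).mean()

assert (wins / len(scores), matches / len(scores), margin) == (1 / 3, 2 / 3, 1.0)
```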
{
"alpha_fraction": 0.688811182975769,
"alphanum_fraction": 0.690559446811676,
"avg_line_length": 30.77777862548828,
"blob_id": "b912fe4f3e5663cb3ecb355795107d94e8179a59",
"content_id": "5c54c38bf3782dadd5e39d7e3ce217752fd79b40",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 572,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 18,
"path": "/src/agents/human.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom typing import Tuple, List\n\nfrom agents.base import Agent\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\n\nclass Human(Agent):\n def __init__(self, game: TrickTakingGame, player_number: int):\n super().__init__(game, player_number)\n self._current_observation = None\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n self._current_observation = observation\n\n def act(self, epsilon: float = 0) -> Card:\n raise ValueError(\"Human agents can't act\")\n"
},
{
"alpha_fraction": 0.5795373916625977,
"alphanum_fraction": 0.5911021828651428,
"avg_line_length": 39.35238265991211,
"blob_id": "d307b21299a4353c3fcde55773b7bad6618c43c2",
"content_id": "d17502198c27f7bb94cdcc029d29c646cb0bbf9b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8474,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 210,
"path": "/src/agents/dqn_agent.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import itertools\nimport multiprocessing\nimport random\nfrom collections import deque\nfrom concurrent import futures\nfrom datetime import datetime\nfrom typing import List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch import nn\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom agents.base import Learner\nfrom agents.belief_agent import BeliefBasedAgent\nfrom environments.test_hearts import TestSimpleHearts\nfrom environments.trick_taking_game import TrickTakingGame\nfrom evaluators import evaluate_random\nfrom game import Game\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass DQN(nn.Module):\n def __init__(self, observation_size, action_size, H1=200, H2=160, H3=120, H4=60):\n \"\"\"\n\n :param observation_size: Size of belief as defined in belief_agent.py\n :param action_size: Model has 1 output for every single possible card in the deck.\n :param H1: size of hidden layer 1\n :param H2: size of hidden layer 2\n \"\"\"\n super().__init__()\n self.fc1 = torch.nn.Linear(observation_size, H1)\n self.fc2 = torch.nn.Linear(H1, H2)\n self.fc3 = torch.nn.Linear(H2, H3)\n self.fc4 = torch.nn.Linear(H3, H4)\n self.fc5 = torch.nn.Linear(H4, action_size)\n\n def forward(self, observation):\n '''\n Maps observation to action values.\n '''\n x = F.relu(self.fc1(observation))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n return self.fc5(x)\n\n\nclass DQNAgent(BeliefBasedAgent):\n \"\"\"\n DQN agent\n\n A set of cards is represented by a one-hot vector with length equal to number of cards in the\n game. The vector has a one if the card is present in the set and 0 otherwise\n \"\"\"\n\n def __init__(self, game: TrickTakingGame, player_number: int, model: DQN):\n super().__init__(game, player_number)\n self._current_observation = None\n self.model = model\n\n def act(self, epsilon: float = 0):\n if np.random.rand() <= epsilon:\n return self._game.index_to_card(random.randint(0, self._game.num_cards - 1))\n\n # reformat observation into following format: hand +\n action_values = self.model.forward(torch.FloatTensor(self._belief).to(device))\n chosen_card = torch.argmax(action_values).item()\n return self._game.index_to_card(chosen_card)\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n super().observe(action, observation, reward)\n self._current_observation = observation\n\n\nclass GameRunner:\n def __init__(self, task, agent_params, game_params):\n self.task = task\n self.agent_params = agent_params\n self.game_params = game_params\n\n def __call__(self, game_num):\n # print(f\"Running game {game_num}\")\n game = Game(self.task, [DQNAgent] * 4, self.agent_params, self.game_params)\n result = game.run()\n barbs = game.get_barbs()\n return barbs\n\n\ndef calculate_action_observation_size(game):\n # calculate parameter sizes\n constant_game = game()\n cards_per_suit = constant_game.cards_per_suit[0]\n num_cards = constant_game.num_cards\n return num_cards, num_cards*2\n\nclass DQNLearner(Learner):\n\n def __init__(self, resume_state=None):\n super().__init__(use_thread=True)\n self.action_size, self.observation_size = calculate_action_observation_size(TestSimpleHearts)\n \"\"\" + len(\n constant_game.cards_per_suit) + constant_game.num_players\"\"\"\n self.memory = deque(maxlen=4000) # modification to dqn to preserve recent only\n self.gamma = 0.1 # discount rate\n self.epsilon = 1.0 # exploration rate, percent 
time to be epsilon greedy\n self.epsilon_min = 0.1 # min exploration\n self.epsilon_decay = 0.995 # to decrease exploration rate over time\n self.learning_rate = 5E-4\n\n # training hyperparams\n self.num_epochs = 5000\n self.games_per_epoch = 100\n self.batch_size = 1000\n self.num_batches = 5\n\n # Init agents and trainers\n self.model = DQN(self.observation_size, self.action_size).to(device)\n if resume_state is not None:\n self.model.load_state_dict(resume_state)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n self.evaluate_every = 50 # number of epochs to evaluate between\n self.step = 0\n self.save_every = 1001 # save model every x iterations\n self.save_base_path = \"saved_models/agent\"\n\n self.writer = SummaryWriter(f\"runs/dqn {datetime.now().strftime('%d/%m/%Y %H:%M:%S')}\")\n # TODO: add network graph to tensborboard\n torch.multiprocessing.set_start_method('spawn') # allow CUDA in multiprocessing\n\n def train(self, tasks: List[TrickTakingGame.__class__]) -> nn.Module:\n for task in tasks:\n for epoch in range(self.num_epochs):\n # collect experiences\n print(f\"Starting epoch {epoch}/{self.num_epochs}\")\n specific_game_func = GameRunner(task,\n [{\"model\": self.model} for _ in\n range(4)], {\"epsilon\": self.epsilon,\n \"verbose\": False})\n barb_futures = self.executor.map(specific_game_func, range(self.games_per_epoch),\n chunksize=2)\n # wait for completion\n barbs = list(barb_futures)\n barbs = list(itertools.chain.from_iterable(barbs))\n self.memorize(barbs)\n # update policy\n losses = []\n for _ in range(self.num_batches):\n loss = self.replay(self.batch_size).item()\n losses.append(loss)\n\n self.writer.add_scalar(\"avg_training_loss\", np.mean(losses), epoch)\n\n # evaluate\n if (epoch + 1) % self.evaluate_every == 0:\n winrate, avg_score, invalid_percent, scores = evaluate_random(DQNAgent,\n self.model,\n num_trials=25)\n self.writer.add_scalar(\"eval_winrate\", winrate, epoch)\n self.writer.add_scalar(\"eval_score\", avg_score, epoch)\n self.writer.add_scalar(\"invalid_percentage\", invalid_percent, epoch)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n self.writer.add_scalar(\"epsilon\", self.epsilon, epoch)\n\n if (epoch+1) % self.save_every == 0:\n # save model\n torch.save(self.model.state_dict(), f\"{self.save_base_path}_{epoch}.pt\")\n\n return self.model\n\n def memorize(self, barbs):\n \"\"\"\n Keep memory as array with size num samples x 4 (b, a, r, b) x varies\n :param barbs:\n :return:\n \"\"\"\n self.memory.extend(barbs)\n\n def replay(self, batch_size):\n batch = random.sample(self.memory, batch_size)\n criterion = torch.nn.MSELoss() # tutorial uses torch.nn.SmoothL1Loss\n\n # TODO: what about terminal behavior?\n # Batch data correctly\n batch = np.asarray(batch)\n belief = np.vstack(batch[:, 0])\n action = np.vstack(batch[:, 1])\n reward = np.vstack(batch[:, 2])\n next_belief = np.vstack(batch[:, 3])\n\n belief, next_belief = torch.from_numpy(belief).type(torch.FloatTensor).to(device), \\\n torch.from_numpy(next_belief).type(torch.FloatTensor).to(device)\n # Linear expects dims batch size x feature size (feat size is observation size here)\n target = torch.from_numpy(reward).to(device) + self.gamma * torch.max(\n self.model.forward(next_belief), dim=1, keepdim=True)[0]\n pred = self.model.forward(belief)\n pred = torch.gather(pred, 1, torch.from_numpy(action).to(device)).to(\n device) # convert prediction\n loss = criterion(pred, target).to(device)\n self.optimizer.zero_grad()\n 
loss.backward()\n self.optimizer.step()\n return loss\n"
},
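`DQNLearner.replay` trains on the standard one-step Q-learning target, y = r + γ · max over a′ of Q(s′, a′), with the loss applied only to the Q-values of the actions actually played. Reduced to a hand-written two-sample batch:

```python
import torch
import torch.nn.functional as F

gamma = 0.1
reward = torch.tensor([[1.0], [0.0]])
q_next = torch.tensor([[1.0, 3.0], [0.5, 0.2]])               # Q(s', .) from the net
target = reward + gamma * q_next.max(dim=1, keepdim=True)[0]  # [[1.3], [0.05]]

q_pred = torch.tensor([[2.0, 0.0], [0.1, 0.4]], requires_grad=True)
action = torch.tensor([[0], [1]])                             # cards actually played
pred = torch.gather(q_pred, 1, action)                        # Q(s, a): [[2.0], [0.4]]

loss = F.mse_loss(pred, target)
loss.backward()  # gradients flow only through the selected actions
```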
{
"alpha_fraction": 0.655339777469635,
"alphanum_fraction": 0.655339777469635,
"avg_line_length": 17.727272033691406,
"blob_id": "4309589de5c8051908e2cb22fbcd2765f1274ad7",
"content_id": "18adce111a7247ea4fc6e69018273aa23401ecbc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 11,
"path": "/src/main.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "from environments.hearts import SimpleHearts\nfrom game import Game\n\n\ndef test_hearts():\n game = Game(SimpleHearts)\n result = game.run()\n print(result)\n\nif __name__ == \"__main__\":\n test_hearts()\n"
},
{
"alpha_fraction": 0.7014084458351135,
"alphanum_fraction": 0.7042253613471985,
"avg_line_length": 24.35714340209961,
"blob_id": "33986e96722c881358de8899cb927d8f51076d03",
"content_id": "3f4bc5d50529f94ae9f3e41845cf8d80d846d7c3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 355,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 14,
"path": "/test/test_hattrick.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nimport unittest\n\nfrom agents.random_agent import RandomAgent\nfrom environments.hattrick import HatTrick\nfrom game import Game\n\n\nclass HatTrickTest(unittest.TestCase):\n def test_game(self):\n game = Game(HatTrick, [RandomAgent] * 4)\n result = game.run()\n print(result)\n self.assertTrue(result is not None)\n"
},
{
"alpha_fraction": 0.6225975751876831,
"alphanum_fraction": 0.6389050483703613,
"avg_line_length": 38.930233001708984,
"blob_id": "c830a893a99ad02ab312257df494118e794f0b6b",
"content_id": "9fb6b112b3e1f0287df760f962349fca32ab2b83",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1717,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 43,
"path": "/src/environments/test_hearts.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple\n\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card, Suit\n\n\nclass TestSimpleHearts(TrickTakingGame):\n \"\"\"\n Environment for the simplified game of Hearts, a 4-player no-trump trick taking game.\n\n The variation plays with 8 cards of each suit, for a total of 32.\n The player receives a negative point for each heart card they take, as well as -7 points for taking SPADES 5.\n If a player takes all hearts as well as SPADES 5, they \"shoot the moon\", which means that instead of getting\n -15 points, they receive 0 and everyone else takes -15 points.\n\n The rules for passing cards do not apply in this variant, for simplicity.\n \"\"\"\n name = \"Test Simple Hearts\"\n\n def _get_trump_suit(self) -> int:\n return -1\n\n def _end_game_bonuses(self) -> List[int]:\n scores = self.scores\n loser = min(range(self.num_players), key=lambda i: scores[i])\n if scores[loser] == -13:\n return [13 if i == loser else -13 for i in range(self.num_players)]\n return [0 for _ in range(self.num_players)]\n\n def _end_trick(self) -> Tuple[List[int], int]:\n winning_player, winning_card = self._get_trick_winner()\n rewards = [0 for _ in range(self.num_players)]\n for i in range(self.num_cards, self.num_cards + self.num_players):\n card = self.index_to_card(self._state[i])\n if card.suit == Suit.HEARTS:\n rewards[winning_player] -= 1\n if card == Card(Suit.SPADES, 4):\n rewards[winning_player] -= 7\n return rewards, winning_player\n\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 6, 6, 6, 6\n"
},
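The per-trick scoring in `_end_trick` is easy to sanity-check in isolation; `trick_penalty` below is a hypothetical helper mirroring it, assuming `src` is on the import path:

```python
from util import Card, Suit

PENALTY_SPADE = Card(Suit.SPADES, 4)  # worth -7 to whoever takes it

def trick_penalty(trick_cards):
    # -1 per heart in the trick, -7 extra if the penalty spade is in it
    penalty = -sum(card.suit == Suit.HEARTS for card in trick_cards)
    if PENALTY_SPADE in trick_cards:
        penalty -= 7
    return penalty

trick = [Card(Suit.HEARTS, 2), Card(Suit.SPADES, 4),
         Card(Suit.CLUBS, 0), Card(Suit.HEARTS, 5)]
assert trick_penalty(trick) == -9  # two hearts plus the penalty spade
```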
{
"alpha_fraction": 0.5829934477806091,
"alphanum_fraction": 0.5871413946151733,
"avg_line_length": 48.200679779052734,
"blob_id": "bed22745fbb9028c7d7f454ff4127fe913641831",
"content_id": "c8beec70fa0e25629818667049d79c85d2b75d88",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14465,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 294,
"path": "/src/agents/expert_iteration_learner.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import itertools\nimport multiprocessing\nfrom datetime import datetime\nfrom typing import Any, Dict, List, Tuple\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom agents.base import Learner\nfrom agents.belief_agent import BeliefBasedAgent\nfrom agents.expert_iteration_agent import ExpertIterationAgent, mcts\nfrom agents.models.model_based_models import RewardModel, TransitionModel, ApprenticeModel\nfrom agents.models.multitask_models import MultitaskRewardModel, MultitaskTransitionModel, MultitaskApprenticeModel\nfrom environments.trick_taking_game import TrickTakingGame\nfrom evaluators import evaluate_random\nfrom game import Game\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ModelGameRunner:\n def __init__(self, agent_type, task, agent_params, game_params):\n self.agent_type = agent_type\n self.task = task\n self.agent_params = agent_params\n self.game_params = game_params\n\n def __call__(self, game_num):\n # print(f\"Running game {game_num}\")\n game = Game(self.task, [self.agent_type] * 4, self.agent_params, self.game_params)\n result = game.run()\n barbs = game.get_barbs()\n return barbs\n\n\nclass ExpertIterationLearner(Learner):\n def __init__(self, agent: ExpertIterationAgent.__class__ = ExpertIterationAgent, multitask: bool = False,\n resume_model: Dict[str, Dict[str, Dict[str, Any]]] = None,\n model_names: Dict[str, str] = None, learner_name: str = \"MBL\"):\n \"\"\"\n :param agent: either ModelBasedAgent or a subclass to use\n :param multitask: whether to use multitask learning or not\n :param resume_model: maps \"transition\" or \"reward\" to dictionaries:\n map task names to dictionaries:\n \"state\": model state dict\n \"params\": list of parameters to pass into model constructor\n :param model_names: maps task names to names to save model as\n :param learner_name: name of the trial for tensorboard records\n \"\"\"\n super().__init__(use_thread=True) # TODO: Support no threading\n self._agent_type = agent\n self._model_names = model_names\n if multitask:\n self._transition_model = MultitaskTransitionModel().to(device)\n self._reward_model = MultitaskRewardModel().to(device)\n self._apprentice_model = MultitaskApprenticeModel().to(device)\n else:\n self._transition_model = TransitionModel().to(device)\n self._reward_model = RewardModel().to(device)\n self._apprentice_model = ApprenticeModel().to(device)\n self._transition_optimizer = None\n self._reward_optimizer = None\n self._apprentice_optimizer = None\n\n # Load existing\n if resume_model is not None:\n for key, item in resume_model[\"transition\"].items():\n self._transition_model.make_model(item[\"task\"])\n self._transition_model.models[key].load_state_dict(item[\"state\"])\n for key, item in resume_model[\"reward\"].items():\n self._reward_model.make_model(item[\"task\"])\n self._reward_model.models[key].load_state_dict(item[\"state\"])\n for key, item in resume_model[\"apprentice\"].items():\n self._apprentice_model.make_model(item[\"task\"])\n self._apprentice_model.models[key].load_state_dict(item[\"state\"])\n\n # Hyperparameters\n self._num_epochs = 2000\n self._games_per_epoch = 20\n self._batch_size = 112\n\n self._simulations_per_epoch = 2\n self._max_simulations = 50\n self._simulations_delta = 0.3\n\n self.epsilon = 1.0 # exploration rate, percent time to be epsilon greedy\n self.epsilon_min = 0.1 # min exploration\n self.epsilon_decay = 0.999 # to decrease exploration rate over 
time\n\n self._reward_lr = 1e-4\n self._transition_lr = 1e-4\n self._apprentice_lr = 1e-4\n\n self.writer = SummaryWriter(\n f\"runs/{learner_name}-{datetime.now().strftime('%d-%m-%Y-%H-%M-%S')}\")\n self.evaluate_every = 50\n\n def train(self, tasks: List[TrickTakingGame.__class__]):\n for task in tasks:\n self._setup_single_task(task)\n self._transition_optimizer = optim.Adam(self._transition_model.get_parameters(),\n lr=self._transition_lr)\n self._reward_optimizer = optim.Adam(self._reward_model.get_parameters(), lr=self._reward_lr)\n self._apprentice_optimizer = optim.Adam(self._apprentice_model.get_parameters(), lr=self._apprentice_lr)\n\n for epoch in range(self._num_epochs):\n transition_losses, reward_losses, apprentice_losses = [], [], []\n for task in tasks:\n transition_loss, reward_loss, apprentice_loss = self._train_single_task(task, epoch)\n transition_losses.append(transition_loss)\n reward_losses.append(reward_loss)\n apprentice_losses.append(apprentice_loss)\n\n self.writer.add_scalar(\"avg_training_transition_loss\", np.mean(transition_losses),\n epoch)\n self.writer.add_scalar(\"avg_training_reward_loss\", np.mean(reward_losses), epoch)\n self.writer.add_scalar(\"avg_training_apprentice_loss\", np.mean(apprentice_losses),\n epoch)\n\n if epoch % self.evaluate_every == 0:\n winrate, matchrate, avg_score, invalid, scores = evaluate_random(tasks,\n self._agent_type,\n {\"apprentice_model\":\n self._apprentice_model},\n num_trials=50,\n compare_agent=None) # MonteCarloAgent)\n print(\"Done EVAL\")\n self.writer.add_scalar(\"eval_winrate\", winrate, epoch)\n self.writer.add_scalar(\"eval_matchrate\", matchrate, epoch)\n self.writer.add_scalar(\"eval_score_margin\", avg_score, epoch)\n self.writer.add_scalar(\"invalid_percentage\", invalid, epoch)\n\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n self.writer.add_scalar(\"epsilon\", self.epsilon, epoch)\n\n if self._simulations_per_epoch < self._max_simulations:\n self._simulations_per_epoch += self._simulations_delta\n\n for task in tasks:\n torch.save(self._transition_model.models[task.name].state_dict(),\n \"models/transition_model_{}.pt\".format(self._model_names[task.name]))\n torch.save(self._reward_model.models[task.name].state_dict(),\n \"models/reward_model_{}.pt\".format(self._model_names[task.name]))\n torch.save(self._apprentice_model.models[task.name].state_dict(),\n \"models/apprentice_model_{}.pt\".format(self._model_names[task.name]))\n\n def _setup_single_task(self, task: TrickTakingGame.__class__):\n \"\"\"\n Setup models and such for a task.\n :param task: class of task to setup\n :return: None\n \"\"\"\n if task.name not in self._transition_model.models:\n self._transition_model.make_model(task)\n if task.name not in self._reward_model.models:\n self._reward_model.make_model(task)\n if task.name not in self._apprentice_model.models:\n self._apprentice_model.make_model(task)\n\n def _train_single_task(self, task: TrickTakingGame.__class__, epoch: int) -> Tuple[\n np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Train a task for a single epoch.\n :param task: class of task to train\n :param epoch: the index of the current epoch\n :return: (average transition loss, average reward loss, average apprentice loss)\n \"\"\"\n if epoch % 1 == 0:\n print(f\"Starting epoch {epoch}/{self._num_epochs} for {task.name}\")\n\n experiences = self._agent_evaluation(task)\n transition_loss, reward_loss = self._train_world_models(task, experiences)\n apprentice_loss = 
self._train_agent_policy(task)\n\n        if (epoch + 1) % 200 == 0:\n            torch.save(self._transition_model.models[task.name].state_dict(),\n                       \"models/transition_model_temp_{}.pt\".format(self._model_names[task.name]))\n            torch.save(self._reward_model.models[task.name].state_dict(),\n                       \"models/reward_model_temp_{}.pt\".format(self._model_names[task.name]))\n            torch.save(self._apprentice_model.models[task.name].state_dict(),\n                       \"models/apprentice_model_temp_{}.pt\".format(self._model_names[task.name]))\n\n        return np.mean(transition_loss), np.mean(reward_loss), np.mean(apprentice_loss)\n\n    def _agent_evaluation(self, task: TrickTakingGame.__class__) -> List[\n        Tuple[List[int], int, int, List[int]]]:\n        \"\"\"\n        Collect (b, a, r, b') experiences from playing self._games_per_epoch games against itself\n        :param task: the class of the game to play\n        :return: list of (b, a, r, b') experiences\n        \"\"\"\n        specific_game_func = ModelGameRunner(self._agent_type, task,\n                                             [{\"apprentice_model\": self._apprentice_model}\n                                              for _ in range(task().num_players)],\n                                             {\"epsilon\": self.epsilon, \"verbose\": False})\n\n        barbs = self.executor.map(specific_game_func, range(self._games_per_epoch))\n        # barbs = [specific_game_func(i) for i in range(self._games_per_epoch)]\n        # wait for completion\n        barbs = list(itertools.chain.from_iterable(barbs))\n        return barbs\n\n    def _train_world_models(self, task: TrickTakingGame.__class__,\n                            experiences: List[Tuple[List[int], int, int, List[int]]]) -> Tuple[\n        np.ndarray, np.ndarray]:\n        \"\"\"\n        Train the transition and reward models on the experiences\n        :param task: the class of the game to train models for\n        :param experiences: list of (b, a, r, b') experiences as returned by _agent_evaluation\n        :return: (transition loss mean, reward loss mean)\n        \"\"\"\n\n        transition_losses, reward_losses = [], []\n\n        # Construct belief_action input matrices\n        experience_array = np.asarray(experiences, dtype=object)\n        sample_task = task()\n        belief_size = BeliefBasedAgent(sample_task, 0).get_belief_size()\n        belief_actions = np.pad(np.vstack(experience_array[:, 0]), (0, task().num_cards),\n                                'constant')\n        actions = experience_array[:, 1].astype(int)\n        indices = np.arange(len(experiences))\n        belief_actions[indices, actions + belief_size] = 1\n\n        rewards = np.vstack(experience_array[:, 2])\n        next_beliefs = np.vstack(experience_array[:, 3])\n\n        # Shuffle data\n        np.random.shuffle(indices)\n        belief_actions = torch.from_numpy(belief_actions[indices]).float().to(device)\n        rewards = torch.from_numpy(rewards[indices]).float().to(device)\n        next_beliefs = torch.from_numpy(next_beliefs[indices]).float().to(device)\n\n        # Train\n        for model, optimizer, targets, losses in (\n                (self._transition_model, self._transition_optimizer, next_beliefs,\n                 transition_losses),\n                (self._reward_model, self._reward_optimizer, rewards, reward_losses),\n        ):\n            for i in range(0, len(experiences), self._batch_size):\n                x = belief_actions[i: i + self._batch_size]\n                pred = model.forward(x, task.name)\n                y = targets[i: i + self._batch_size]\n                loss = model.loss(pred, y)\n                losses.append(loss.item())\n                optimizer.zero_grad()\n                loss.backward()\n                optimizer.step()\n\n        return np.mean(transition_losses), np.mean(reward_losses)\n\n    def _train_agent_policy(self, task: TrickTakingGame.__class__) -> np.ndarray:\n        \"\"\"\n        Train the apprentice model for one epoch for this task.\n\n        - using the world model, generate trajectories with the apprentice policy (s, pi(s))\n        - compute expert action for each visited state s -> a\n        - backpropagate on cross entropy loss between 
a, pi(s)\n\n :param task: the class of the game to train for\n :return: apprentice loss mean\n \"\"\"\n game_instance = task()\n horizon = 3 #game_instance.num_cards // game_instance.num_players\n belief_agent = BeliefBasedAgent(game_instance, 0)\n\n losses = []\n for simulation_num in range(int(self._simulations_per_epoch)):\n predicted = []\n expert_actions = []\n belief = belief_agent.update_belief(game_instance.reset()[0])\n belief = torch.FloatTensor([belief]).to(device)\n for _ in range(horizon):\n action_values = self._apprentice_model.forward(belief.detach(), task.name)\n predicted.append(action_values)\n expert_actions.append(mcts(self.executor, self.num_threads, belief, game_instance,\n self._transition_model, self._reward_model, task.name, timeout=0.15))\n action = torch.zeros((1, game_instance.num_cards)).float().to(device)\n action[0, torch.argmax(action_values)] = 1\n belief_action = torch.cat([belief.detach(), action], dim=1)\n belief = self._transition_model.forward(belief_action, task.name)\n\n predicted = torch.cat(predicted)\n expert_actions = torch.LongTensor(expert_actions).to(device)\n loss = self._apprentice_model.loss(predicted, expert_actions)\n losses.append(loss.item())\n self._apprentice_optimizer.zero_grad()\n loss.backward()\n self._apprentice_optimizer.step()\n\n return np.mean(losses)\n"
},
{
"alpha_fraction": 0.6840277910232544,
"alphanum_fraction": 0.6875,
"avg_line_length": 19.571428298950195,
"blob_id": "1a5d789b7424d1b01e171aaa64070ae41496122b",
"content_id": "16a14b6835c5711fa59662c9a1717a6225231b76",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 288,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 14,
"path": "/test/four_random.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom agents.random_agent import RandomAgent\nfrom environments.hearts import SimpleHearts\nfrom game import Game\n\n\ndef test_hearts():\n game = Game(SimpleHearts, [RandomAgent]*4)\n result = game.run()\n print(result)\n\n\nif __name__ == \"__main__\":\n test_hearts()\n"
},
{
"alpha_fraction": 0.5662732720375061,
"alphanum_fraction": 0.573989987373352,
"avg_line_length": 37.64912414550781,
"blob_id": "1a820b620e5e98ccc06f988f5d53ff2228915aeb",
"content_id": "485306a1230110fd19bf2af5abdbc46e3a6844e8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4406,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 114,
"path": "/src/environments/twentyfive.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import random\nfrom typing import Dict, List, Tuple\n\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card, Suit, OutOfTurnException\n\n\nclass TwentyFive(TrickTakingGame):\n '''\n Environment for twenty-five, a 4-player trump trick taking game.\n\n This variation plays with 8 cards of each suit, for a total of 32. Players receive 5 points\n for every trick, which can be won by playing the highest of the suit that was led or a trump\n card.\n A trump card can be played at any time and the hierarchy for the trump suit is 5, J,\n Highest Card of Hearts,\n followed by the remaining cards in the trump suit in the traditional numerical order.\n\n The game is won when a player earns 25 points -- AKA collects 5 tricks.\n\n Variations on normal game:\n - No ante(win at 25 points)\n - No robbing the pack\n - No jinking (nonstandard anyway)\n - No reneging\n - No strange ranking based on suit\n '''\n\n name = 'Twenty-Five'\n\n # TODO: randomize trump suit at beginning of each game\n def _get_trump_suit(self) -> Suit:\n return Suit.SPADES\n\n def _deal(self) -> List[int]:\n '''\n Overridden to deal five cards to each player.\n '''\n cards = []\n for i in range(self.num_players):\n cards += [i for _ in range(5)]\n cards += [-1] * (self.num_cards - len(cards))\n random.shuffle(cards)\n return cards\n\n def _end_trick(self) -> Tuple[List[int], int]:\n '''\n Updated reward to match twenty-five scoring rules.\n '''\n winning_player, winning_card = self._get_trick_winner()\n rewards = [0 for _ in range(self.num_players)]\n rewards[winning_player] = 5\n return rewards, winning_player\n\n def _get_trick_winner(self) -> Tuple[int, Card]:\n\n trump_suit = self.trump_suit\n starting_card = self.trick_leader\n played_cards = [self.index_to_card(self._state[self.num_cards + i]) for i in\n range(self.num_players)]\n\n winning_index = -1\n winning_card = starting_card\n trump_played = False\n for player_index, card in enumerate(played_cards):\n if trump_played and card.suit == trump_suit:\n # special trump cases\n if (card.value == 5) or (card.value == 11 and winning_card.value != 5) or \\\n (card == Card(Suit.HEARTS,\n self._num_cards - 1) and winning_card.value not in {5, 11}):\n winning_index = player_index\n winning_card = card\n\n elif card.value >= winning_card.value:\n winning_index = player_index\n winning_card = card\n\n elif not trump_played and (\n (card.suit == winning_card.suit and card.value >= winning_card.value) or \\\n (card.suit == trump_suit and winning_card.suit != trump_suit) or (\n card == Card(Suit.HEARTS, self._num_cards - 1))):\n winning_index = player_index\n winning_card = card\n if card.suit == trump_suit or card == Card(Suit.HEARTS, self._num_cards - 1):\n trump_played = True\n return winning_index, winning_card\n\n def is_valid_play(self, player_index, card_index) -> bool:\n\n if self._state[card_index] != player_index:\n return False\n\n # Check if player is empty of the starting suit if different suit was played\n played_card = self.index_to_card(card_index)\n starting_card = self.trick_leader\n\n if starting_card is None:\n return True\n\n # always allow trump suit\n if played_card.suit != starting_card.suit and played_card.suit != self.trump_suit:\n for i in range(self.num_cards):\n card_in_hand = self.index_to_card(i)\n if self._state[i] == player_index and \\\n (card_in_hand.suit == starting_card.suit):\n # TODO: allow reneging\n # or (card_in_hand.suit == self.trump_suit and card_in_hand.value not in {5, 11}) or \\\n # card_in_hand == 
Card(Suit.HEARTS, self._num_cards-1)):\n return False\n return True\n\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 8, 8, 8, 8\n"
},
{
"alpha_fraction": 0.5785296559333801,
"alphanum_fraction": 0.5854914784431458,
"avg_line_length": 42.53333282470703,
"blob_id": "fd6816dca52d04c424a05c8e563fe8a42e018a05",
"content_id": "75f6e47abe588c42f0f594bf1fe0f702c9854c61",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7182,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 165,
"path": "/src/agents/belief_agent.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "import abc\nfrom typing import List, Set, Tuple, Union\n\nfrom agents.base import Agent\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card, Suit\n\n\nclass BeliefBasedAgent(Agent):\n \"\"\"\n Abstract base class for an AI agent that plays a trick taking game, and\n that maintains a belief of the state instead of just the current observation.\n \"\"\"\n\n def __init__(self, game: TrickTakingGame, player_number: int):\n super().__init__(game, player_number)\n self._last_belief = None\n self._last_action = None\n self._last_reward = None\n self._running_reward = 0\n self._running_action = None\n self._belief = None\n self._player_belief = None\n self._player_action = None\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n \"\"\"\n Handle an observation from the environment, and update any personal records/current belief/etc.\n\n :param action: tuple of the player who moved and the index of the card they played\n :param observation: the observation corresponding to this player as returned by the env\n :param reward: an integral reward corresponding to this player as returned by the env\n :return: None\n \"\"\"\n if action[0] == self._player:\n self._last_belief = self._player_belief\n self._last_action = self._player_action\n self._last_reward = self._running_reward\n self._running_reward = 0\n self._player_belief = self._belief\n self._player_action = action\n self._running_action = action\n self._running_reward += reward if reward else 0\n self._belief = self.update_belief(observation)\n\n def barb(self) -> Union[None, Tuple[List[int], int, int, List[int]]]:\n \"\"\"\n Return an experience if one exists.\n :return: (old belief, action, reward, new belief) experience if the last action taken was by this agent,\n else None\n \"\"\"\n if self._running_action[0] == self._player:\n try:\n return self._last_belief[:], self._last_action[1], self._last_reward, self._player_belief[:]\n except TypeError:\n return None\n return None\n\n @abc.abstractmethod\n def act(self, epsilon: float = 0) -> Card:\n \"\"\"\n Based on the current observation/belief/known state, select a Card to play.\n :return: the card to play\n \"\"\"\n pass\n\n def get_belief_size(self) -> int:\n \"\"\"\n :return: the number of elements in the belief\n \"\"\"\n # return 4 * self._game.num_cards + self._game.num_players\n return self._game.num_cards * 4 + self._game.num_players + len(self._game.cards_per_suit)\n\n def update_belief(self, observation: List[int]) -> List[int]:\n \"\"\"\n Updates the current belief based on the observation\n Should NOT mutate the current belief, the belief is reassigned in self.observe(...)\n\n Invariant: self._belief should only ever be set to the output of _update_belief\n\n :param observation: a given observation, returned from the env\n :return: an updated belief that takes into account all information to summarize the agent's knowledge, suitable\n for input into a NN\n \"\"\"\n num_cards, num_players = self._game.num_cards, self._game.num_players\n\n # # valid cards in hand (BINARY) +\n # # trump cards in hand (BINARY) +\n # # cards discarded (BINARY) +\n # # cards in play (BINARY) +\n # # my score (LINEAR) + everyone else's score (LINEAR)\n # # = belief(4 * num_cards + num_players)\n #\n # # Valid cards\n # belief = [1 if observation[i] == 1 and self._game.is_valid_play(self._player, i) else 0\n # for i in range(num_cards)]\n #\n # # Trump cards\n # if observation[-3] != -1:\n # trump_suit = self._game.trump_suit\n # 
total = 0\n # for suit_index, suit_cards in enumerate(self._game.cards_per_suit):\n # if Suit(suit_index) == trump_suit:\n # belief.extend([1 if observation[total + i] else 0 for i in range(suit_cards)])\n # else:\n # belief.extend([0 for _ in range(suit_cards)])\n # total += suit_cards\n # else:\n # belief.extend([0 for _ in range(num_cards)])\n # assert len(belief) == 2 * num_cards, len(belief)\n #\n # # Cards discarded\n # belief.extend([1 if observation[i] == -1 else 0 for i in range(num_cards)])\n #\n # # Cards in play\n # belief.extend([0 for _ in range(num_cards)])\n # for card_index in observation[num_cards: num_cards + num_players]:\n # if card_index != -1:\n # belief[card_index - num_cards] = 1\n #\n # # Own score\n # belief.append(observation[num_cards + num_players + self._player])\n # # Other scores\n # belief.extend([observation[num_cards + num_players + i] for i in range(num_players) if i != self._player])\n\n # cards in hand (BINARY) + cards discarded (BINARY)\n # + cards in play (BINARY) [maybe 1-HOT instead? but more variables]\n # + trick leader (1-HOT or all zeros) + trump suit (1-HOT)\n # + score (LINEAR)\n belief = [0 for _ in range(num_cards * 4 + len(self._game.cards_per_suit))]\n\n # Cards in hand / discarded\n for card_index in range(num_cards):\n if observation[card_index] == 1:\n belief[card_index] = 1\n elif observation[card_index] == -1:\n belief[num_cards + card_index] = 1\n # Cards in play\n for card_index in observation[num_cards: num_cards + num_players]:\n if card_index != -1:\n belief[2 * num_cards + card_index] = 1\n # Trick leader\n if observation[-2] != -1:\n belief[3 * num_cards + observation[-2]] = 1\n # Trump suit\n if observation[-3] != -1:\n belief[4 * num_cards + observation[-3]] = 1\n # # Scores\n # belief.extend(observation[num_cards + num_players: num_cards + 2 * num_players])\n # Own score\n belief.append(observation[num_cards + num_players + self._player])\n # Other scores\n belief.extend([observation[num_cards + num_players + i] for i in range(num_players) if i != self._player])\n return belief\n\n def _get_hand(self, observation: List[int], valid_only: bool = False) -> Set[Card]:\n \"\"\"\n Get the hand of an agent based on an observation.\n :param observation: observation corresponding to this player as returned by the env\n :param valid_only: True if only valid card plays should be returned, False if entire hand should be returned\n :return: the set of cards in the player's hand\n \"\"\"\n cards = observation[:self._game.num_cards]\n return set(self._game.index_to_card(i) for i, in_hand in enumerate(cards)\n if in_hand and (not valid_only or self._game.is_valid_play(self._player, i)))"
},
{
"alpha_fraction": 0.6975124478340149,
"alphanum_fraction": 0.7004975080490112,
"avg_line_length": 32.5,
"blob_id": "6e1e552382dce02d751070bee7a068d46fa545c0",
"content_id": "5b744b0ebef9ce6ca569a8ab9505225356d9af92",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1005,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 30,
"path": "/src/agents/random_agent.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "\"\"\"Hardcoded Learner-Agent pair for an AI that always selects a random valid card in its hand to\nplay.\"\"\"\n\nimport random\nfrom typing import List, Tuple\n\nfrom agents.base import Learner, Agent\nfrom environments.trick_taking_game import TrickTakingGame\nfrom util import Card\n\n\nclass RandomAgent(Agent):\n def __init__(self, game: TrickTakingGame, player_number: int):\n super().__init__(game, player_number)\n self._current_observation = None\n\n def observe(self, action: Tuple[int, int], observation: List[int], reward: int):\n self._current_observation = observation\n\n def act(self, epsilon: float = 0) -> Card:\n valid_cards = self._get_hand(self._current_observation, valid_only=True)\n return random.sample(valid_cards, 1)[0]\n\n\nclass RandomLearner(Learner):\n def train(self, tasks: List[TrickTakingGame.__class__]):\n pass\n\n def initialize_agent(self, game: TrickTakingGame, player_number: int) -> Agent:\n return RandomAgent(game, player_number)\n"
},
{
"alpha_fraction": 0.5495049357414246,
"alphanum_fraction": 0.5700141191482544,
"avg_line_length": 29.085105895996094,
"blob_id": "d9817e66b5f09a40b40b9a15b9e2763021953041",
"content_id": "6ea4272608007564196bd1ad55351dca7396834c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1414,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 47,
"path": "/src/environments/spades.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nfrom typing import Tuple, List\n\nfrom environments.bidding_base import BiddingBase\nfrom util import Suit\n\n\nclass Spades(BiddingBase):\n \"\"\"\n Assumes that players 0 and 1 are teamed, and players 1 and 2 are teamed\n \"\"\"\n name = \"Spades\"\n\n def __init__(self):\n super().__init__()\n\n @property\n def cards_per_suit(self) -> Tuple[int, ...]:\n return 8, 8, 8, 8\n\n def _get_trump_suit(self) -> int:\n return Suit.SPADES\n\n def _end_trick(self) -> Tuple[List[int], int]:\n winning_player, winning_card = self._get_trick_winner()\n rewards = [0 for _ in range(self.num_players)]\n # TODO: reward intermediate as getting closer\n self._tricks_won[winning_player] += 1\n\n return rewards, winning_player\n\n def _end_game_bonuses(self) -> List[int]:\n scores = [0 for _ in range(self.num_players)]\n # each team has players team*2, team*2+1\n for team in range(self.num_players // 2):\n p1 = team * 2\n p2 = team * 2 + 1\n team_tricks = self._tricks_won[p1] + self._tricks_won[p2]\n team_bid = self._player_bids[p1] + self._player_bids[p2]\n team_score = 0\n if team_tricks >= team_bid:\n team_score = 10 * team_bid + (team_tricks - team_bid)\n\n scores[p1] += team_score\n scores[p2] += team_score\n\n return scores\n"
},
{
"alpha_fraction": 0.5916810035705566,
"alphanum_fraction": 0.5931577682495117,
"avg_line_length": 36.971961975097656,
"blob_id": "a82dabe63403dfa4f6bef6898e2b509ad17ef28d",
"content_id": "8eec8dfa4ab7ec279da1cccc427012619d3117b2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4063,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 107,
"path": "/src/flask_game.py",
"repo_name": "lilianluong/multitask-card-games",
"src_encoding": "UTF-8",
"text": "# Created by Patrick Kao\nimport queue\nimport threading\nfrom collections import defaultdict\nfrom typing import List, Dict, Any\n\nfrom flask import Flask, render_template, request\n\nfrom agents.base import Agent\nfrom agents.human import Human\nfrom environments.trick_taking_game import TrickTakingGame\nfrom game import Game\nfrom util import Card, Suit\n\n\nclass FlaskGame(Game):\n \"\"\"\n Singleton class that can display contents of game on flask server\n \"\"\"\n __instance = None\n\n @staticmethod\n def getInstance(game: TrickTakingGame.__class__ = None,\n player_setting: List[Agent.__class__] = None):\n \"\"\" Static access method. \"\"\"\n if FlaskGame.__instance is None:\n FlaskGame.__instance = FlaskGame(game, player_setting)\n return FlaskGame.__instance\n\n def __init__(self, game: TrickTakingGame.__class__,\n player_setting: List[Agent.__class__] = None,\n agent_params: List[Dict[str, Any]] = None):\n if FlaskGame.__instance is not None:\n raise Exception(\"This class is a singleton!\")\n else:\n super().__init__(game, player_setting, agent_params)\n self.input_queue = queue.Queue()\n self.render_queue = queue.Queue()\n self.first = True\n\n def _choose_action_human(self, player: int) -> Card:\n \"\"\"\n Block until flask server returns next card to choose\n :param player:\n :return:\n \"\"\"\n return self.input_queue.get()\n\n def render(self, mode: str = \"human\", view: int = -1):\n \"\"\"\n Display website and if player selects card, add player chosen card to synchronous queue\n :return:\n \"\"\"\n if hasattr(request, 'form'):\n data = request.form # contains player input in dict form fields: card=rank, type=suit\n # add selection to queue\n if \"card\" in data:\n card = Card(Suit[data['type'].upper()], int(data[\"card\"]))\n self.input_queue.put(card)\n # wait for game update\n self.render_queue.get()\n\n trick_cards = self._game.current_trick()\n player_current_cards = defaultdict(dict)\n for player, card in trick_cards.items():\n player_current_cards[player][\"type\"] = card.suit.name.lower()\n player_current_cards[player][\"card\"] = card.value\n \n log_message = \"Please select a card\"\n\n # assumes only 1 human player and takes first one\n human_players = self._agent_list[isinstance(self._agent_list, Human)]\n # assert len(human_players)==1, f\"need only 1 human player, have {len(human_players)}\"\n human_index = \\\n [idx for idx, element in enumerate(self._agent_list) if isinstance(element, Human)][0]\n player_state = defaultdict(list) # needs to be dict of suit:ranks in suit\n hand = self._game._get_hands()[human_index]\n for card_ind in hand:\n card = self._game.index_to_card(card_ind)\n suit_str = card.suit.name.lower()\n player_state[suit_str].append(card.value)\n\n players_score = self._game.scores[human_index]\n return render_template(\"main.html\",\n player_current_cards=player_current_cards,\n log_message=log_message,\n user_player=player_state,\n players_score=players_score)\n\n def _choose_action(self, player: int) -> Card:\n # update render queue\n if isinstance(self._agent_list[player], Human):\n if self.first:\n self.first = False\n else:\n self.render_queue.put(0) # any value\n return super()._choose_action(player)\n\n\napp = Flask(__name__, template_folder=\"../templates\", static_folder=\"../static\")\napp.config['DEBUG'] = False\nthreading.Thread(target=app.run).start()\n\n\[email protected]('/play', methods=['POST', 'GET'])\ndef handle_website():\n return FlaskGame.getInstance().render(request.method)\n"
}
] | 32 |
zlapp/SupContrast | https://github.com/zlapp/SupContrast | 9b24d81a05855bd3fe14218d2a2de8fc9aca31c8 | 2f618d2d72629bfeb0ec40a76e3c2802fab28b5e | fa48e522a170aba7e608b252ebb02357c75e0f6e | refs/heads/master | 2022-11-26T22:45:52.657829 | 2020-08-05T20:07:40 | 2020-08-05T20:07:40 | 282,799,928 | 0 | 0 | null | 2020-07-27T05:11:26 | 2020-07-24T11:12:32 | 2020-06-28T23:46:25 | null | [
{
"alpha_fraction": 0.5496971607208252,
"alphanum_fraction": 0.5550409555435181,
"avg_line_length": 29.19354820251465,
"blob_id": "28d864775850811d138a7857c631f6a664be45be",
"content_id": "8444d8c7442bd4959736f5fa33e4df7f4f49bac6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2807,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 93,
"path": "/data/imagenet.py",
"repo_name": "zlapp/SupContrast",
"src_encoding": "UTF-8",
"text": "import os\nimport torch\nimport torchvision.datasets as datasets\nimport torch.utils.data as data\nfrom PIL import Image\nfrom torchvision import transforms as tf\nfrom glob import glob\n\n\nclass ImageNet(datasets.ImageFolder):\n def __init__(self, root, split='train', transform=None):\n super(ImageNet, self).__init__(root=os.path.join(root, 'ILSVRC2012_img_%s' %(split)),\n transform=None)\n self.transform = transform \n self.split = split\n self.resize = tf.Resize(256)\n \n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n path, target = self.imgs[index]\n with open(path, 'rb') as f:\n img = Image.open(f).convert('RGB')\n im_size = img.size\n img = self.resize(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\n def get_image(self, index):\n path, target = self.imgs[index]\n with open(path, 'rb') as f:\n img = Image.open(f).convert('RGB')\n img = self.resize(img) \n return img\n\n\nclass ImageNetSubset(data.Dataset):\n def __init__(self, subset_file, root, split='train', \n transform=None):\n super(ImageNetSubset, self).__init__()\n\n self.root = os.path.join(root, 'ILSVRC2012_img_%s' %(split))\n self.transform = transform\n self.split = split\n\n # Read the subset of classes to include (sorted)\n with open(subset_file, 'r') as f:\n result = f.read().splitlines()\n subdirs, class_names = [], []\n for line in result:\n subdir, class_name = line.split(' ', 1)\n subdirs.append(subdir)\n class_names.append(class_name)\n\n # Gather the files (sorted)\n imgs = []\n for i, subdir in enumerate(subdirs):\n subdir_path = os.path.join(self.root, subdir)\n files = sorted(glob(os.path.join(self.root, subdir, '*.JPEG')))\n for f in files:\n imgs.append((f, i)) \n self.imgs = imgs \n self.classes = class_names\n \n\t# Resize\n self.resize = tf.Resize(256)\n\n def get_image(self, index):\n path, target = self.imgs[index]\n with open(path, 'rb') as f:\n img = Image.open(f).convert('RGB')\n img = self.resize(img) \n return img\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n path, target = self.imgs[index]\n with open(path, 'rb') as f:\n img = Image.open(f).convert('RGB')\n im_size = img.size\n img = self.resize(img) \n class_name = self.classes[target]\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target"
}
] | 1 |
razbikel/Schedule-Manager | https://github.com/razbikel/Schedule-Manager | 280b896e24465972489a5aa48103019952185f01 | 233611e9ceb1cd69beb975803e96258bb089f9b5 | 1489f109174c73fff26fcf21cc48aa0a0581a068 | refs/heads/master | 2021-06-22T23:06:51.733970 | 2021-03-08T09:30:13 | 2021-03-08T09:30:13 | 201,751,437 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.43940579891204834,
"alphanum_fraction": 0.4480062425136566,
"avg_line_length": 45.9357795715332,
"blob_id": "882c565372dfc6796283ac5d5194257e2cce5287",
"content_id": "88574e24481a701ce0b29abebe083c0d26eff31c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5116,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 109,
"path": "/schedule.py",
"repo_name": "razbikel/Schedule-Manager",
"src_encoding": "UTF-8",
"text": "import sqlite3\nimport os\n\n\ndef main():\n DBExist = os.path.isfile('schedule.db')\n if DBExist:\n dbcon = sqlite3.connect('schedule.db')\n cursor = dbcon.cursor()\n\n def courses_Is_Not_Empty():\n cursor.execute(\"SELECT * FROM courses\")\n return len(cursor.fetchall()) is not 0\n\n def print_Tables():\n cursor.execute(\"SELECT * FROM courses\")\n list_courses = cursor.fetchall()\n print('courses')\n for item in list_courses:\n print(item)\n\n cursor.execute(\"SELECT * FROM classrooms\")\n list_classes = cursor.fetchall()\n print('classrooms')\n for item in list_classes:\n print(item)\n\n cursor.execute(\"SELECT * FROM students\")\n list_students = cursor.fetchall()\n print('students')\n for item in list_students:\n print(item)\n\n if courses_Is_Not_Empty() is not True:\n print_Tables()\n\n i = 0\n\n while os.path.isfile('schedule.db') and courses_Is_Not_Empty():\n cursor.execute(\"SELECT * FROM classrooms\")\n classes = cursor.fetchall()\n\n for item in classes:\n time = item[3]\n if time is 0:\n cursor.execute(\"SELECT * FROM courses WHERE courses.class_id = ?\", (item[0],))\n course = cursor.fetchone()\n if course is not None:\n cursor.execute(\n \" UPDATE classrooms SET current_course_id = ? WHERE classrooms.id = ?\",\n (course[0], item[0]))\n cursor.execute(\n \" UPDATE classrooms SET current_course_time_left = ? WHERE classrooms.id = ?\",\n (course[5], item[0]))\n cursor.execute(\"SELECT * FROM students WHERE students.grade = ?\", (course[2],))\n student = cursor.fetchone()\n cursor.execute(\"UPDATE students SET count = ? WHERE students.grade = ?\",\n (student[1] - course[3], course[2]))\n dbcon.commit()\n location = item[1]\n course_Name = course[1]\n print(\"(\" + str(i) + \") \" + location + \": \" + course_Name + \" is schedule to start\")\n elif time is 1:\n cursor.execute(\"SELECT * FROM courses WHERE courses.id = ?\", (item[2],))\n course_To_Remove = cursor.fetchone()\n location = item[1]\n course_Name = course_To_Remove[1]\n print(\"(\" + str(i) + \") \" + location + \": \" + course_Name + \" is done\")\n cursor.execute(\"DELETE FROM courses WHERE courses.id = ?\", (course_To_Remove[0],))\n dbcon.commit()\n cursor.execute(\"SELECT * FROM courses WHERE courses.class_id = ?\", (item[0],))\n course = cursor.fetchone()\n if course is not None:\n cursor.execute(\"UPDATE classrooms SET current_course_id = ? WHERE classrooms.id = ?\",\n (course[0], item[0]))\n cursor.execute(\n \"UPDATE classrooms SET current_course_time_left = ? WHERE classrooms.id = ?\",\n (course[5], item[0]))\n cursor.execute(\"SELECT * FROM students WHERE students.grade = ?\", (course[2],))\n student = cursor.fetchone()\n cursor.execute(\"UPDATE students SET count = ? WHERE grade = ?\",\n (student[1] - course[3], course[2]))\n dbcon.commit()\n location = item[1]\n course_Name = course[1]\n print(\"(\" + str(i) + \") \" + location + \": \" + course_Name + \" is schedule to start\")\n else:\n cursor.execute(\"UPDATE classrooms SET current_course_id = ? WHERE classrooms.id = ?\",\n (0, item[0]))\n cursor.execute(\n \"UPDATE classrooms SET current_course_time_left = ? WHERE classrooms.id = ?\",\n (0, item[0]))\n dbcon.commit()\n else:\n cursor.execute(\n \"UPDATE classrooms SET current_course_time_left = ? 
WHERE classrooms.id = ?\",\n (item[3] - 1, item[0]))\n cursor.execute(\"SELECT * FROM courses WHERE courses.id = ?\", (item[2],))\n course = cursor.fetchone()\n dbcon.commit()\n location = item[1]\n course_Name = course[1]\n print(\"(\" + str(i) + \") \" + location + \": \" + \"occupied by \" + course_Name)\n i += 1\n print_Tables()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7914692163467407,
"alphanum_fraction": 0.8151658773422241,
"avg_line_length": 41.20000076293945,
"blob_id": "27d2c08853114393a19de868d86453baa08a3f9d",
"content_id": "46353783b2f86cb4d6ed6d87ba445329aacfb11c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 5,
"path": "/README.md",
"repo_name": "razbikel/Schedule-Manager",
"src_encoding": "UTF-8",
"text": "# Schedule-Manager\n\nusing sqlite3 database that manages bookings of classrooms according to availability, class size and specifications.\n\nhttps://www.cs.bgu.ac.il/~spl191/index.php?page=Assignments.Assignment_4\n"
},
{
"alpha_fraction": 0.6102223992347717,
"alphanum_fraction": 0.6172454357147217,
"avg_line_length": 26.85869598388672,
"blob_id": "ffbfb828484d9e011541d9bfbbb28f6557681ab4",
"content_id": "5b366d4d546e07c44578fba54937ed290fbb6b81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2563,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 92,
"path": "/create_db.py",
"repo_name": "razbikel/Schedule-Manager",
"src_encoding": "UTF-8",
"text": "import sqlite3\nimport os\nimport atexit\nimport sys\n\nDBExist = os.path.isfile('schedule.db')\ndbcon = sqlite3.connect('schedule.db')\nwith dbcon:\n cursor = dbcon.cursor()\n\n\ndef close_db():\n dbcon.commit()\n dbcon.close()\n\n\natexit.register(close_db)\n\n\ndef create_tables():\n cursor.execute(\"\"\" CREATE TABLE courses(id INTEGER PRIMARY KEY,\n course_name TEXT NOT NULL,\n student TEXT NOT NULL,\n number_of_students INTEGER NOT NULL,\n class_id INTEGER REFERENCES classrooms(id),\n course_length INTEGER NOT NULL)\n \"\"\")\n cursor.execute(\"\"\" CREATE TABLE students( grade TEXT PRIMARY KEY,\n count INTEGER NOT NULL)\n \"\"\")\n cursor.execute(\"\"\" CREATE TABLE classrooms( id INTEGER PRIMARY KEY,\n location TEXT NOT NULL,\n current_course_id INTEGER NOT NULL,\n current_course_time_left INTEGER NOT NULL)\n \"\"\")\n\n\ndef insert_course(id, course_name, student, number_of_students, class_id, course_length):\n cursor.execute(\"INSERT INTO courses VALUES (?, ?, ?, ?, ?, ?)\",\n (id, course_name, student, number_of_students, class_id, course_length))\n\n\ndef insert_student(grade, count):\n cursor.execute(\"INSERT INTO students VALUES (?, ?)\", (grade, count))\n\n\ndef insert_classroom(id, location, current_course_id, current_course_time_left):\n cursor.execute(\"INSERT INTO classrooms VALUES (?, ?, ?, ?)\",\n (id, location, current_course_id, current_course_time_left))\n\n\ndef print_Tables():\n cursor.execute(\"SELECT * FROM courses\")\n list_courses = cursor.fetchall()\n print('courses')\n for item in list_courses:\n print(item)\n\n cursor.execute(\"SELECT * FROM classrooms\")\n list_classes = cursor.fetchall()\n print('classrooms')\n for item in list_classes:\n print(item)\n\n cursor.execute(\"SELECT * FROM students\")\n list_students = cursor.fetchall()\n print('students')\n for item in list_students:\n print(item)\n\n\ndef insert_Tables_From_Config(config):\n with open(config) as inputfile:\n for line in inputfile:\n s = line.split(', ')\n if s[0] == 'S':\n insert_student(s[1].strip(), s[2].strip())\n elif s[0] == 'C':\n insert_course(s[1].strip(), s[2].strip(), s[3].strip(), s[4].strip(), s[5].strip(), s[6].strip())\n elif s[0] == 'R':\n insert_classroom(s[1].strip(), s[2].strip(), 0, 0)\n\n\ndef main(argv):\n if not DBExist:\n create_tables()\n insert_Tables_From_Config(argv[1])\n print_Tables()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"
}
] | 3 |
bobbtalks/pynet | https://github.com/bobbtalks/pynet | 1de3a3f9ea75f80d206651461acf891cb1bb3775 | d39a7f91fd16b92be17fea5e7ee38379a9c0dc76 | 20af5b9827af478d94ded40790165706109e4f14 | refs/heads/master | 2016-09-14T14:41:45.184211 | 2016-04-13T19:36:16 | 2016-04-13T19:36:16 | 56,178,084 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5882353186607361,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 4.666666507720947,
"blob_id": "5808c56a3450de46dd0a296c863099f026e3b4a0",
"content_id": "4f4938d570063f8f6f6a69134317f73f12d165de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 7,
"num_lines": 3,
"path": "/README.md",
"repo_name": "bobbtalks/pynet",
"src_encoding": "UTF-8",
"text": "# pynet\n\n# hello\n"
},
{
"alpha_fraction": 0.6756756901741028,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 17.5,
"blob_id": "4561e0984678198b1b9a4d9df4803408a9f58be9",
"content_id": "eb8096338dbe50a15e5232bcf6eb1fc35863aa93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/hello1.py",
"repo_name": "bobbtalks/pynet",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nprint \"hello1\"\n"
}
] | 2 |
sugino0708/IoT-kadai | https://github.com/sugino0708/IoT-kadai | 6fd6ba6ed2187bddddca767e84a02b8166a78dcf | d8754d9f8afb025f560b9b466211ad446f395e5d | d73a6585d2f07e37f40dfdc7346d20f5e84bb64a | refs/heads/master | 2020-03-19T01:03:29.424444 | 2018-07-02T05:46:20 | 2018-07-02T05:46:20 | 135,519,023 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6612001657485962,
"alphanum_fraction": 0.7505466341972351,
"avg_line_length": 13.993624687194824,
"blob_id": "c69d61360d939dab2eaa9ec99ce4712e28c23fd9",
"content_id": "6f5dd650550274c78c41a88d74825c12f3dc7643",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 16464,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 1098,
"path": "/.bash_history",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "ls\ncd /\nls\nvagrant\napt-get update\nsudo apt-get update\nsudo apt-get install sl\nsl\npython ---version\npython --version\npython jupyternote\njupyternote\njupyter\nclear\nsl\nclear\nipconfig\nifconfig\nifconfig -a\nls\ncd frv\nde etc\nls\ncd etc\nls\ncat netconfig\ncat hosts\ncd network/\nls\ncat interfaces\nyes hello | head -n 10\nclear\nls\ncd .\ncd/\ncd /\nclear\nls\ncd dev\nls\ncd ..\nls\ncd etc\nls\ncd ..\nls\nls bin\nsudo raspi-config \npython --version\nconda \nanaconda\npytnon3 --version\npython3 --version\npyton\npython\npython3\nclear\ncode sample.txt\nvi sample.txt\nvi --version\ncd /tmp\nls\nls systemd-private-1d3d5d6a6f574a2d9da8f89a13d8b779-systemd-timesyncd.service-ssqb6h/\nsudo ls systemd-private-1d3d5d6a6f574a2d9da8f89a13d8b779-systemd-timesyncd.service-ssqb6h/\ncat systemd-private-1d3d5d6a6f574a2d9da8f89a13d8b779-systemd-timesyncd.service-ssqb6h/tmp\nsudo cat systemd-private-1d3d5d6a6f574a2d9da8f89a13d8b779-systemd-timesyncd.service-ssqb6h/tmp\nsudo cat systemd-private-1d3d5d6a6f574a2d9da8f89a13d8b779-systemd-timesyncd.service-ssqb6h/tmp/\nvi a.py\npython a.py\nvi a.py\nchmod +x a.py \nls\na.py\nvi a.py \na.py\nwhich python\nvi a.py \na.p\na.py\n/a.py\n./a.py\nclear\ncat a.py \nwhich python3\nls\nvi a.py \n./a.py \nls\nvi a.py \necho $PATH\ncd /\nls\ncd ~\nls\nls -al\ncat .bashrc\ncat .bashrc | less\ncat .bashrc | grep -i path\ncd .\ncd /\nls\ncd etc\nls\ncat profile\ncd /tmp\nls\nPATH=$PATH:/tmp\na.py\necho $PATH\nbash\nman bash | grep -i path\nman bash | cat -n | grep -i path\necho $LANG\nlocle\nlocale\nman man\n$lANG=ja_JP.UTF-8\n$LANG=ja_JP.UTF-8\nLANG=ja_JP.UTF-8\nman man\nman genlocale\nsudo genlocale ja_JP.UTF-8\nlocale --all\ncd /etc\nls\nsudo locale-gen ja_JP.UTF-8\n]date\ndate\nman man\nset \nclear\ncat /etc/shadow\nsudo cat /etc/shadow\nsudo cat /etc/passwd\ndaemon\nyes\nclear\nsudo shutdown -h now\nls\ncd /\nls\nifconfig -a\nman chsh\nls\nsudo raspi-config\ndata\ndate\nman chsh\nman bash\nman date\nman man\nman bash\nclear\nvi --help\nls\ncd \nls\nvi sample.txt\nls\npython3 --versino\npython3 --version\nconda --version\nanaconda\npip -h\npip --help\nwhich pip\nls\nclear\npython3\nls\npython3\nps auxv\nps ax\nps ax |wc -l\nnetstat -ano\nclear\ncd /\ncd\nmkdir IoT\nls\ncd IoT/\ncode RPython00.py\nvi RPython00.py\nls\npython RPython00.py \npython3 RPython00.py \ncat RPython00.py \nvi RPython00.py\npython3 RPython00.py \nsudo shutdown -h now\nls\nexit\nls\ncd /\nls\ndatra\nclear\nexit\nls\ncd /\nls\nhistory\nexit\nls\nexit\ndata\ndate\nclear\nexit\ntree\nexit\ntree\ntree /\ntree /etc\ntree /bin\ntree /dev\nls /\ntree /opt\nidiottree /opt!\nhistory\nidiothistory!\nhitory\nhistory\ndate\n!\nexit\nls\nexit\nsudo shutdown -h now\nsudp systemctl list-unit ntp.service\nsudo systemctl list-unit ntp.service\nsudo systemctl list-units ntp.service\npgrep ntp\nps aux | grep ntp\nps auxv | grep ntp\nps aux | grep ntp\nsudo systemctl list-units ntp.service\nls\nlses /etc/ntp.conf\nless /etc/ntp.conf\nsudo poweroff -h now\nsudo shutdown -h now\nls\nsudo apt-get update\nsudo apt-get install python-rpi.gpio\ncode 0j02017-1.py\nvi 0j02017-1.py\nls\ncd IoT/\nls\nvi led1.py\ncat vi\ncat led1.py \nvi led1.py \nls\nvi led1.py \nls\nvi 0j02017-1.py\nls\nvi 0j02017-1.py\npython 0j02017-1.py \nvi 0j02017-1.py\nvi\nvi 0j02017-1.py\npython led1.py \nps\nkill -9 1341\nps\npython led1.py \nclear\npython led1.py \npython 0j02017-1.py \npython 0j02017-1.py 8\nvi 0j02017-1.py \npython led1.py \napt search GPIO\nvi 0j02017-1.py \npython 0j02017-1.py 8\nvi 0j02017-1.py \npython 
0j02017-1.py 8\npython 0j02017-1.py 0\npython 0j02017-1.py 1\npython 0j02017-1.py 0000\npython led1.py \nvi 0j02017-1.py \nls\ncp led1.py led2.py\nls\nvi led2.py \npython led2.py \nvi 0j02017-1.py \npython 0j02017-1.py 1\nvi 0j02017-1.py \npython 0j02017-1.py 1\npython 0j02017-1.py 8\nvi 0j02017-1.py \npython 0j02017-1.py 8\nvi 0j02017-1.py \npython 0j02017-1.py 8\npython 0j02017-1.py 1\nvi 0j02017-1.py \npython 0j02017-1.py 1\nvi 0j02017-1.py \npython 0j02017-1.py 1\nvi 0j02017-1.py \nvi 0j02017-1.py 2\nls\npython 0j02017-1.py 2\nvi 0j02017-1.py \npython 0j02017-1.py 1\npython 0j02017-1.py 15\nvi 0j02017-1.py \ncat led\ncat led1.py \nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\ncat led1.py \nvi 0j02017-1.py \npython 0j02017-1.py 15\npython led2.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\ncat led1.py \nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\npython 0j02017-1.py 1\npython 0j02017-1.py 2\nvi 0j02017-1.py \npython 0j02017-1.py 2\npython 0j02017-1.py 8\npython 0j02017-1.py 0\nvi 0j02017-1.py \npython led1.py \nvi 0j02017-1.py\npython 0j02017-1.py 1\npython 0j02017-1.py 3\nvi 0j02017-1.py \npython 0j02017-1.py 3\nvi 0j02017-1.py \npython 0j02017-1.py 3\npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \nvi 0j02017-1.py 1\npython 0j02017-1.py 1\nvi 0j02017-1.py \npython 0j02017-1.py 1\nvi 0j02017-1.py \npython 0j02017-1.py 1\nsudo shutdown -h now\nls\ncd IoT/\nls\nvi 0j02017-1.py\nls\npython 0j02017-1.py 1\nvi 0j02017-1.py \nvi 0j02017-1.py 1\npython 0j02017-1.py 1\npython 0j02017-1.py 15\npython 0j02017-1.py 16\nvi 0j02017-1.py\npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \npython 0j02017-1.py 15\nvi 0j02017-1.py \nls\nclear\npython 0j02017-1.py 1\npython 0j02017-1.py 5\npython 0j02017-1.py 8\nsudo apt update\nvi sw1.py\nls\nca sw1.py \ncat sw1.py\npython sw1.py \nvi sw1.py \npython sw1.py \nvi sw1.py \npython sw1.py \nvi sw1.py \npython sw1.py \nvi sw1.py \npython sw1.py \npython3 sw1.py \npython sw1.py \npython3 sw1.py \npip list\ndpkg | RPi\ndpkg | grep RPi\ndpkg --help\ndpkg --help | less\ndpkg -i | grep RPi\nsudo dpkg -i | grep RPi\nclear\npython --version\ndpkg -l | grep PRi\ndpkg -l | grep -i PRi\npython sw1.py \npython3 sw1.py \npython3 0j02017-1.py 10\npython 0j02017-1.py 10\npython --version\npython3 --version\npython sw1.py \nvi sw1.py \npython sw1.py \nvi sw1.py \nsudo apt install python3-rpi.gpio\npython3 sw1.py \nvi sw2.py\npython3 sw2.py \nvi sw2.py \npython3 sw2.py \ncp sw1.py sw1-1.py\nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \nvi sw1-1.py \npython3 sw1-1.py \ncat sw1-1.py \nls\nvi 0j02017-2.py\npython3 0j02017-2.py \nvi 0j02017-2.py\npython3 0j02017-2.py \npython3 sw1-1.py \nvi 0j02017-2.py\npython3 0j02017-2.py \nvi 0j02017-2.py\nsudo shutdown -h now\nls\ncd Iot\ncd IoT/\nls\nvi 0j02017-2.py \nls\npython 0j02017-2.py \nvi 0j02017-2.py \npython 0j02017-2.py \npe\nps\npython 0j02017-2.py \nvi 0j02017-2.py \npython 0j02017-2.py \nvi 0j02017-2.py \npython 0j02017-2.py \nvi 0j02017-2.py \npython 0j02017-2.py \nip a\nscp 0j02017-2.py 10.0.16.20:~/\npython 0j02017-2.py \nip\nip 
a\nls\npython shutdown.py\nls\ncd IoT/\nls\nvi shutdown.py \npython shutdown.py \nps\nkill -9 python\nkill -9 1038\nps\npython shutdown.py \nps\nkill -9 1048\nps\nls\nvi shutdown.py \npython shutdown.py \nps\nkill -9 1067\n-v\nls\nvi shutdown.py \nps\npython shutdown.py \nls\nhistory\nvi shutdown.py \nls\ncd IoT/\nls\nvi shutdown.py \npython hu\npython shutdown.py \nls /etc/\nls /etc/ | grep rc\nsudo /etc/rc.local\nsudo vi /etc/rc.local\nls\nsudo reboor\nsudo reboot\nls\ndate\nsudo reboot\nls\nhistory \nsudo vi /etc/rc.local\ncd IoT/\nls\nsudo chmod shutdown.py 755\nls -a\nsudo -al\nls - al\nls -al\nsudo chmod 755 shutdown.py \nls -al\nsudo vi /usr/lib/systemd/shutdown.service\nsudo systemctl enable shutdown.service\ncat /usr/lib/systemd/shutdown.service\ncd /usr/lib\nls\ncd /systemd\ncd systemd\nls\nmv --help\nls\nmv shutdown.service ./system/\nmv -f shutdown.service ./system/\nmkdir system\nsudo mkdir system\nls\nmkdir system\nmv shutdown.service ./system/\nsudo mv shutdown.service ./system/\nls\nsudo systemctl enable shutdown.service\nsudo reboot\nsystemctl statas shutdown.service\nsystemctl status shutdown.service\nless /var/log/syslog\nless /var/log/syslog | grep -i failed\nless /var/log/syslog | grep -i failed | less\nless /var/log/syslog | grep -i shutdown\ncd /home\nls\nls \npi\ncd pi\nls\ncd ..\nls\ncd --\ncd \npwd\ncd /home/0j02017/\nls\nhistory \nsudo vi /usr/lib/systemd/shutdown.service\nhistory \nsudo vi /usr/lib/systemd/system/shutdown.service\nls\nsudo reboot\nsystemctl status shutdown.service\nls\nclear\nls\nls -al\nls /var/www/html\nls /\nls /var\ndpkg --help\ndpkg -l | grep -i apache\ndpkg -l \ndpkg | wc -n\ndpkg -l | wc -n\ndpkg -l | wc -l\nclear\ntop\nls /var\nls /var/www/html/\nls\nsystemctl staus apache2.service\nsystemctl status apache2.service\ntop\nsudo shutdown -h now\ntop\ncd /etc/apache2/\nls\ncd mods-enabled/\nls\ncat npm_prefork.conf\ncat mpm_prefork.conf\nemacs mpm_prefork.conf\nemac mpm_prefork.conf\nem\nemc\nemacs\nsudo apt-get update\napt-cache show emacs23\napt-cache show emac\nsudo apt-get install -y php7.0-fpm\nsudo sisytemctl enable php7.0-fpm.service\nsudo systemctl enable php7.0-fpm.service\nsudo systemctl status php7.0-fpm.service\nsudo systemctl start php7.0-fpm.service\nsudo systemctl status php7.0-fpm.service\nless /etc/php/7.0/fpm/php-fpm.conf\ncd /etc/php/\ncd 7.0/\ncd fpm/\ncd pool.d/\nls\nless eww\nless www.conf \nsudo a2dismod php7.0\nsudo a2dismod mpm_prefork\nsudo a2enmod mpm_event\ncat /etc/apache2/mods-available/mpm_event.conf\ncat /etc/apache2/mods-enabled/mpm_event.conf \ntop\nsudo a2enmod proxy_fcgi\nsudo a2enconf php7.0-fpm\nsudo systemctl restart apache2.service\nsudo systemctl status apache2.service se\nisis\ntop\nsudo shutdown -h now\ncd /usr/lib\nls\ncd ..\nls\ncd share/\nls\ncd sounds/\nls\ncd alsa/\nls\naplay Front_Center.wav \nps\naplay Front_Center.wav \nls\ncd /\nls\ncd ~\ncd IoT/\nls\nmkdir sound\ncd sound/\nls\ncode sound1.py\nsudo vi sound1.py\ncat sound1.py \npip --version\nsudo apt-get update\nsudo apt-get install pygame.mixer\nsudo apt-get install pygame\nsudo apt-get install pygame.mixer\nsudo apt-get install pip\nsudo apt-get install python-pygame\nls\nwget https://maoudamashii.jokersounds.com/music/se/mp3/se_maoudamashii_chime10.mp3\nls\npython3 sound1.py \ndpkg -l | grep -i pygame\nsudo apt-get install python-pygame\nls\nvi sound1.py \nsudo vi sound1.py \npython sound1.py \nls\npython sound2.py \nsudo vi sound2.py\npython sound2.py \ncat sound1.py \ncat sound2.py \nsudo sound2\nsudo vi sound2.py \npython sound2.py 
\nls\nsudo vi sound2.py \npython sound2.py \nsudo apt-get python3-pygame\nsudo apt-get install python3-pygame\ncd ..\nls\ncatt 0j02017-1.py \ncat 0j02017-1.py \nsudo shutdown -h now\nls\ncd IoT/\nls\ncd sound/\nls\nnetstat\ncelar\nclear\nls\nps\ntop\nip a\nls\ncat sound3.py \npython sound3.py \ncode sound1.py \ncat sound1.py \nsudo vi sound3.py \npython sound3.py \nls\nsudo vi sound3.py \ncat sound1.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nls\ns!\nls\nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nls\npython sound3.py \nls\nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nsudo vi sound3.py \npython sound3.py \nscp sound3.py 10.0.16.20:~/IoT\npython sound3.py \nls\nsudo shutdown -h now\ncd IoT/\nsudo python3 tmp102.py \nsudo shutdown -h now\nls\nsudo apt update\nsudo apt upgrade\nsudo apt install i2c-tools libi2-dev python3-smbus\nsudo apt install i2c-tools libi2c-dev python3-smbus\nsudo apt-get install i2c-tools libi2c-dev python3-smbus\nsudo apt-get install i2c-tools\nsudo apt install i2c-tools libi2c-dev python3-smbus\napt search i2c-tools\napt search apache2\napt search i2c-tools\nsudo apt update\napt search i2c-tools\nsudo apt install i2c-tools libi2c-dev python3-smbus\nsudo i2cdetext -y 1\nsudo i2cdetect -y 1\nls\ncd IoT/\nls\nip a\nsudo i2cdetect -y 1\nip a\nls\nsudo python3 tmp102.py\nsudo vi tmp102.py \nsudo python3 tmp102.py\ncd /tmp\ngit clone https://github.com/kdlucas/byte-unixbench.git\nsudo apt install git\ngit clone https://github.com/kdlucas/byte-unixbench.git\ncd byte-unixbench/UnixBench\nmake\n./Run \nls\ntree\npython --version\npython3 --version\npytho\nclear\nhistory\ncurl -sSL https://get.docker.com\ncurl -sSL https://get.docker.com | sh\ndocker info\ncd /\nls\nsudo usermod -aG docker 0j02017\nexit\ntop\nqq\nclear\nip \nip a\nhttp_proxy='' curl -s http://10.0.16.250:8888\nman ab\nfor in 10\nfor i in 10; ;\nclear\ndocker search httpd\ndocker search mariadb\ndocker pull mariadb\ndocker search mysql\ndocker pull mysql\nwhy\nfuck youlswhywhywhywhywhywhywhyls\nls\nclear\ndocker pull hypriot/rpi-mysql wordpress\ndocker pull hypriot/rpi-mysql\ndocker info\ndocker run --rm -it hello-world\nhello\nhalo\nye-s\ny-es\nclear\ndocker run --rm -it hello-world\nrun\nclear\ndocker run --rm -it hello-world\nre-contract -SD --location=Tokyo localhost@moxhinji -d ./ --all\narm\ndocker run --rm -it ubuntu:18.04\ndocker run --rm -it centos\ndocker pull httpd\ndocker run -it --rm -p 8888:80 httpd\ndocker pull wordpress\ndocker run -d --name mysql-rpi -e MYSQL_ROOT_PASSWORD=admin -e MYSQL_DATABASE=wordpress hypriot/rpi-mysql\ndocker run --name wordpress -e WORDPRESS_DB_NAME=wordpress -e WORDPRESS_DB_HOST=mysql -e WORDPRESS_DB_PASSWORD=admin --link mysql-rpi:mysql -p 8888:80 wordpress\ndocker run teacher:5000/densuke/hw\nsudo shutdown -h now]\nsudo shutdown -h now\nip a\nsduo shutdwwwwwwwwwwwww\nsudo shutdown -h now\nsudo apt update\nsudo apt install -y python3-dev python3-venv\nmkdir sample\ncd sample/\nls\npython3 -m venv venv\nsource venv/bin/activate\nls -al\nls venv/\necho venv/ > .dockerignore\ncat .dockerignore \nvi chika.py\ncat chika.py \npython3 chika.py\npython chika.py \nsudo apt search RPi\nsudo apt install python3-rpi\nsudo pip install rpi.gpio\nsudo 
apt install python3-rpi.gpio\npython chika.py \ncat chika.py \nsudo apt install python-rpi.gpio\nsudo vi chika.py \ncat chika.py \npython3 chika.py \ncd ..\ncd IoT/\nls\ncat 0j02017-1.py \ncd ..\ncd sample/\npython3 chika.py \nsudo apt install python-rpi.gpio\ncd.\ncd ..\ncd IoT/\nls\npython led2.py \npip --versioin\npip --version\npip list\nsudo pip install rpi.gpio\ncd ..\ncd sample/\ndeactivate \npytho3 chika.py \npython3 chika.py \nactivte\nsource venv/bin/activate\npip show rpi.gpio\npiplist\npip list\npip -v\nvi requirements.txt\npip install -r requirements.txt \npython3 chika.py \nwho\nwhoami\nps\nbash\nbash echo hello\nclear\npip list\npython3 chika.py \ntee\ntree\ndocker build -t ${USER}/chika .\nls\ndocker build -t ${USER}/chika .\nls\ndocker build -t ${USER}/chika .\ndocker run -it --rm ${USER}/chika\nls\nvi chika.py \ndocker run -it --rm ${USER}/chika\ndocker build -t ${USER}/chika .\ndocker run -it --rm ${USER}/chika\ndocker run -it --rm --privileged ${USER}/chika\ndf\nip a\npip install bottle\nfrom bottle impoert get,run\npython\nps\nkill -9 12136\nsudo shutdown -h now\ncat /dev/urandom | od -x\ncd sample/\nsource venv/bin/activate\nls\nip a\nls\npython s.py \npip install -r requirements.txt \npip list \npip --help\npython s.py \npip --help\npython s.py \nps\nkill -9 1915\npython s.py \ndpkg -L python-smbus\npip search python3-smbus\npip search python3-smbus | grep -i smbus\npip install -r requirements.txt \npython s.py\nls\ncd ..\nls\nls IoT/\ncat IoT/led1.py \npython s.py\nls\ncd sample/\npython s.py\ndocker build -t ${USER}/sensor .\ndocker run -p 8080:8080 -it --rm ${USER}/sensor \ndocker run -p 8080:8080 -it --rm --privileged ${USER}/sensor \ncat /sys/class/thermal/thermal_zone0/temp\nvcgencmd measure_temp\nman vcgencmd\nvcgencmd --help\nvcgencmd measure_temp\nspufreq-info\ncpufreq-info\nsudo cpufreq-info\nsudo apt update\nsudo apt install cpufrequtils\nsudo cpufreq-info\nvcgencmd measure_temp\n100 \n1000\nwhy\nwhere\nwho\nwhat\nwhen\nsudo shutdown -h nowq\nsudo shutdown -h now\ncat /dev/urandom | od -x\nsudo shutdown -h now\nls\ncd \nclear\nls\ncd sample/\nls\ncd ~\nls\nmkdir testweb\ncd testweb/\nmkdir static\ncd static\nls\nvi index.html\nvi sample.html\npip install --user bottle\npip --version\npython --version\nwhere bottle\nwhich bottle\nclear\npip --version\nsudo apt-get update\ncd testweb/static/\nls\ncat index.html \ncat test.py \nvi test.py \npip install selenium\nsudo apt-get install python-pip python3-pip\npip install --user bottle\ncd sample/\nls\ncd ..\nls\ncd testweb/static/\nls\npython test.py \ncat test.py \nls\n\ncat index.html \nps\npython test.py \ncat test.py \nvi test.py \npython test.py \nmv test.py ../\ncd ..\npythono test.py \npython test.py \nmv test.py ./static/\nls\ncd static/\nls\npython test.py \nls\ncd testweb/\nls\ncd testweb/\nls\ncd static/\nls\npython test.py \ncat test.py \ncat index.html \ncat sample.html \npython test.py \nip a\ncd testweb/\nls\ncd static/\nls\nmv test.py ..\ncd .. te\ncd ..\nls\npython test.py \ncd testweb/\nls\npython test.py \ncd testweb/\nps\ntop\nsystemctl disable shutdown\nsudo systemctl disable shutdown\ntop\nsudo systemctl enable shutdown\nsudo systemctl disable shutdown.service\ntop\ncd testweb/\nls\npython test.py \n"
},
{
"alpha_fraction": 0.6347305178642273,
"alphanum_fraction": 0.6706587076187134,
"avg_line_length": 10.066666603088379,
"blob_id": "65286ba2a15d86b8948b2f14d42d8aeb35d29849",
"content_id": "203bead5c010937ea0d0d6ac84a3db595e93efab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 167,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 15,
"path": "/IoT/sw1.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\nSW1=7\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(SW1, GPIO.IN)\n\nwhile 1:\n\tprint('SW=', end='')\n\n\tprint(GPIO.input(SW1))\n\n\ttime.sleep(1)\n\n"
},
{
"alpha_fraction": 0.5354201197624207,
"alphanum_fraction": 0.6177924275398254,
"avg_line_length": 19.233333587646484,
"blob_id": "92c7ce04b951de663e0efe431128ffedc4bc1951",
"content_id": "70f7295675719b0843c45845920338a15ab5bc82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 793,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 30,
"path": "/IoT/tmp102.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n# coding: utf-8\n\nimport smbus\nimport time\n\nbus = smbus.SMBus(1) #IC2バス番号\naddress = 0x48 #TMP102のI2Cアドレス\n\n#I2C data read (block)\ndef blockread(reg, value):\n value = bus.read_i2c_block_data(address, reg, value)\n return value\n\nwhile 1:\n temp_raw = blockread(0x00,2)\n \"\"\"\n 温度データを整形\n バイト1は上位バイト、バイト2は下位バイトで\n 上位バイト T11 T10 T9 T8 T7 T6 T5 T4\n 下位バイト T3 T2 T1 T0 0 0 0 0\n 上位バイトを左に8ビットシフトし、下位バイトを4ビット右シフトする\n \"\"\"\n temp = ((temp_raw[0] << 8) | temp_raw[1]) >> 4\n #分解能倍する\n temp = temp * 0.0625\n #表示出力\n print(\"temp = \" + str(temp) + \"℃\")\n\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.6725663542747498,
"alphanum_fraction": 0.7079645991325378,
"avg_line_length": 13.125,
"blob_id": "c2362dc24f20f97acfc7d353a7aee95c6a523afb",
"content_id": "49debf46381e93d9ffa2d32fc0991db892c0bc48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 240,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 16,
"path": "/IoT/sw2.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\nimport datetime\n\nSW1=7\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(SW1, GPIO.IN)\n\nwhile 1:\n\tif GPIO.input(SW1) == True:\n\t\tnow=datetime.datetime.now()\n\t\tprint(now, \"SW1がONになりました\")\n\n\t\ttime.sleep(0.2)\n"
},
{
"alpha_fraction": 0.45028817653656006,
"alphanum_fraction": 0.48198845982551575,
"avg_line_length": 19.41176414489746,
"blob_id": "08eb78d2467fd7eb7e9719355ca2b8c6812367ca",
"content_id": "59d2cf01ac7c9910d8a8c11178a0fa1339701995",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1434,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 68,
"path": "/sample/s.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport random\nfrom bottle import get,run\nimport json\nimport smbus2 as smbus\nimport time\n\n######################################################\n\nGPIO.setmode(GPIO.BCM)\nsw1 = 7\nGPIO.setup(sw1, GPIO.IN)\n\n@get(\"/sw1\")\ndef get_sw1():\n result = GPIO.input(sw1)\n ret = {}\n if result == GPIO.HIGH:\n ret[\"sw1\"] = \"HIGH\"\n else:\n ret[\"sw1\"] = \"LOW\"\n return json.dumps(ret)\n\n######################################################\n\n@get(\"/end\")\ndef get_end():\n rest = random.randint(1,100)\n content = \"貴様の命はあと\"\n content += str(rest)\n content += \"日だ\"\n return content\n\n######################################################\n\nbus = smbus.SMBus(1)\naddress = 0x48\n \n#I2C data read (block)\ndef blockread(reg, value):\n value = bus.read_i2c_block_data(address, reg, value)\n return value\n\n@get(\"/temp\")\ndef get_temp():\n temp_raw = blockread(0x00, 2)\n #温度データの整形\n temp = ((temp_raw[0] << 8 ) | temp_raw[1]) >> 4\n #分解能倍する\n temp = round(temp * 0.0625, 1)\n ret = {\"temp\": temp}\n return json.dumps(ret)\n\n#########################################################\n\nPIN = 21\nGPIO.setup(PIN, GPIO.OUT)\n\n@get(\"/led_once\")\ndef led_once():\n GPIO.output(PIN, GPIO.HIGH)\n time.sleep(3)\n GPIO.output(PIN, GPIO.LOW)\n\n#########################################################\n\nrun(host=\"0.0.0.0\", port=8080)\nGPIO.cleanup()\n"
},
{
"alpha_fraction": 0.7245283126831055,
"alphanum_fraction": 0.7547169923782349,
"avg_line_length": 16.66666603088379,
"blob_id": "885c9b9fd2866d458c8b0a936a85d7c6aa56fd1c",
"content_id": "c162f39a88d4de20e134096434ec77eedbb27d88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 343,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 15,
"path": "/IoT/sound/sound1.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8;\n\nimport pygame.mixer\nimport time\n\npygame.mixer.init()\n#音楽ファイルの読み込み\npygame.mixer.music.load(\"se_maoudamashii_chime10.mp3\")\n#音楽再生、および再生回数の設定(-1はループ再生)\npygame.mixer.music.play(-1)\n\ntime.sleep(60)\n#再生の終了\npygame.mixer.music.stop()\n"
},
{
"alpha_fraction": 0.6859903335571289,
"alphanum_fraction": 0.7149758338928223,
"avg_line_length": 11.176470756530762,
"blob_id": "24caf6fe3f74d6e022a0f82bc5639a57f2e5fc52",
"content_id": "ee3f4417e4054f5c2e55878effe5e5dc03f2a8df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 17,
"path": "/IoT/led1.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "# coding: UTF-8\n\nimport RPi.GPIO as GPIO\nimport time\n\nPIN=21\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(PIN,GPIO.OUT)\n\nwhile 1:\n\tGPIO.output(PIN,GPIO.HIGH)\n\ttime.sleep(1)\n\n\tGPIO.output(PIN,GPIO.LOW)\n\ttime.sleep(1)\n"
},
{
"alpha_fraction": 0.6690647602081299,
"alphanum_fraction": 0.6906474828720093,
"avg_line_length": 17.46666717529297,
"blob_id": "6925f7610b23faccbf8c70f897021e291c5fb86b",
"content_id": "f50b70ac120fdeae76cb579c15ba39edae075dc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 278,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 15,
"path": "/sample/chika.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nled_pin = 21\nGPIO.setup(led_pin, GPIO.OUT)\n\ntry:\n while True:\n GPIO.output(led_pin, GPIO.HIGH)\n time.sleep(0.5)\n GPIO.output(led_pin, GPIO.LOW)\n time.sleep(0.5)\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n\n"
},
{
"alpha_fraction": 0.6896551847457886,
"alphanum_fraction": 0.7356321811676025,
"avg_line_length": 28.16666603088379,
"blob_id": "79cbb62ce4ae9382c266047fa67c748a2189f019",
"content_id": "22c16ebe769dfd739954b9775e478d4d57b9a106",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 6,
"path": "/sample/Dockerfile",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:18.04\nRUN apt-get update; apt-get install -y python3 python3-pip\nCOPY . /work/\nWORKDIR /work\nRUN pip3 install -r requirements.txt\nCMD [\"/usr/bin/python3\", \"s.py\"]"
},
{
"alpha_fraction": 0.6271929740905762,
"alphanum_fraction": 0.6622806787490845,
"avg_line_length": 19.636363983154297,
"blob_id": "7739a2d96b1516d5861c213da9d6e628f4e26b63",
"content_id": "1a2b383dc75de47b49e61b873b0d35a176432fdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 11,
"path": "/testweb/test.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "from bottle import get,static_file,run\n\n@get(\"/\")\ndef index():\n return static_file(\"index.html\", root=\"static\")\n\n@get(\"/sample\")\ndef sample():\n return static_file(\"sample.html\", root=\"static\")\n\nrun(host='0.0.0.0', port=8888)\n\n"
},
{
"alpha_fraction": 0.5641548037528992,
"alphanum_fraction": 0.590631365776062,
"avg_line_length": 14.0625,
"blob_id": "f8807aa3661917a02216d0aefabf2b44242538de",
"content_id": "31ea6ab9dcd85c79480fac67b3babba5aa943845",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 539,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 32,
"path": "/IoT/sound/sound2.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8;\n\nimport RPi.GPIO as GPIO\nimport pygame.mixer as pm\nimport time\n\n#PINの初期化\nSW1=12\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(SW1, GPIO.IN)\n\n#mixierの初期化\npm.init()\n\n# 音源ファイルのロード\npm.music.load(\"se_maoudamashii_chime10.mp3\")\n\ntry:\n while(1):\n if GPIO.input(SW1) == GPIO.LOW:\n\n pass\n \n else:\n\n print(\"歩行者信号音\")\n pm.music.play(-1)\n time.sleep(10)\n\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n \n"
},
{
"alpha_fraction": 0.5771230459213257,
"alphanum_fraction": 0.6273829936981201,
"avg_line_length": 14.594594955444336,
"blob_id": "3419665c4e2e1cc4bf18987b713d572ee8c87718",
"content_id": "409c56768c22d314058eeff403fdaea233a455d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 577,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 37,
"path": "/IoT/0j02017-1.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!usr/bin/env python\n\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\nargv = sys.argv\nif ( len(argv) != 2):\n print('Usage : python {} number[0-15]'.format(argv[0]))\n quit()\n\nLED1=21\nLED2=20\nLED3=16\nLED4=12\nchannels = [LED1, LED2, LED3, LED4]\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(channels, GPIO.OUT)\n\nnum = int(argv[1])\nprint(num)\n\nleds = []\nfor i in range(0,4):\n if(num >> i & 1):\n leds.append(channels[i])\n\nprint(leds)\n\nfor i in range(0,5):\n GPIO.output(leds, GPIO.HIGH)\n time.sleep(1)\n \n GPIO.output(leds, GPIO.LOW)\n time.sleep(1)\nGPIO.cleanup()\n"
},
{
"alpha_fraction": 0.5937334895133972,
"alphanum_fraction": 0.6004224419593811,
"avg_line_length": 39.8776969909668,
"blob_id": "3746fb0aefdc84fe21aa6c62dbca92251e57abac",
"content_id": "f99cc73abb7346c5a9495d5165da2c3394760d08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5681,
"license_type": "no_license",
"max_line_length": 459,
"num_lines": 139,
"path": "/.ansible/tmp/ansible-tmp-1525152550.47911-92139290921372/apt.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER\nimport os\nimport os.path\nimport sys\nimport __main__\nscriptdir = None\ntry:\n scriptdir = os.path.dirname(os.path.realpath(__main__.__file__))\nexcept (AttributeError, OSError):\n pass\nif scriptdir is not None:\n sys.path = [p for p in sys.path if p != scriptdir]\nimport base64\nimport shutil\nimport zipfile\nimport tempfile\nimport subprocess\nif sys.version_info < (3,):\n bytes = str\n PY3 = False\nelse:\n unicode = str\n PY3 = True\ntry:\n from io import BytesIO as IOStream\nexcept ImportError:\n from StringIO import StringIO as IOStream\nZIPDATA = \"\"\"UEsDBBQAAAAIAKVzoUwtQUufYQAAAHcAAAATAAAAYW5zaWJsZS9fX2luaXRfXy5weUsrys9VKMhOLy3JzFHIzC3ILypRSK0oSc1LiS9ILMngigdT8fG2SIIaMEGd+Pi8xNzU+HhNoLqy1KLizPw8oFIlIz1TPUMloFhiaUlGfhFIyDGvODMpJ1VHwTMvWU+JCwBQSwMEFAAAAAgApXOhTJ3F8Ws3AAAASAAAACAAAABhbnNpYmxlL21vZHVsZV91dGlscy9fX2luaXRfXy5weUsrys9VKMhOLy3JzFHIzC3ILypRSK0oSc1LiS9ILMngigdT8fG2SIIaMEGd+Pi8xNzU+HhNLgBQSwMEFAAAAAgApXOhTGeuiAgEKgAAA5oAABUAAABhbnNpYmxlX21vZHVsZV9hcHQucHnNPf1X40aSv/uv6JiXJ3liG5hJcllenD3CMFneEoYbmOT2CKcRtgxabMnRB4xvb//3q6qu/pJaNkyyu8fdZkDqrq6urq7vbu18tluXxe5Nmu2u1tVdnvV2xOjFSEzzWZrdHoi6mo++wSc9eHGUr9ZFentXHYhwOhAv9/ZfDsWbRf5Y5HWViNPTI2j0c5FWVZKJm7X4Ma6qu+RR/JwuFmm8LMW3S/nk3+eq03iaL7+DXt/HZTITeSbW9VIs81m9SMSjgXSRVHfip3QWL8S35f0D/RJXYp7M8iJeFflfk2k1zovb7xDNH87eix+SLCmg0Xl9s0in4jSdJlmZiIdX470vRFgmiTh6e/6Xk7MfRF6Iu6palQe7u4+Pj+PbrEZAuwvZo9y9XS1G0GtcfawGvd68yJciiuZ1VRdJFIl0ucqLSsQ3Zb6A6UTy76GYpQ9pmebZUKyKNKugQzat4O9eFC2TKp4u4rKE7hNRrVdJr3d4dnHy/elx9OPx5eHrw8tDePG3ABvO4iqOHpICYQUHItgf7wfDnvD8BGUVV3UJja7w15tFAuMmxTyeJsF1V5d6hegms+hmjdCneZEEf+/1Xr89ev/j8dnl4eXJ2zPAJQiC3mg06smFORDxquqVd9AzmiXltEhXOLUDWO8svk1KfD1axdN7/KNntwAsRrrVSQjtBkI1hFWpp3ciLsUcluR1cpPG2e77mzqr6sG4xySI4tksmR2I/h6syMt+Lye4JQLO4mVyQNNsjIg/I3EoFmlZiXyuBqQO5RAe3wMzhPM8HwyRGdTrcpVM03maFOIxBeZjBOz2k/3xHqCmqHkG8KDtYjaNixlMZ54Bt0/vBqoHzPbFQMTZTMGyGrtAsVmRiHhR5kIvkRwoho0EXAmLrBAFDru/FdfwFhlgEwlOslk6hSalgD2ITdIC9pyeL/YeAxILbAILA+wPPI6NYadhD/lCY5+WIs2g12IByEG/mxqmM5olK7trouHTaxh1lWSzJJumwBqKcjhZA4onMI/rBQiaFcBJsooeTu9y2JU0edhw8HQo9KBDRm+oehBJ6hVsoCSaxtO7TZR5V2eEa/JrnT7EC+wNjEJLNrpNKgYzEDcJ8GZCTfMVyBeEAlOPQUYlogAgwL2ruKDe9tx5ctQeeQyaxaJMoCmABconKzlrFAYH4ibPFy4RgiwP4AlNIwIE01lUpRvZ/T1hTEjAJGRPkc5FWpUiX8yAq2FZ5aRPwibcwVhc3sHyyt2FC10CEdIM/pnm2aw0PH9Y4lQPszIFeSNejr8cAkhkjOUKBGhaLdbYFfe6vRIDxASeN5Z6D/5c1cVt57xGpElQPkwTagkqCscHpObpbS3XQ8zTBbBeKleAVQkxt5pIlaPsIQYaPInu/HdUJIsEdt8Gsh/lBXDfComEwyAGR+EINhOTEkUbyz0UBESbFRAWlEQO2k5uCuYWGA6U4xK2S/nsEUdZPmIwIwPGgwZu3HUCb7h1KXRzIxvKsYBmWT4QsxxIm+WVaq4Zwd/r+7Wi3VAzySMuYV1K3ixRZnKTEreFtbVgbcs17I3lWFzUtwCvsmCTzMgSkEVNyWEkZNCmQHDdXm9ipw0UDjpITP1GRDzJUVJYyJWlDQBoggxZgK0T1VlcQ8esIgkMGgz7BXqMvr3fiEKztEQdDjIvmd4jLW5y0EKWUCkDUaa3WVyRpMUx8eU0KSrQWlLMSymk1+gxuQGOK1B+4/M10XCWP2aLPMZ1Q9tm3DfztnF6kdK6v/CIyRWYMogg02U+EPNFfNugSS5lDa4DorpIs8TMPnjxgsZKUSzCGlRFDcbSgyVkpQ6m2ecVUhFMOxiA2uZrsc7rgpllKMlfCbBPalA48WKZg9KSrAJSGjhvNn7xwthD57SnWdmiXRgChkIjjltlmZOCgl+XUuQHT5Ea/oXfoJ5vMxpnbph8Gme42QBtBwiLZ/h/mM28JomIeFRAjXi1wsVIsoe0yDNg+oo1+JLMLqRhWghYdJrifUICun6a/hGiaYa9HO/3Sc3eFsBCmyY3R45H3VfGc7RZwDIFciLjIiJpVc8SejViWGO7K8xx0dEHX3n7wAaqWn1Im+ObdpfgDDjrQFJWizluhdpamoNTY3ahDCO9MmHDCFcBST0OmkAv0mzK6lEhIRdvJk2BOUqpGwCMq6+nJrecsmfG/hXYlyvgGEdy6pJouHRDJnqwRqlx7ddxYEVGljndZUbPZtSU5QIJRDIw5MYeg+HOwhxeBKNc9F9D64ODtwz5YKIEJyptQKIvtjUCc6VvKMpt1PaGrYEWMshaIiWhoS2rGdn87nSdwYfOKKTpb3iuntnD6OcxiiGYshhDU23fsXQrkiUsN+y06R3It7HpBuwI7iVaUFKAV3copix9CG5xslyRaaIEMuKiDE5WZLCxxyL8ie1v4gBgqv1Bj1UwiOUCmQL
YqWSx32aXr5FdQJzkiO3Dlj0rjYOhkG1FnRHPaiN+bUQVMj+g6BhcpUg+ThOY1onlHIzFCdqjmZS4LJG16s/AYpSsZXbmeZE8pHlNLKWcD7mXYBZTEN+gHnAbITyCm7ITaWY5hJ30iFoBxSaawI+Zsb81714kqA+nNUrNWBtK87oA1ApHAfwmaanRfhrtqakkzyKfguIrklVepjC1NarfIqmKFLSb8eSkAUxCH+mc5dAvu5Xqz6j7T57DlzgHXKhou9h/i+tphKj2h+a8FvGiSOLZumnDfSplaTtHII8iELEb0HpDHkRd0t6dG5kMWAA6/IiE8G8gEu9HUsE09EjIENsIBWYofxcvB/arV867V/xOa4SQ5QHAH/TQJMgLGK0VZgv/fXn7yH8M+j1QIonC4PKuSBLlnqqFgT2L8ZejEFUGcNxRiPqCDVm0y2lHSX4caDkj/WNCbACgcHe+HL8CXaOUnXGgBx3qbqznhwKjgKGy5HFhsQPZq6jWMAioXYmKrZ9HFZmoQRoVizVZobBK+C+qcI56obx+yNOZ7Ae2Im0L8gSh74cP796fnR7/dHz64YNtNomHuEjJBIf++2NFvkRzC4sNNGVVlMY4vkCl2+SjoAgQYA/yAyMgFdjggJmQRt00RhMilbt0geisYWMDUkVe394JMEvTpJBTCNF+AsSxBQerBC3JHH9hhB5JIzLtjGQmQXAXg/juz/M+KyGwATMZrwKFFoP8LDKkGq44bFNwBFbKniEBXbA0loYMTSILGCr3lT6I7Fq2FRd1Ap8GjWJaiPF7tLozUBFMMY6XoXqskiJDIYFTfoxhLVSkDADxnvgZzGHJUjyFD4s8Xx3AEsYEQwoaUq4g9gAtYCJsnWYYmp3VgMh6yFJoiYFHMvKTOZh4Ka4+LDo6NlLsYuRwBjw/xYAGu4AfkH4ftLLCEGnv+D8Pfzw/Pb5QIVMZlFTBGC23UzLtMSJDO4wXDFYHlocxRzWxYhEmgcBb+ssJaaFR3dPjnDCkeEXAee8Q38kNQJsMFwPnDR7xLAVQizUPRlAmsvNLNm1N+M3aGhItuW1Oj08PxL7B4Z20FHyTkfDhBcOWIZg2+nbkjOB0E0OGPHWQ0AtKmQzB/nhvL7AjwN3AMQq7t3EEK8Bm6WOzqnKZzFDZbZp97CPzNMKoNTnPLNsEh5hE+WudJP+TkJREydLGkgDaKEq4tobSAasWuPKprNRAFojXB6c8++vsfvT1CP4L8gTdVpxCvx1u8RDX6d2BvicABiLTpj0bFI6Yyz1B6vb4/Rd9z6jNZX0eZGUHkeNlbQVvVLnvBpX7nmCwC7tzjci4wqhz3240gTZ9Ff5cxJjzyMiXXMrQNYjcV1/v7alYrohv883j4dNW5JlgGFTOUVRaLqF0ETPErptQ3cM5zqgIHC9t6HhwgUf6Ob6ZPTx6d2K3Wq52l2t+jU39YqOdryCF2Ck70INtyiXt9WzDkmJvNCwl7TIZG3cRV1nK5GMMRgZlTjlnO1qtfo32xvuj/QhAu1NieQyaEpyC0vLX1IBEensw45647MaQHIqQbUGhWO1kKPOwCZLdTQmTtOW748v371R6UXKYZAdPlEwxtBSw6O5xUwwpgTlFHcAVqoF2YIGX9RT1/ZCSFvkS+5XMW8aYhynKxSJ6HojLok4cPDqSLAIfKxuadpjEikV+uIfI1tk9uJzZ4LmIpZzrUkjtf/nyq29efvPqy2/2YMOV1SyvPZ6NgKcrUPO0qpid7Rj1Ea2mLElmtD5qzLJCAe4M238HjhBKdcWgaAOV4/H4l+x7ZGqps3QYoAKvgl6qbpxwsVxmfIvmM+xhMAEpRvmxKmLDkcpW08b/wS+ZYFvm5egGCAYw+kiCpCg8JICnwAu/OyEO/7S399VX3xwoTA7EEdnZaMODfkUXAV0T2LVL9AQqNrySIigpBLcWv4KxiZlkkCb5MoZ54H4asuLff/lvY9i68H9Y5EDdgwvqj/nkgM1ODIbfLvIbtFpRvhIlYNsIsSPO3v7HIZY9yHhQRdYVxs7nGDfF7IBlod/kaAuu8H9pj2sX1Nue+mU8TxcwHfVnGJCKT4Ih6S5xeH5Ck5dFBrCfq/5QvKGRfpZdBj0FGzcE7hb1N9v46s8UhkGHrVQPcv1bofuUa/2QQMkaDA6fjWW8KaqrdFGOb8BlmqqaDE44/UgNNnSKKmBE1anKo5t1hXUB8FsWI+E3dK2LRal6zhOYWARPcC0Ozy+ROUhAgaYCUmVgc8HT6Pjsp+inw3foIszSaRUSs70+/v7k8Cx68+7t2eXx2etJkOUZKYKYlp4rPrjV+buTt+9OLv8yCaaYMpzGC36/I34G3pvCVszgv/HKuKrk1qkggtohuRPSAmmELhpuCQYGnLSM79G3LcjlUzm7IxmGSuQeAg1PniN7w1KUnR6e/TAJjhiv06Po8PTU+fvH44uLwx+OL5yHR5d/OT+WT4CDXp//+Yfo7TlWoJA/tSFwGxBlfzi+jP7r+N1baNz/JdtTxsZsKPaaYYU+dji5fP/62OmhRdG2rtHpycXlRXR+ePkn7Lv7EBe7i/RmF2i8S4JStnp//vrw8ji6eH90BPONLi7BO/R3WiVFmgM/7EodMmI5NYJBl6t+r/enQxjtL5d/ensWIWtNpK6qirWUg6oKieWd+RNtAQwHNh5HYKz0VGyWnh6j8JTAWmO9oXByD3P263KsYl3IO1d71+Jb8Ur2c/oEJtQV9JKFSpr72rySjXo9WFe1BFG5WqRVCGhi3mXAFhZFiCaCn45lm2ACgolj4MpNmYgzMHrlpOcCLO+Q+g7Ed2JfR/Ot1vT2av/a0hb8bO96qC1+F0N+WiKSUqDDL/QfLnKQA+k1skbESZQJTE8RU1qWaBNAd/WwlHPiZTqsQDeBJ59YKyX3KZjeNdojVNVhOWpWsNEu1bF7AoMoO/hc/rujUWQ7zgbz3UTsjf9t/AdT/6FnG1GMZCLClTMX+VIBp6qM1ZgKpSa0jki4gQPNIhHQh3Mdpymn11y4NOigm7rQ+6IqTEetbsbTO9DD4Qt7wAHIHGv51WP/qkcg7oBBklCXheWYI1CvfWvPcHn3jZ8I6Cks0IDMNDvaArix36h4MFwOhWZn3ZFoPZQmnW9mO+KWLZeWN8NdwVx/TNDpKZ2ChB3QJI+jRfKQLDT25yqldoM1naBj0ukdqtAKFqy0ekoDE6yqxUymSXV8LibRSbk1hYfV7y69BTKYQZsbgAd2WBwYiiZyxbS51i8Xi8h6P47cZqRFKW2jZrlyBrEX98/JurGsKG9pkrBVVClLYF63lkEjXOQPKSivSKszhR4sUyTfgsbWr8PWPrQwaAFrj0cty8jE7JXG8DWUmpUMx03NdqRx8ZAWFRjPmmwaeU4UZprlhkDk/B7TEJKTyT2LC2RMEwjw4D1n0sAEeDA1U00Wsp5IhzRpMcCl2fdThFaC0Z60yQiKpbObpuTQItdQ3MVlJFN7E9+2ZTbesHcnmovaS20RxPhfna2opbvkZJD42rF0spu78/JzwXI8j9NF9Ncyz8JleTvpn+WanuRDIB
cEn5cBpREfoC1C64vP22oFf7aIUPWzY6s7pUtBZaB2lQVHFUqlBl+WHjjLuLgXcWkzvOSjykk3YiholgAoymKkVQsSk4+INCQqNylmrCt/H/uftvC2uYqYHGSwXij5Bs2mPVtWvc/SaT5LXif43wYlbXiaJ7ZQ3+5DaCIBpT1jLYcphqK0D6+G9LozDIJh6eis156i2iwNjpWyezytiyLJqkjLWqWJjt6/A4sd7PeTM/j39PT49Ya5NFgHzaT/xHkhE61B+cX3lhawpH8LWeYePGXBaULwQFChxQAHy4vNIKu8rEZyJK6GoXycOyGQPOBcVGtngA6K0NpbjwyLbd89n26HbkXoRHdRFj1D9xrWW+xzW1EbEUECRNUzIBQOVHAoRBlRpZasbCwyPj70n6CpzT6zXBF7/ykr8VNE2VbgPym3xl1GyipMF3G6dEvYraVTwVjrJIBMdM+UpfeUxdUklv+GLYR9xN6GoyVusZQOhcYspbCsSsVbiHdhucVIQX/CwAW/wuUez1J0+w8azrC9YCR3/Uq4C9lOLXxTgAjaoDK6AGKcPHTnxz6JA6LF3l3wWMSYZ/8SGeNB6b3ByI0CtHl36AdlWWjsXCUfV7C8kZ3NCu0/iAsKqktgv0q9YG+6o7GKfAzZkLObYWhJZjiRTa03yKg2eEPaRncwqjpqMj8v+4H4pcsq/Fw4kxvacO00iDPeGEPuq3DgUowjPBH6bxHLh0R5p/iG5ThTbUfIMltd4Ivlu75KHKpDQceC6k7iqXYOKCTPsGQVChffzvMaKwPFRb5MKupPR7WwgOXAbk9KAx0/MsBlyAEcQ6C5PnpTJGMp6PQcVPPBNYMKL1TReplbZiElBQ4CWYteL6o0LgBDRiTRhTNojYG9AHaGJPeOiQqTYTBLpuiD0Lyo0FSU6RLs5qKTVlg5Ik2RKr5X9InF+TuqqZrNZAqrzOWJJ13J40JRdFL5y1uYUn1D6UsOqqt/RzK4Xo7wHOJuCns9KXf3X371DUHIkkfFGEjra2UM8DPDz0x84iBdTJS125FQkMaBajd0goKeUKRq2FJJWGQn06EZuqUyO1ji+Y24kCcbMO1FBUhAwXBG5xsHanwHGMwJGP9/8PgpbPIXf7y6/iwYjCkfUCZ0ijRsoD3weTJ3WJyWWAzDnTC3gRwJ/IV4Ec6Y2HnRZzPSA4vcbUwhw5aRWRH7lE/NAA7SV998raCAz4kJvce8uG8BhBn2D/p8kKi5Bn6NtyPekCkmT3WZOfnz2So+MPVrQ681ZrFERPhISBMRwYpGekhvP1ZeGNnsMMeeCH2rFEE5YOg21nGmLP81PhBvvvlyvzV0W9sboh6BXYD1Lopsc40gqtHpoqZoi0Hvd6VrLF0wOdzvTlcH+ka6NgnYDkZoEd/wC9xRh01WHrRBwRLi8jFE/3w2xD5C8Mbd8IcT+wBt2pIN7WBPN0NYMnacfKySbBYypo1wihcC6mHrUDNnulvN7EHiFdYQtKUrtmNzwWqug9YgCKNZOp+HMpnJ8g+f4JF6lblVb6W9hIW6ZTjwxa/pkB2xBVVIUvAGRI0qIjCV4GqUSLab0B8gmmfJxzB4l5T54sGpjEgTrJkInDD+T/GibnKzJwSwBSc0dJweG9DqLsmwIoEbsJMIgVh/FHiMbk2GUPfgo/3NNAbSqNlQ8TRNiY43+vLVLt2x7wRY4mMVpjKFjUW8yRKZLcnAN8AiuhCbDhybK7jaG/3h+gsRaoUx+KPK8gYSxMBZpYsqX51UfPzwwIcERstooF6DAl9MxH7PaayfMEv/LQBLHv2/Gd668EsWjP+apxLalQF0oPpfD/7OnM8+SNsSViHVZMLhvkYJ6AQTou4tEO0qS9lInqJgOO4qW4b7xOfc2Dn7gduXSuEi2BgKQVMTpp7YZ0p8w3sPVMqGKkMMuLDrxPahlQNht8iYkM91NrRXZY71t41KYDo91eYOOpXBXzbE+fhC3jpLbc9ECcg+ynqKbsuxXXGM+bE0qxOjbpw0QNuYbQL5pJTDM1MNQJnQOjqOwaJMOiP2wg+woC/UR4aoklohNGjZ7k+kkZ3VaMC0L+RorhedBb0j6KX0G7siYsDOqKesoERaNoBpPT3Uh7Wt+Fms9aY6rSstDhSLLqAG2J/1gRLAE4vDrHM9dA5bnTN5TFQEXR5lXXtgAxoKJ7qewDrdCj5wSPS4ScCZTPHA9TJFNQEUAr2gSk8MqAoty9KCSK6snmXqEHPwCQtrbe1AsARVPQc6VNzOW6J7ZY78qx95dGwtwVmH+wPdqm3zOH0CJyK8HJO+i/BoldtJPo+LWzkQuOA1UnjTOE4Xd5zmQU71Y57LYfDvkXywaSS3mztU+8if+rHfyOHwiTpivWm8Zk93xA5BOl2i+u1jmGot4L/6/3W5NfyB7BKqsrCjH1874ajSVTZDs5JDQ2wd5rNsX8/adCHzuRYPvwUb+wirFzObXt57SmxEwQ7pi1El1H4KGz2GLry2hYCiihRuF+wcSx8PDjhlM3pnbIss71tk7ISOEeznA8czD5at6QeN1g3mDmUO4O2Fope7obpvbWgjNKLWo0brvgFYgPEATsgQi5OBTZbjoqbYP5ZKhgBn4MoM8mncwdincV2eTQzJHf72d/2UKyIa2QG8WUwVoU7v4uwWbCmZXpZF5hPCW1ZbT+B/Q4I8MRYvY100Imp6tHbixB6S/FpyYNHXxdps3iVAFSLXoBuRYjoppgYJNqpDOfKQhvElKVQ7OUtn4tKIVFForJOhyqIon4P4uUFjB/4hU2goa44mfU6f9dkoAbTJFqZVRgA3aYbu7F3Yx8d9mbsfqLZYk6N6fEGsRGCl4OCt6QzILvFUUUVRpJuraG3EZxM7h9SKLMB4kv44JpGeie6OMpH/OH65cbNl2wHH8tGfGbj+ikXEUhuKJOWGnr069G9CV2YqsssFdNgNFEAZAXZK/BpnAIx17wtKlTC90TJBPK0ky6Y8F624VWk7fp3ccCmZXsK2dUNhqMkWRusznH67vzHst4DQXNqMMKP3qiodEAZG91IsbsHrhMJBo1TWHpvDlFsGPrTg9dshKASEl8+Q6jxglleUGepxnhJ1MqBUf6fJlsy7WcAjZMtwcMXw2vVZvoy6A2hL4p4p35UKttbVmwyeTLqywTvitbqwBuv31S0io11tUZBFR6VMFDvZWBOnnUmH9DIqckz/tFwlicSPdalrVizPSMHjEifrKrz2UsZl0206orCRIZvldjnpYyYuXZiDVT3YLRxoF9Nj8eNPUx5i1wgf4e2e8jRQK9ODea84W4P7U/LR3ZU5I8p3TNIz+ziF6W4cfjxygfdwBYU8cDqjg5oZlTWWd3kjvOcKNRWdRYQZETSUbYOwuWRYm9EgQUsdvM/UjQeNawSUejZCPxlYY+3IkN2M71jAk40NitinaZsBj4ZcVm6fEZ89OYbCSV5fuTJXJVgHvR5ADRjzJ503CWcIEOrDYNxtYPZzuJyY+M+kAYI12ETqMe8eav14gm0exfc0WFvDcI6edIw1cv/ltLs54cULRRDdxKhZfoU6Iwz4c
cDVhZbX7a6oGcsqM1A++1WfKguAuT6SIv5IGtipESBN3B/2B9cNg7nLyVbDsJmunOy+3dsjEhrdZBAA8Lcsevb3yHBDpy9V9pr25fqir4MRNhGslXiWW9CX5hXd3MHUb1rc+N6szZXqcQ2mJfy7wVnQXe1mOCaa+d0jso+hx6PW101ZbILctKIt10ZBujINr5H0YECCCbkV9y2eEVMOKLyRcnIFbMphD6Qc/LuZcrIrNnP9IY+mXoK8Tiu5vbzOltfc9rpcbVR+F6PeBevDl//VwBxJIJ+BIAiCgYbfbAHPVAuamPMen8i3ynmQcQ9froNKfVXaoCtd8cxsRSsh0Z1T+MdmEP7loXsOE3aEzzE8b4DTnT+4Gp8Um39aCJdsulYY18eirIXa/PxPifladxdrMuAjCZ1+3QRZt/2XRXj/KUFrK2D6qydmyrhtD5kSuT4xVvr/IizniZ21JHmgMuKaLK14mZreM4NmWzXSpvCflM90SUi9IjnXJZDNFb6eDPTzJDRLmPZW3ryN3aXwbd/21vVvWxdSc7t27p+Ne8eF6dszGxIMz98iejWsHaLOvT5pT/j2w4a94E7P3QMN/u/mfQ/TW/N4Mtf7OJ7K23UhxwY4Lebn0AoyPy73pI8piAbvb6/BqEt9XeXvUHBhmzA4FuwYRWmfFtmqQdzV82qOfxTXI1Ek5+uT/PJqwUgWF7sn/HFotLr7eK9Un0wU/QjvYOnLpL8htmWt7HjvhTaz1ohYG02/5dZqnwhGQUHp82xtFA0+dMF02bivVKPUvt7aj5O8xmILUjaYvmcJ8MIiHybbKdAxoMrpjkYY9RtlyeMIDV21fw0vbdKZFh3sq8Gx5h63HErxHZ3E60S5TaAOnO0x+k7jBusV/fC/X+fmdkoMacnL2yn6ZV+mSRdPUtHJ+jFe//LH//3vX17g/41f/HLFmE/g9+tBX++nhnaTN4LL6ThL8DvbsrD/6XANdrIym1VR44cXnqFLFbaccf09mOwJPq+JXM5ToLtad/Kw4upurO75N1fauEvc/OkDJDo10YSE0gXraQLnsmGynQJOdPBkKP3Xzgfy26G+KW0iM4OWtg9a2j6gXWPBbej6jXNxV8sxjhvbwDhdnbUEz6kj2GZTDJ2dNbH/+OebG0znhqWhV6tBKZ/hYRkbWOzm37YkFpz7ijDkX1ey8M3to0WX6mTdWcS9nuAQg4UCE1Qmjce86baPpEewpbvHOlLXjIfyRA9lBtXVIclyNUuRHfJyTFsT/sJ4RBhRTCGKnNiA1Y7CA9x9SKXuAHVccJ52l64Butq/5jjrjrwiWGFCAhnRGFK9Gl35y1f4cbZCX7wv7y5mIGX8IK+Vht44NqWIw6+/vJejfP/+zcXJfx0Dll9/9dWrrz2nzwG/IVVd4+kBdUeYTRegb1Ld5bNJABzRjLrM8yv1YbdrzOW/3GtGFglQk5nfEBM7s/ocNr4pLXhiqmHDj8QNRguurcj2jjimb38JmUov6b5X5FUQf3iCncwgjICd8xXnRpvgSq/oqia+0SN4vAlsyFjILvbvb/DLf7G8hBFvu4tlarOIl7rt4x2O3i4l4gIUWJIx3jsf8uoNfI3UTXAh/j2UtwuWkwAmV+S3eDlkXlCGcFoFjRQhZ9epDEV860uj02lcSSy6bV7mzOVTiTteZu1CnY/xe4hJaKpb5NPpIi+T0DxBbp44Cd4ticBuDqqt7wPhJmiwkJsMdK9PojtIdTGNvK1piUsWshjo9/vvZNOluk4zFnTBrPXBMiqooOYXFV/17r6UF2hjiNB+mJby3KZCRl5ecSD/Kg/Ehw9pVn34oPCQipdQJFQm3AFrHln0gGQsqzLceKmbJYodYDkeco2rLZ2hjSSQcRk8g5t7554ymtXaBc90MR2tteJ7VSPzsr1kdAZPbkD8D91Sp6ua1cWsckGwgVzCwwVYwKX+XoUFhOWwWVo89KKu57d4R66YfS9/e2WrerVIvGu7ZAq1GFJqP/xdTmSir60c618wtK9bhFZ32btNNMolVyF1Xt7TQAbAGH8jTMPBwCmqMm2GHqA9vFvyE8/RvvzDV/vuhmQlxKsbBMGh+QqNutGL10NexmXda05nO9OKjneWeH+vprPtmjuKUL21am1s8XRBn++ik0cNAYWpxPYVi7uUVbRF0BgP2BbhoOlEMsp8SW8GqPMnUmLSUx9lUkPypfxkjb4WVv2ol0o0qB8prNXbb8XLtqwPqWYvh/8lWGDA0tY2hK9UpAvTXxJZ/G30a3A9aIFTY+kjRfZPV/pR/bi3QJgen3l7eBVDQDekq++98R4H6YvfyJtW9G+9MufqaamUTf1L9nmJ/yNnxl45JJD4AinUilPLJazw8pvbOHVLhro4Cn88LqNvOmhFJu4WlBdiS4GI1/UqhuIvG03ci2VDE5UpbunLQZGsGdGXu6ofmV2j53jbMJgSFaZDVRQg4E8MBEP1Ka8JMMYNPwp0bTv+obNx+Ic8n4G/GQicw5vZT68bJ7KcS+MttDAmGgzVdxQBCb6TlBq2oDRvhrchpYSLmuBeo6fMGbQHVu1lMq/RR7KV3Qs5zEEX3GKcNroULWzBLLH7onoNWk3coG1rwcxI3HTETVujeYp+NtHZ97nKBkQZXn4ezdSJutZEDJvNJA0DDBMG6ptxAcbDAv5qXIuQdmS6k6c3nAe0QtXPm46+nv6Z/ZzThc/r6kZ6nomu99DiFhg2V3iPGDjrYf26rCv6sk0EWmxRlyBdJ1fIpzckCOTmkVpGHs25tr4MrsJREWjvKJ9jR7NI2MkHxoiQLrDq00iRyRLIsAK1YK+hrRUj/t4M26McGFS3Zzu1Ae6FxY7dwHC70tp+1+dz/CJQ6Zaxoo6rS/4cK3mx4y0BRPrpn8zpCx0Znrmmu9vv6EscpEnoI3iYc1F1jWk1RufKzMWos1YV8xPtiGsV7QO1ag4gPBWIOsQ5tHBCy2St7JON0OWF9aidh/rSaft949bq9mPn9ur2awdg123Wm5faXOXPYPnLa2xAHzjIP23FOc5szn7pqzmba9uziNTKUzjRP202ugdLVHakr+svXYhOHN0OQXbDw5XvM2or0w4/TrMsTbr8ymx1up4WFIaVGXfeSm9Aekkm+o8hiis3hB5cO7vabqyyZa2Q6ErWjqnhhjQYfsxaXGll5qixVtgDkzWhxQeUPVCEHYv3VOLd+EDg2DrQ4M6JTli1PULneIrXV+SIQ/vcGtGp/Zw/ZO3VLrKP9xV3a9z45cssAwT7b+WKOEnglaMhrnUD+U1O/Z7+VKu7I17re63AmoUZEcqs7ywGI3OZ2cvYu6310zDC/gVdcGE1FlSKrsZQiynvwdBmsVlUXRA36b8c/8FaYxsb07EDV2V5PwFT1XQjnuwCPBtN7qeo/oPHoXec9lZYwBPLlrNtWMj2pmr1UD9KhOIXG9Lbq4AOcvK3gkfvFCDJMy34DrQnXn+pb3H9zQPugGVEUWz+9tAuLqj8noucTdsfGstAdpHf4oV9shbDRHM3x3h4KXxhOCsWTu4ux0upF7GQ/Ibc3pCvTacv6tTm
Q3XqEikLDn8Dh4TpB9uc+0AfWaIoXA5Lah0H6pZshkUcw/AawyvwsOkqNhkHP9Hri73B89C1LKpZspBHSVUb/A89DPm7ZhPvgK1jcHQXkwn9faFAfzdBdHynnN4lGIxAc7BJWPkN1Fe0Hu3rrOYcY1pTNTqm18JXnnvbaHqbrtGSDCYHD9vxIfXTDvWoH95AaF9JWG8wKyUzRhvOd6mf1hktgtl5o5Q/iGTyU0xFHfsdey4n795TzZZN3vTeh/rp+68hF04obl2oL7XTnVxICb5dEdOpY3mJCO1E3ExSqMfNU276gM1NjHdb51nn987GPg4GXlde2bU2lhwzzHqIbtx1Z7TP5IC9y6kSww55/MlE54tuz+0gY0meCHizk7Uodj2KMi6V2cHEsmnikkDX83Fi1m7plFD69MXQNgObVZh2rVNLk7aWwjUoPrPsjScGaPuYAJT3NaqPEpf2p1zbR4GDg10ZTO9mDvUGRW4zta/eNeDaJ78bLdWBg04Z8xtPy/ljLv5D5Z1AZKjNWngn5OUxj42CzOTtgO4nOJw9iqUeluFmWl3pT6i4p0R8MK1DscgnLyy7BSll37UNLzug6C78edNJ057l8LLDutyWznzaI7VZufOrIX69ULfOn8azWYoktr62IK9ikruTvDMMnejJqlYNPdLc3+QO/oM2tnfSzWM/nZQxEMBarjP6wlXHcfxOQvbTTNqGakQ6YsTJe++dS+2VhWEtRJ+j3dXZpaxe3iT4lTn8BF4K7gt9yRscP/tiqQOubtNY2aT0H/3hl+ZDpW2LQR1NMJJHFvIrWT7xFcVPLHe1sd+dI7U+RHxHdH4rJjr0ugGVhkZROxcWLvRnhkwiqWF9UlfrMqb2/S2yBQHAS5Z8Tdr+MCPRpkxzvJat1oZlcO8CZyFnYjG6SePAt3Xeu0M+tdWDPm7jt1Lbj1XKwZluu1kz7eSVRu0d+2mKssl4bWy6GK7d1Fzs6K5Au6WV8rGOarWaOYkaIIP9t48Gz9D2Tt+WOX+BOLmGN37B5SYhbTNrAVCHhgPH0iUjyTFcvcPo6wR1QcVS2vn4QeHtw7A/2xyKq0Ts7vgZMd+Jf/xp2fyeo//443fvWsLf05sKmRq7mCNTB41ZysO+SkzqU23qyO8mudnNrn6m46VvecKn+fS+0xHeVmm5gM7kx9Idtyr/ZmR6f+AfdLP7vS1vQSWluuaKhsfaFozh4xc7I3k9dURHT6II6xmiqM8FDVTc0Ps/UEsDBBQAAAAIAKVzoUwLSY2JJzIAACSlAAAcAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvdXJscy5wec19e3fbOO7o//4UusntsZ1xlFen7eRudsdJnEeneTRJ39OfI9u0rYksuZIcx5273/3iQVIUJTnpzJ6zN2d3asskCIIgCIAAtOrcjP3E6UcD4cC/Uy9OnWjotMPE7wWi5fRmKT73QscPB2Iq4D9hCs0n0yiET25tlQFgR78/C7zYGfqBcJLQn05F2oKeAyfFFuZjJwqDRQsB718fOoHfF2EiBgjsLBrMApE4i2jmzGM/Fc4s8cMRg9BA52O/P8buYtITg4EYOINF6E38vhcEC6e3UPgDwCT1g8DpiSBCKBEAEo43S8dRjPPEbxMakjGdeAvHSxJ/FOJPPrSZhwo/ACb74/QDAbjNo/gOsIZfDqLpIvZH49Rp9Jst5wzw80TgHIoTD2j3jwl/dwdiDN9/HU08P3ABzD9bzvbm1vY6/GenCOUmSsZ+5Pw2i700mvjOP9I7+fFXjyeYwfiZ0Lj2ATN/6ANFkLJvGHOnkQihppFsJLpRt5cM3PQB1iN2xmk6TXY3NiJY5CSaxX3hRvFoQ/cCeOvb6weBN0tEkwa7QeJ5aX/cHUdJCvQXznAW9lM/ComYyWw6jYAtgPCKv2bAPzHRMBXxBPlqAID6UTjwsVui1uRyASsUOtfRMJ17sXCOIujpEWQ5J9fB8RfOXMDPqXcnQmcYRxNcJN1/B1YfRvDiAUy+F3vxgvDyBt40BQoNYdYwGeBs2X4bgF4LWt8JMHeCvwA05CIiCPVg3qMJTWPR9xMBLGdMDT4qFIlKl9dHFesAv6zLL09dBMZzfdvdbNZqtXq9XsNFWFubxUGytubMgNkTydBAyqGIgcROLKaB1xc4JZoBEgg6AEm2nSkTWpLHrdXUDzANbCd/T9IBPHXmsD1gD8QijX1xL2hPJ0R3anwawqqGsL1JaqQ1GPUOdm0EjJGI/gy288IZCi+dxdCr4cW4qM69iP3hApnk+vqN0xfAMEPYyKlImgDUS3GJ4qSWjKNZMHD6yA1eL8IBQphpkjqJn86INRLXeSeFRcaJchF9TZZ+FMOypUltIGAcX4Twf7XSmiyy7XwM3AUIOtMo4f1WQ4IjCvj/IEF54MeDdRR/C0lFBNdAOvshsl8fREssvs1EksKMJPeATOihaBODGtGTFkiPPvfTMazbJIol4YReIOckmiNGIK5ArNHgjjV4DUfzcVcg9XBWivl7gnEKAuD+iGc88UJvJFDy9cd+CNvqBnuxyBcPaezB2otpTe4V4KeJd4c0xo0Dm9WTpHKd06GmEiB3H/mDcvRQPJNMr6XZyQNYCQ8OAOToGognEBtOz0vEi+fqG7BV3FdfokR9AsKlgNtEfY+F+pRE/TuR6m8L3SUVkykyrv4eA+17wKq1WhovdmsO/MmfcDsC4jXx0BfT1Dmlp504jmJutqoE1Y7dy+0HPu43L9FA1HhKdDPhurRn3cR/gAf3InGxebcfRXe++ANOUw9JJL88EQKzkSuZDiHwk6588mNgBE7XAELfa7TnSwHAsvl9RYqRSLsDPwFxASIBduOSft0U2E31S6NubwEioIWfQtjd94I+YhtjmValfMqR/9HpY1vCw2op+7d7CbJEenJzc3kC50Ug4iUMwAhsFxHYfnzk7WVDZrOspllhsWCv4YGm8aDvLfw0C+kzATxpX3ffXb25bF9dd5w95yaeCTnD3bLfj0DQieLmSJJAt0bRXQGIf7JhgKpyfurQXFGahEIMEkVL96X7Sz3B4+AgCmnFNRVgTDV8P4aTRHRBinuzIIUdQ01bdjeJwsHF+U3n400eyeJ6FlpLvC10URrKk/EfDuHLMptXYUdpPuZ05U//3NtyQVczqPzmdH8Hx+x+uGpfdq8vDn7rZANjOw3E4p4dF+cc46ovSF/IaHO5uIAHFiWq5qzBqlMKuKh/B4eC4qofHcic2OWni8vO+dMXQBPqH0inFpLrxVOAVpAr9+Xp9IOP3XnsTbvyEFG9l5Hw75CxarzlLEKEfAytKZgzxL8iAK0HzutpHKVRPwpYPfDDfjAbwKmM6oTUNVLQIVWrBLp2HqgNntLcADHXDRw/O/jhjBuqPaTWE/SCuUCNpw7aJxiRUqvAQSYCttAAhDwAdC+vLm4uDi7edG/eXN9voQaSzIasoTFBVAOYebE9DhxGqb2BUd3P4ZNjCCVJ0sUU1KbSpy4K2keJrI4BkAXPnUbg36FNEI9F8LNU+AaRSJAAY++e6AvMrYQEqp4+2BqSJZr
5laORgbWzsYCTkF3I3NozsXSRul2p6zXq0KjetHplHQ4O37xpGKCylijbeGEQs0adyNvd6vKzesuRT7bVk2Z+DxS2XDb8F+7xtfDzKht4ZCqAVggSdh2Ypg4sGaCzQG4WMNujUIyi1AfBT0s7hp9KgKFeC7oqKu5jOIRBAfF6qPICkPtt6rgDll4Pz3xmTVgftwCnit+Al+63dwrNe3Ae3eWeSo5pp6z+iBKBkVtn/BuIQBIL7Ls3F+3DzmH3fefq9OhT983FQfvm9OL8GhESaQMsQGTts/bNwUn35OL65rx9po/zvPJgiJq8wd5yDjKDi/CrlMzFYwh1ZmwEeghwkeUJeOpwBqFKt1bpFOWpbOz5fBMle9A4HkZBEM2Rp3oBSFc0WpZ4I4q+iLyiv8QjUaOWKysrRb9Io5l5Roh0ymxwn7cckSQg4nwvQHMzlP4uYDIXQDHMfgAsUiBd470XzPhj05b3zEpDpzsIE0SAV6cxCFtOthYT76E794MB2NSDZG/LAAIjn2EHxMTrg8U88Nl7dnV04LzY2v65hWcFTeeF+9zdqemeaL3sbmykEZwMri/SIfkvxukk2IiHfey6Knuuc09jSGMOIA/3nC+ZoJArPQjz2ycW6SwOFTuop7BYwEfo4EFSSwthPVmEqfdgyuxADFP0ILSctVhMPJ+4YQ8GcRMw2NJGXHfrpjiOCauKnxUwaEEtv2xm2JvQ+cet3a8ZvnoN4GcFBnSFWZg26mvGEECErOk/88uXpwucukkyE87q1stfXm3uSnN8IELgMuJrlI7AtWiLx2LILIfuBgsKqAihAzJXj+tMYRLD2BuhMwlkaBtOMZCzC4QJ4hTErJ+MxcACM40Cv79wvEkkvTzogCR/FPttnGQczWG9fDT80EvhWQBAtCZRiELc6Y8jQDwvrWPPB6unsD8KknYF2BIdHguDjnDGGT4n5xDEK+6OXWfF+QkdMzFsmmbTZK5kCsaKM5uSmxB2QN+DwecbUQbUZtuKVZLcCwwFAkrEICT29vT2VM/MkSt2H3yd9fxUTJwt12iNYkh6Ia5PLt69OXTOL0AjStH/QYQmmUAaoUAJhAuA3n30DKPzilUXAyAKSs0I/bGHFiu0RG94DAuArsbA68EBFkHLmJmH3E/A0+u0N+hn1yRPtm32HGB2m40/oESE5+SQ0oyHbAsqkwiGLeQZmgcNH4JYwdkt4EBPA2GcqwxOc27uOYob15vi5Uaj/uV/3K8/GZtOBAaWLrA4bF+09Rr1h3B9vd4kr61as+LP9oQeXcId1+rxt5bRgkX+zKp1NK9TcAo+evGd9rpcUxvWO/XDkG+H0PVL29kL/O94IxOh0KPNVE3uWLgi6XtT0VAkbpqkN/VeHvUCWWvuo1MD2UIvfaiX3EFvUziCc9Uduc58Pl/7kdFd6YsF6f77Giq7yA8ghHP70BsMiIgs18khIBkLbBl/FKLfNidlcso1tsXtpU+F3ScgiJ1MJKAVnBYxXQBN/YDwbddRZNV/d+vuH5EfNhBSkx99rqMT2j09Pr+46hy0rztN43AiKQSNXVYSFDc3MzXC0mZQYGaKRF5veE/ufDYu17DhmtOAyQ4EKl105wLA0DXG48Kj3kL3h7PhmixgdyRSELMxAmg0m3qdkeprauA1OIBwP22/2npFupvaXNnc6C4R1TXWAsWAbzRPL3ENY9S8+GeU0qDoCH0vZAySEd0+YXDL0NlD3vSh5wdgHbvORQi7GYAl6L40riIskic4KqpYrkk/++xACljHBp12mfLXWGGRB/iEkXmYrWSLLNVAS6tKYOPuUQ+kN5iMs94fIJXaQXoOjYFlGs28XXgnFi3nHkdGBobuecwAZWhBohzO0XrR1PELGikBK+Wl3JSJYIWf1KzUdiFgTZuCqpUtSlCwyhnjQuKVNEhDAew3YE08JWnp4zo5g/NrpIkDezxeWICQFDnK5X1CQDf4WSkaOUoziYtzLqH1rFdOmlXn48ePRSUd90RLXm/jHdlU9JErnAPWWQpYZuAmM2gvL6eKRrGkq1pmVoGIW8rRk+3/0qqrv4rVV3+Pc0EgwoZq1QTNeatsQxXUxxVtyz6LQRtcUT4cPm+FT0oOnH7PkhXnmdPI7Co8NlgCT7xpA3XIlkay2WwWdAsTOSDqX8Auj9qz2EJIgQebZOnxWjUU8D/QNo5AzUMdOVt0h3nbYH0HtI4A1Gq6lh+ijbwiz5FVpwMiGuj1uCU9IAu9pv2+5S6Omoo9Id1wMJtMYP96uMfQOXnvy+PGOfP6zsW1AzYOGDwYV4KXDaBAAeU9YPUtDEDwHD7TYjGC85rUCMAUOMkf+CnsQ4Qj8Yb/SYiw1fAWlCMEYEeAKgU7VUl76AtCGR5IDznBIIheWnbb7ZKTFgwPaUJP4PSbAcEDFxRdsNz6eNRvzP07f+OgbaxRsgrodC+uux+7W5vuiy4chN2xPwLmrPW6h+/Ozj51D9rdg84Vuox7cMKs499+5/j03MGnp0enB+2bDj2tnZ2eHtwfHhy0/Q+j9vx0vz06fd2+eNXZ2r5+ufGtMz1rbx4fXH87vj7t7Ry+7ezvv30HK3L3cPC9/Xp/dP6+tt/+dNMO3t+cXfXnR28/Hb5/+/a0M//5orfzetM72D/89PH1fe84mH266VwisPbWu87+w9nx1cn7hXd8lNbOrtrzwxF1/K0zH+z3wvPpp8lDcLb//OPhTWfz7PDs4eLwdHF22F6cBRE8O6Vn5zf8rAYP59c3nTdn7TuGPj47eP/+7OHoe/s9oghzujm8+/m+H15Fp53zcX/yS+J9+Hl89vbV/BAwrsHIJ535587g4+vo04fNh85h+4I7Rjf7m0ez/nbg947fzUefqcPr36LPp+P7/nmb6NE+rI0+bbbPTo9ft6Pj/fZ5ej+9/LZzGpwHV72Dk/Hndv/yxYeD7+P+z1e95PDbYrz1Pb6bvPE3j+f9tzvfN+JfaiPvw/Dt/qfxyTTq/bZ943/b2roBBfBEnO/vbLwfnp9O/vjcP56evzoKds4+zGcv3wzf371e7IzOz8OHu/ZW7flxtLnxZnJ6f3V0Mu59Hw1n0S/nR6M/LicTr/eL+PbxdV98+3wafN8+ePXQftkRxy/vZps/3bdHZ/vt9vEfgEEyH3mj+Qku4dXmxf7+p87RZXi+FV5efTs//HgcHHw7uB987o+vzv2NI+/shIg9OJ1fLc5ODtq1ZY2jd9t319/3X5+9TeYHzCPHnfnr9+++dz6e7b+jVasd7J9d3Ex+WQyOR6O320eL3vbDtDfpPBx+b5/zavRv9u+utvqT8bh303l71n5OHQ/GZydvP/z8veZ9eJ18/mSz75zQ7MzfHp21Ya7DwhIe4RKOPnXatbN3+682T69e3IXffvltI/300/j+Mvks4hfi6Oz7xc7r47uro/3x9l344vXg7DAcfzr++LL98f3J8TCI5+dvT2rfjhY/eSL88DE52Px8H/t3Rw+9t+HHV8fv08P2Yfr+e/hw4b/8+P1o/vL1xcMgeR9PP46uzzfFeNM7+37/vvb9t8vPyeftif/+GCzA/u
uT75Ofv22+2uxtTTeSz+nst9PO57sX95+/dUZ7NdrAnfPD4qYmP+IqX+SAhoseF/haq0m3YhSGbIqytNeN5KkMvY9AswWRCfKuz41VIBw72ZXWyq5HCfYyjh4WDNEaIQ+XLpwywD3R93RoyRRhlEIH4fleilKF94+MMpjRVxTB9xoMKfAgmUsHPI+gMQ9UHDsb6xyOlgKlQMvGi/1rvHck8wPOU/TI65i0exiYnFvQ+F7ZTyKnyudwgoMiIl303o+jkEPVGsdRNAIQoOBNZ3BId8KRH4o6rm8s4LAagF0bLUDDGERsV7DdCLC8kYfxN9k1C0VszhLVku6s0JzlSchrK7d2AIpiNKGHGe3hjDGey1gGeHgehQL99WMv8dI0bsigGNCULAD1Jt/syHb5AArVXMJV7hXJyGX4qIFc67mhcpKfvAt2fNrtNtiptObFIzDe1tbu5vjJ0k8rQLqPAMnBwCaujFlQ5DF/z+5R5VVmUUW2QJTHQzTyw5LGuewq/dFh7Bv/hroXa9r45zpSPCg9AKbuYvjV8pHcIPIGXWpMalUj37nFjUHpoq+GYwRXU+496mOt3cqBsS/JAEGFznNG/j1YfLi3mw6x90rNno9iSV7dOodndqUfwfb00XzwumlPBqK5cn36GWvynMZ0A0EfceCm/Jz6ExHN1C/5wew1tfX4/9TgzZrFsyjtsyu+PUfDsCzi8ygFRZZ+7aYzGDOgXmxLp4bAA9pPFxRD84Ikow4JtE1sMm9m0tOiuzQwwpRu9Oz5lXOjiUwF/xl0K28gYVgbawl9zEELTGXFK4CJ8Bd2p8LZ3D9GOEkD/2nZ6O1Z3x8RFFb4ySOY5ONZJAKwW3Gz7uW2bouOORTwyR5es6P20j2/OO/wD1kHc+8DeEA9gXXeU+KntdSF8MhSPUaL0g2Wn28JwZfM98nTatbKTzh5Alrno2v+BhIpJxUptryLp7wUYdDJklrSF0yIDSJuW3qscuf8tW+isACClAQ0IkDZoKvVKgZLtxvtWTp+2qxwwHq9zg8SdTmCSR147dFnZe7eZ5t+AwjPeGa+AlD04gBDkkfjdC4o3wK2DGg/Ptv35nicF2IGbCMsMOs5ZBtkj9la4WbcwOeVAkaWDrY9PPT1E0BTPijROpbQo6B1FNQNZrNsXNQVsm+lzci9Z2CWTecRJipjIBaCvZkfDLrcvV/KR0St8mZyLD6tSujDD9zZFJRy0fgzx9N1vcPquwVa5KVGXW1SqyW6YnXDf2eULYQ6MR5f6lII17/qM6BvxEfi3xNig3JxQZKwFapnw6JMZoVhzO+VSEAjPIzSVAwOfdCLBvCfbB/Rv20HH/K1DezbxEn8CZzRuE2krWeAoks9MBJmUzjCjYjKHMQfUqvBqIBtX4otqyTNjM8r9WqmdBcn0u1Kymes5SXdwE8KSqFCF/+ORShiii1zsKlKbPETIk6LqRNE0V3iYIzfMrKUwpeL+IUQxXsAOCRwzzfZ4c/xdkl/LOjypR6KNIj6+GnqpWP+N/YmCX4CQRAv8IO6eaw3v8Kq40RHOAu/31Xh3w0KObHW+0peQXk0NXRXxhTEgSYqR7p4lNSX0AVdFkpOnVFTwxuHGRqeKqNo6Cn+iQIMdZEnmQyj8pj1lckrTUsVAsmE89RmAGvWF67jMgGaOcTV7FQ4TjmHcx9DaafmGU0NBljVkyP1VMxR5v+BlyCgU/I9r/Rp45GGh4XRVac+MToRucL17zlcv6iV/aqigFx+UNVcopo15wdVzYlHssb4tbopsZHZGB9UNWdey1rT96rGmh+z9upRVRfMK0IOMLqoR9UzSJJ5FA9yc+BHVV2Ubmd0UY8qRwHeNEeI5HGZVwhXMYiUA3sjmU84EA+Yu8KeFBHyRnLAojI6IddwoAzzjrEfeM8RGMyTeiI7mRFnj7DSl63Kpnk2+rK9pGGeib7sVDa1GOjL88qWRe758vNXg2ggNDlPT/JHy1HL3nJE2s/uKwtHM5MAVCg7bOJ/Go1/7f4+b/4E/+zKj81//dr8V+NLe/2zt/59c/0Xd/3rT83G7u8D+OV/1/PWAd+57WUDyDAKReh8a9RSoTE1cUdxNJs2tvItDPPRbLWdb0ViM99ip2DzYqOi4cKs1/emmIfpUF8txAnsHDOmZWIAPa3v1ovmFSbFYgxLBIzvpyp29h4VTA2JnZ8YGzSyYpGMOcDPDfyIQZGFKSC9njYFr0eYqHwGbPBrK4djCRi9QymkE+cBkDDxUOdHDv0YjgKgAN9mFmDIBcV/vuyubxVj24usKpvLMFIkrrP1FItTQaKtoUEVXIfLpGtBrpZ0sGSrJVVLOuQka0Gmlo2g5WoU2yrx7pPn8tjEc/N4rHHxdPhirWXVHDRkqd3l2mlF/IpNtw9+Oj6jJAjbxpUNLDXtQxTfyTxpzsDFm+PDzpvOTWfj8t3NBsg8M/OM0xAvYn/kh1QKAc0Oz8coK18neW+ApAJdGkQtKDSzPm6hjUGYTLyBEF6yeNSEhaFaMkOk5YDB5Um7dQzqEuh89M3Qr1AJ419Qv8IfLW+6/HHP+fPfliov01D25Ghg300pJFZviFIS2hYx4Yt4ahQNgxMTVBm6bRhonyH/XO0okQ0yPi7GVXDrCnRtFKQafyUGPiaqSzv/CNTrKF40OJANgPCviSS+vMYSZNUmexggkd1KZZ4PZsYhw2JtvR9EGAUX3ctUCA45iobU+fbWHu/21qGsd5labmEpB8CsG48i4OAIIEDUgUEjv2L+PqhMMmAJlaZREPW8QCpN0yjllAhkYboPnMZRLxBSR+Uw1ttbdC+gmQM4weYAXAWGKfIDjCwhf/EELAMfDVV0alO8t85ZV/QxfWzWhMpcUVaTfPyjpnVoRbnzlaZNLx2/qzJN8C8fQqKDWUAnlPb/tuucynMaM7/876K4dJXLl8oEf5SPJBgAh9vbcna7vc00qgg2DObJgDgBKT93NExH1mbooRqMvUnaSImUD27MOSQVAEXczKPUcobTFt0DgKRJRrBxBzGwBhhnANW+jdN3jBNv0aM7ki56UuXzBneyd0hB0ZDNS07dPAOQT0vELg6jhpD/NgvefZv6FDMnCVMSMFcuJ0pYzv2rpCv6+AsowtKBwRCRAwJEC/xL6TVfS9ClcDGzLADhyrfkiuwFTIZTi07kF+bRc4m8y+hISIJ4QCwXAj0iKPFKcMwDb1Dm1z/3nJ3NTamxwvd/OM83Ny2kymmDy5d4w7JgxwnZFd9MaV68IXoyNlyMiB1Cx50bnOZJp32Yi0OXtDFglinKPYzXC/sgJLwwZV3h3dVpojQDyvAC7dfrF+NBeQnJtMEPOjS+ThGOz7Y368UZQtPsQEfXUgOdXPfSwwWfOPL9mytbuZj0kJSQqviHYac6UwatZaTOCjlWw3Q9EOEoHa+0HP0EE11XmkXIpQmqTCwZ4re9vuMWEzzxLyLVCncdX2hma279UjKjJ6aD5jDZcZ//9AOIWE+fKGCkIqK27BOWwvxTal+28j8KwcJ6z/r+o+BmIcXQ+HjBzIrQE2yrpbJMrfFwFgSoXjSay+SaJ
LIlsqVWx/cbdNOnY4h4PCNeF20LNBfTMcAFtjFVatDMscxS4IOdkWKxMwSIriXSAIqxSZk1O8dzX1WmQFAYC0t+K10UKK/7w+QwX0DTqlHPAsHUaapGzSXR4V5/luw+S9zM9+3UnTPvDiPuY4rCjXXloWSRoAyQcUYM2Tlo57qa0DW2Kv3yq3L2FgsAZAsNk9E5XqdcZGwueglWmKNkSFk2CgXeDPVhrPehgoXrZRxYVzn/IMC5BAh8zk1MllQypKSJhNMw6muJB9GfpRQSQUpr41nSRB1NOtF3gZzNuvOsDI8G0M/N+oNklsHn+Fz2Z1dDAMiAqDUj0A2iVRbXoLyS9OnBAOYkoS8SsU9eIFo2ovKtrIhx23Jupyqg6LZVSmem9W04GK3TtSDfyd2yoQC9vSTculV0ZMU+qRvHpIkOMC5YkJiDROvLckJeGOOVrrGgL1wFxATwSU4GTRLL5CLFBQmKEx5EhbnUiYoUe87hgUM/nhhRlIlM20sXWSSMn5SAmYWogtD0ESLIGCpXN8ANUWx9iVJE57Wwnw+Vf1k3CzlLzxSQB2lTvmcwW0ao0FASDXMv4d7Os6xKUwPaKCWFZWpJqGZd50cko2Y+VYFFHx7jOodtjEkT5ZGfFUbaPoi8QrgA/dt2+hTOoK0GaRADWfLBoDLra18KT4YiI/OBj/t3aC8PQRuhqHwaFi+7NrY2X73c3n65oZhjHUT9uhnmv44q2Dpzmgk0Ff3xXQiA8Zpng39fl6bCusQ22chNBjboeefgpntwcXbWPj8EWb0iH7H0pciEjS138/f49zC7LN5lkx8frlQ6efIrYt7UqpAv6Yks+Pt0MJny7+V8LX2Pt4vtbVl1UioOB1uDisRk5wDJftydmIdulGTgXlmBx6gvk8mVL7s/i2MMDMnVH9NdFSL5jDhit67aLfRTNjE6p+ChrEPWUHvI5V9QK6CTPNmrwykXRyMUDlHcxeH7qXEMrMpTW14zw6RoXJILODRNwe3H6YY7hRGpyqLZ2fRPKyQcVHfL56H3MLoLN4AhN2jm+coCanZ7zqz+xg9nD5aNswTi9M7f6HvraQxba4Oq+/RTMdgA1C0D4REYaZAUMFvSbZbEG8nYiwUObm6xDc4CwsoTdiK3OcujWIj968OnzRMHQxYL1JAWngXoeKg9Gbpcl2XwzkVaBKeYOA9JBnI/juP1LLyw0zQrMIymqZx/2QgGcw7xjMfKMBxhix77dY49x7QuuZU9vW+NnmoHIzciZ19cZ8yNe98OHn0Ks8uqeSaK6WTaHQ5a9C/2xA0tK0K6kzskztQw3mCvo6eHOvDHp/bJytRkU/xAFY1xcipdTZKEamShyzKehWSTZ1lmH6t26aEXz/3QWr4ocalsckNN08oBy++sVeeQw8Wdk2gierGYEzFJ/qi0NVrjH9siBotYzIGVJINAVfmVFg4L/HDhkMxDfbNU7LGHhK5tsfeEPZd4I4IPYAm4xLSsVGp0lLVdMbeBxSt2U0U8uPqPYStTbT8kgh/mZ1rIdY44ygKUb5DiCakqnLygfvATMAH5eYlnyo+70lWBhxD0wcNAdyi0p9IBiJTZsdx9QHaq5FOFi9amgN7l3hZjRn6CpGxoOPlpkTGBB+Cw+WXrKzupcOUo75WkfnU+b6XzRf3paD3EHcMHNRIAPe6VuJzs3jKlnYC4GMPziHcpa0x60SOtCxsMu/9gl1799/CRiaD3Ua+idHZVlANbTk/80wJJVwx8GrqZ7Ht8klU9nzBX/KuYG7rZDTasBCN9a42La7IwWs7phV0Yq+wvK5aFf9IYZuSfUNQOpsuxBQ3jaLCcTYyXROuR2m80UO6Qyd1aS79SQx1cuROplZdVxg2nNlMpmw5spGQKWrLQNwb8Vd6PdNGtVbjCNWMYJ9qJAycWhuPQWa/UdfJgZ0FxOGIpFGMw1LK3Nze/IvnNx/KymPx15g/GSW4vCiV7qRB6nltX++maHPjDwTm9uN4g6+j3we/u74Om04APA/rgrjXrGV2aHFxje6j9IYWt6BG0Q9pAtMrJqBMsM8WsJPKB2xpZlHUjyQ79GPgLpSyKQd1Ybryj66fdMOL1zu7AK9bzkNornauu+tXN7EKYVUzuVKrKKVJZezEkQwtEZmZ8uaVjACiNEB9JEjgXw9Bj5gwSs1OeNBY486uKqWlZIoeDszAARgXFIkVURGWu6VAWdMLVrMZBIskAYDoDrvqEHakklPxBh/g0v2x+tZpVuf4vZrGqIQsqyrurN7r6TVYeEwsrSbwoxmkQhfWiu5/hyRoFimOkB6i0ca6envWQay7gA3IXoBzQSYbMZFIcSaOka+k9j2Q0/kAi4w8mMf5Q7mJVUYzzKD1V1/liIHfkCdU004XtPUoyjjD+mVRMGdxM86F7thB1UJUb0MwdPCW0K50F50VyknBXb7tGGentBVQpCHoNSVoWLpCNJcOTxgRZusStgtNEhbdnzp4MF87sqJIExq/GBq5Iky2I/6ydjMsxWXQpheQxrSVzoQwd1aKlo28Q8SVqGOnx8OUyc8tCMvWKVUPI/oCARX0cRAEKAHQFc2BjZjDDttZ05HwbS/IXb62aBV1HA7FnLKuY8QCAETJwkrmPSUCaVe1NVra4pijnKo0AQ6OygFRep1aqVnodbPQe22lPxXAZQ/0tJOV+BfIu0XX8obmXSjRL0vZUlkQhJ0R/MICU3JUrP24GjDcqRT/SQff8efGmPFmaeFwApmMvm9LbXMQDg5ntbjL+nWomkeSoKEGVuAkcvF4QNNRbJTglzXafP3MaOed2Pi96qWe30qAuIK0jWJdYJkCzAYe/ISFXyJdPFZ2qoeGtH1KvsE4qBpYaVNtPGY16ddI519v0jiz/u8dXBvv0Vo9nCd4a4IUPv5zF7b14LkLUdjPaGtg/RjUXP00bFXiZONGw5c0ka6Fhg44sqnFU2nA+9un1P2Zzqpne6K0gfLoQaTr/2HM2l6xNfrSfgNFBIPTvG883f3lRTd9VZ2v7lXPnBxERCR1gKkwmC54TYTQbsfMNX/GzgMOuvAAb/snKYXl8qLjZztbmy+1HKqoVjItLKejDlFQYGL6Hir2K0nEusDoezOE3fx89eFhK0a1YENo1VVanhW/VvpFHa/UsMIICd0dpuntZrruxsctH/fGMdxuXQtZ7Szvo94qqVEna+1Xn7bvTq85hRYb7X5tVWbRLCeY2Bf+TmFfvC6uwKOGTr/3Zch6ZZfUMi1z+LswyrKRqQ4cI3WM7B3xhGCzUy3a4JqTswtep1A2Ufbr1rjySnlQ/4GnVOeyTqPRsXLpd/rNb5a9tk/9ft0g18/xXt8bf3BarfMOf3XpoxrKaMeBZiBO0PFxJwRuufKw4M1UMq/hChyYmDls0XRL0VsXnllUJmkQBEbl59PvJyotZWtXN6qW1zJ4lWA+SlN1nFNRSX6YNtsxwl6ZpVRW0dFwJ4YX6nkldUvGOx/wZdNzge0SLVWWHmI6GDj62u+g1ilwIP9fyKTZTmYMxb6P+NzB/qjH2RJvr8UnahpVdtcR0iMh4zfKcgrKEAh5Y
pswvs7jQ86jz07m9zATXlgzwH3oA8iPkXGlGqGO5u0qXq6tbVe4KNZh8GbYoRROFo1D4kes8Fu5WHbHHf3VMFBH36r2o9MqVquC1/E3xINIl8AzkdQPtz5X0s1Lj0dHya528pPRDWSqwTtlVTtpfKQ/TTPtDMLuVYPIhUgVwJWmdVsLrMq+jEWFlzc0AZNreq3Jn6oBcY8lVsJsXmvEWeCXo61KOKhJJhZrZm6Y07s6KEpN7RqVlNXTmXUmGoEohlF+0I4oitq2QbxAifSE5zgm8JO1OokEX87lkb1mvbG9rszQTzgIHaHWV5ayGj/GGkW1lhSwKAw+jrPlBCUpdetdlF5N5FXaF9Dyd7mMBeEIhIH7vZ2JFf9O/13iVQK/2ZdmF1Y5QPW1ck1PmCBRVzhVVL5psyDcc08/8UD3bkRvvMBKJ3JTq/bHq5dimrzKHh+KWLIrOEJOVKVmV+VgY4ZK1y3aEGkYFmxiNJPJ/T+7+L5C7w5wDyci1Ntkl50B9elqr2UuBKh5UMqHb5MVyuVUu9Egzl2Iv18Lykpv+mXKQeYlon9lSJBpwqlPVc0nvRo+/k/tugCn6K7PRVBZD9hPOkGoAZfNVRYHsgEPdFANctAi29Bw8vELOoaa8AZXYWoUgJ1Nl72JtGOM0S3lEn5O2uCkyz4Te5VCSOngpSXI2ijH7XAZ8XQkvmDSa9pS4/iIdJCheNDk5pSEuG9XlEARu2GDJxfRqlbBBYURV8FfVUJkCAeneXyYY0yt18JyibFcvmHsL+60+fMGJyoUcbkPjDdpFzw9leV+QfEigwkuB9Evdb2FAzB7mXGmqSYUh2Ln2SP1MvpXQmzylZjE7Sac8iw38ERbCexTaIbUrBWer69nxbzTPyyRLjBqjN5c2LGJrDE9yJ8exy7lVSsYvKzkX8wrus6xLl1s1lnJQUVSU3tvEUteL+y79t2FfO553bq4OytzjQTTycVtBT6O8YBQncutW2N/qfYYXFbc+CixdZ9rilX5cJlG7OaFKzct8QrkVUR3KfR3/mRWpvlKkr5VsTg46xd1/GuX1bC40wWRjlrzxFdGosp1WndOQ87X49SgyzEBf5qp+Se7lDLp7dsdshBGUvB60cHvtRlx53fm/7FO6uOyeX3DjpzfdKTSVkQATjLvi1rqAaqEt+VLM8rT5OA+b3BW1OU3FtbKS4hP+DHW3rMziU6Hw3Pbkv3IPk0gykKvWJP8bk1Qeo1V8Zds61jE2a+XJJDQwu+VuksksnLaEzgHZeawKlPKbTeiwJJeYTGqCQwzLI6MVUHAwuxIG1ZGXI5KXjvvYpd6xsVkBm4ZpOfUCXFm9vViotnoFim0leeyGTyyIUrAsNLXR4CVfVOj36b2FaF7xMKrMGvnSyepSfpK8Zm/jVHJiH1B/EGpY/iQCVmJ4Cg0uH1GUguwj5V8ba2ocZmerqcykVI35HxUCCNY2pyjxbSzXvQlJT4VJ7tGNcDqOZ3VtCtn1bfQjVW9HJ9vXLy4pKhdvk82qBPDv5cU1fb98R/9wiSJ6WfRV+4A+yOt2aoTv87FvwCv8tX7IkaV0+aLMXcbv/0hHbR5bM+5JOfZKqy+1rI7sqlACxNQsMkAVmfPa0ZHjNWPfkh9BnrPoPoaPwRS3/z0+9/FlrWpbU3QR5iUnXKmF31psHlXSfGeYvK56h2qnRQF7rljCZ3j9HablULu66enQ6B94/THdladxFMhnHX7jFSjqvNOCBSt6WFaImsdiGItkrHChH5ejQf3W5TBccWSdnknuXEVvxZyCrnwdoxrJ8la4Q/GdtWiH0b01vax2hht7usiOgZy/yIhtgz00maICZf6OEQlD/NCoP4N982zgPOs5zz45z052n53tPrt2ftqEv3qBx3KzOh2un0UDfP/mYP0aZTPMjIfT9EUlCteTSnMpV0EYzVvSEpl4C6rIFPtUgo+Cr+aifi84OgxL3ekl594F77CfkJyA4RuqLoNjFP7NbzvzbYIKHV13CHMyVWUlkAbqBXFcOtZ4ySBFzlJnlBYFvCrIpTaFUkL536+SVDAkuROx0i86mfSLQFAu68R/I9UdK5tFmMLd2G45L1rOpl3qFcEZyd7T2I+oDBOmfG/ClEh9NLrIsgTSxehQ2Uusg5S5igwU1bmQf4FCibiXnRprZm8mpbql4Hf2nJEDbj0WgUfvppSlofgNPlRR4ikaelYKzLsjy77UOqAvsa4MjCeIlC+gCvhcnmAoXwTL7kUv0UWn6GWxpv2geAELR0WMJtc9VLiYs+2tqAijlaXRRUY0VMk0Hw3TYpKhhw3oPUN3Zhdfjdiw3KsHygMSOqodvUJRVkzLisGztMbcNXaUUm9qRO6KnlAcT1oYJ22Rs5bZYcMs26cGV6+FxuI5JpPt0ZNmy9xvfcFP5dGwx4Wb4MAEAiZ7X+ogOuIkXdS/tig/eK/ei0DQGjAML3cekAyBoJoLYCSbfTJffb4LF2IqH8fyyj+9Y85ZT92ke3rALnerqXbilzRFPagL9jIXhbHoaPryS/sa6LUck94GJNNsWAJElvNWQMi/XwCCNsNfgqG4PJPZzJtGJcRH72MsO8e6nfmRC5lM9DzhzqKB5TES8+KBq5CzxJXSdBe2pPx5l17i2mZWZUlJcoHOLExOKvFDY5lgp5G4PbfpZvCANLu5Kd/IYjBcnlCWg9ilkvNEwt18U9K7+eX1FESH7zymF8PjCy9BN8ZqnWo8BqLORwlEFuAnecLvlmYqqQg8s6vU2NX4KzgAlsyAMVaMOsiyNbKs4Etc6ZihXtIRvJslBVrtWX9zTofUYtc55JrtSFap8Q1I1XIaGhTvSBNYXvvKBtWR/LKdjwocM82uidzWpqS8PJZ2gVD83gEgbGNtTQUWrq3hGwHw7F9ba7oO6LfO7a36UWaBYu3DKKvqjkuWxVfiIioAqjgac2IdtKh0Jq/kuWI3LIDH/fkN9cDIICa53lKDmzv/5KpyCn7Up3oUstzw7S2O9AXEyQCEMyCWG5FDWhT2zG7yZS6dBw/TUHbNd7vghv7Tdd2sfCp2bTmkCe0V5cBjTowVWZJE8FhY4WTl0U6Ehayr+UcShf5w0cjsqSV/zOB7fx4YZdvqu04dNKlA1ubZQID1fz+Kg5RgvB+M90LQglBSHt2+A+FX+NlKdtePK0EZgsnUThpGBdMAAbolVuizQpy4u7mqWs0xvaMycu6yfXXdMQrtMN0wZKaLc8XCOHRznL2XgExwWfiqru0IEapUTFkkdYoKl5hMwURjd0MAW4+/m0ULzCb2Q7y2ZXxSbiTHOha2VzXL9JTS2niPgNVSw+Qm7HfPt5FFHJXin134lfQ0tQKObadOhl+8olM+IJ79TFoFKu+W/Y6dCnoRwbBViHJIdiuJhIRg15osh5BvhRBUgIF+VZT5pp8SGEYDiX3urT/VPeB3PYh0ku3JT394sfvmg/R9vfbihmH5aFEv5REpNajP6oKkuVubWCb+WxEkuQLOe9qilVtelqMuFxCZ9qI/KRWG/murMLlvmS6j3shXPoil4Oa/tvJqrNZKKvA19djs/Qa
Gqp59bBW1V/tBxSiFSJWiI/XHvODlDn0dxaL8oJlYBXZQr2yKXfxmOu9W5UszyImmXLIh1ZRB963x2hrjngOb0ft/FJvlnRPS44uqGUO08wd1/y/82cVV+qq53CVbNzcBfNcTgapnzQiCMZGhLMbOc5XaHaWPsPUITJwksJro15F1HOnMLqUUTQuPh5WL37BMIFnpSRMt4lgXVaVdqw7UN1QZlSRFeBdG8xAzhXD/xdhOlrPkAw4eUTa7dILyNWYWS2hF2ZqFtSjyJu2aj/InaK6x4Vsnrcr8UZd4xWBIYEYwpq3ErvLTEiP4VbwiqKGRLjgoa6lhIDSdV53LzhurZp3Bd8U75arhymGoKGnLkd0yXGx2iHTpAJWQy2uUWiAL9+BSPykURHlCUVrZ1YynWYWTml4Mhm7urCogCXgZTwicjKEcWMqQI4VxerLQESyRka1eTG202X1tTSj58CPhviacP+tAVFArTbLCjiBNbZem2NKK/q4jaBv8Wx5iZYQHHa6M7lrHTBuwHeiyDCty4mPYgOtbzXLxl21qeaMgCzvsSn9YHmu5V4s7dUlEfPloRkmJIb8le+mIGn87X/pJg7XBBGUBJA0cZRI9Nmb5eaPZbg8LzgksQ+JirTAv7cJP6iiRstfYGUVt19CRcxV8Yzahav8PUEsDBBQAAAAIAKVzoUzCst0/7w0AAMkwAAAdAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvX3RleHQucHntWltv48YVftevGKwRyC60crJbFIEaB5Ulrk1Ullxd1nERwKKokTRdihR4sVZ56G/vd84MyaEum+xmt2mR6MGmyJkz536+c6gzMV6pRPjRXAr833hxKqKFaIeJmgWyIWZZSve9UKhwLjcSf8IUy9ebKMRVs3amCdBG5WeBF4uFCqRIQrXZyLSBnXOR0gr7tojCYNcgwtejrgiUL8NEzonYXTTPApmIXZSJbaxSKbJEhUtNoiC6XSl/RdvleibnczkX813orZXvBcFOzHY5/yCYpCoIxEwGEVGJQEgKL0tXUUxy0rc1H6k5XXs74SWJWob0SGHNNsz5AzGzn8QPJHjbRvE7cI0nnWizi9VylYpz/6IhxlGyUpH4exZ7abRW4juvOfPmSxn/bbn2VNAEhe8b4tXX3/yFdw/lXCVprKBtFYXMSZbAIKFIoiz2Jd+ZqdCLd2IRxesEKlDpSkAI+h/BSJBCLaAAItAASS+WYiPjtUpTqGcTR8+K9JSuvJRlWERBEG1JtX4UzhVtS3jTWqYtZgqfP+1xlpDODEvsMussSUUsU0+FWrOz6JkeGWVoKkKEEZwDKmYrBiBIdOxz2UlspnCqH3hqLePmKVZwpKWSnBVIOs/A3hFucjZyrj6FG2EEnUd+tkYAsL5zgth4SX6FBTFcKZWx8oKkVD7bjHdbgmj/Gd+6IzEavBk/tIeOwPX9cPDW7Tpdcf2Ih47oDO4fh+7N7VjcDnpdZzgS7X4Xd/vjoXs9GQ9w40V7hJ0v6AEotvuPwvnhfuiMRmIwFO7dfc8FOdAftvtj1xk1hNvv9CZdt3/TECAh+oOx6Ll37hjLxoMGH2u2gV65UQzeiDtn2LnF1/a123PHj8zMG3fcp9Pe4Li2uG8Px25n0msPxf1keD8YOYJE67qjTq/t3jldMqzbx6nCeev0x2J02+71jspK/FckvXbAaPu65+izIGnXHTqdMYlkrph4BxoEj72GGN07HZcunB8ciNQePjYM1ZHzjwkW4aHotu/aN5Dv/MOKAWlYpzMZOnfEN7QxmlyPxu54MnbEzWDQZYWPnOFbt+OM/ip6gxHrbDJyGjhj3OajQQIKw2NcX09GLqmOmR47w+HkfuwO+heQ/wHKAZ9tbO6ylgd9Fhh6GgwfiSxpgo3QEA+3Du4PSa2srzapZAS9dcYgbS3EmVDk2JJU9J2bnnvj9DsOPR0QnQd35FzAau6IFrj64Ic2Tp2w2FgDsuBMf7E8uME2Fe4b0e6+dYl1vVzAD0au8RlWXOfWqJ3ioPbixYtasym2Xhy2Wrqy6PT8hFgJKOP7WRwj7pDmVYgACz3cpWxcBGOzRqH4gOTsoVIhY8tnL8gQjDreOW0hYaAweDMVqHTH8d6+d0WSqeLeTGKNZFJr7x3lgJnnvwNf84TTP44yK5eZF+MkKZOmAMeSSVEd8VdeuJSglG6lDJlULAPpJbyyG1E64jTPfGUhyp6ue5SFt6haSlesTYSajPuxUQWL0GRVQXJ6SDf8pFZbxNEa0nDpa1p6S5qJei/M4vvH10jE8n36lO42VOE5h/KXWq2WxrsWs6ppNoMoepdtnmQcR/F5PYHyo6VHwvreRtYveOlte/Q0mgyHg5v22HFGnfa9I67EOM5kTb735SYVPabiEJHWB7a8QbYkJp46gztKGN0nhMJg+IRE0+1RyrsSEPEnKsbp+XkfCKQhSp6eovgJ+T/wfFlv1MQv+lR3U1L200/ZjKQeFodfXECGuQS6iJ5mu1Qm59HsXw0hQ+gUNr2qZ+ni5bd13CGFJFdakBClB+fT84Q9mmpZ/ULrC8a+894BP2Wx1DXcE3o1ozNBx5gbNd7Q8uKlwLEtQCH6L32OhXVBZH8bXNIN4WGohj65KHtloYitBlFCKq5tHjtQzkEUH5C6jbbyWcYaphRUptPjQk6nBhr4wJkzgnwolh4xmMZQKEXey0UsZUEIpBMCSqjb0ykIkoIvplMd+i0K02Wh7hYHZf6NlMAxFxHtMGHooOOmIlMaFYfty9aVCy8L0kSTosXGoNXj2bbmcLoWSAdzCJxzoDT6tA+FBpATioOZZw+xXGBgcLGRPnAe9JNLBI7a4U4gyam5mOqw1ScWhPKTv1ul6SZpXV4CvSTNzQ7IMWxG8fLy1WWgZjHSwKUJ+1W6Ds74+uUM3vAS6CeBU3w/fSqIUoqbWQw1SVQ4lsceCmsJb64RFVK01gDERLAslSwdy2zXwJ0QFgyfipUMNiTxRkbwkSIJcuLTSXahYnhquo3ovFatErGtI0HdEg/kwqR4eEw1jU2nZAylGx2tyIMMYFTY0OBuq8iAqQ4LQ5OPmU4/wInJD1+cFXMOxUOx+Vr63qkTtxRq3EUB3N6zV7w2RSnJ2DBriVDhvFMQzNdtPyQLVVa98FW5kKE1yFJYl75uh0hT9KNUlr0K12x/Jf13CZfuo0IgA9Pi0j91sYQUuvCRhwp3wRV25aEv8Liks2PpPHJAc2HFkBGjwT7PWThCOV8CyFNIHwT5JlYRh7o+PG9h97izDEROjSBL9+io5LRz20XnMzmVVlGRLLlhOartKAvmZXY+rJjFFt1U6nA1vJpGyMu/r3mksAKM8qHM5ICW6cd041WmSuZSzk1ytNxenOv5gLlxSJDoxFL3fXQd0iwjz7VcsS+0Io6q8pAeAbkNCJIceShyedsdRKQQ5+081MyQQrxqvr44oMnY1yTMHYk7lzSWoBYeyJGKa6l+7fw42UOzv96kRySOjLoK3LDnenNd1ABDCWcbzl6+ar7i7GDpwUpkVrJ7QxW03PVaRCFj5YZudw1xVa
Vk+2+RrEz5LICCrqCFIsri6ZVriHBZGFVoAQ4AA9Z6t2TBRh548pYLJ/UI2leteGuVK1u2lprGPCnQVMIScv6HcxiHMoALjluxBDtxLNMsDvU29jUbzXDKyw1UsEE23bXEkHfSLI5vVEBfsXaDQo3ymxXLLX7oWVVBrbw6Dj2qIx7Oeu+3pmM0A4zWp8YohmloYbcxxZoLRC7MAZSs2secb4VOwYqmY8CEJ+ZqsZDU4AlqSIQeOTKJIvzZEfYBBGjNduWJuYU4FHW0xEZ7NuIyPoeOEyEs0XGCb8NZRSIDTxdZ6POAbh5Jhmq6KBWFqmSqcEsNy8zQ6Ch4cyk11/MqxT3rPjmLE65np6iBr9ZeuTBJ8snsvip7ElyQJs5VuMnSp1zOeoDONnz5Tf2ikcPai0JJxlGfdGc7h74Q6uV5OrGZcDgR5XvlTc9c54xg7ExRhQN5C8T/qZQh46N9D32pOyurhTX9En2MwbFCs3gmHmQdtWipnuWeH5jnbAqcmljlCy2MFO/CaAv/1BAqz/AAx1Atb41itQQPge6TE6hZX+QMm9sw3KnWtmQby490x62qVfNjDjryElAF5blXV8cb3ZNE9WOLViJPrc2b3top4xSjBss0xZQh/5yJcbzL3xMQVIANOCo0WATIStLKhtK4Te3i53kQ5G11WVLNEGISKlro8HJrFmEpft+OMNjhmKHa61dp5H4mkhUjJHrPAV49f6WFUza48iiHhC8RZd8enIyHRwjvMaFtzkFUbhN7LlE7QmcUkD9nKb/AODxIK7fMGZaWi9nFiVnQh8hUvjd1+FgEC5WeILS3/8DsJwjEVNnyDEB9RYvsM+c3OUmOuZ8ZXUbZckXhzu0BkAYVuiJhCm9Jbzmisss406ihSAxbTi/RJlVr9VPesPvReo09NFPJI6SsixyYJRr5QIDok65EPuw45dyHbn1AyiZHx1bp/SKaWvob9UxCZJtTBitqTd3YhJNSVfwcr9SPJu4Tmxj/WDvOhB5mvGr+mWtzWE+1EWf1+j7ZX8LUQX5kJxIFLDqvgzn9xmlWQmqGLAVRO2UebHfD5xJ2iq90W5tz9mPdYmeDtmgt0RfVxVflbVOT90VicuWksUyFxTySS/4XH0fapfUjxpE2MvtjHGmjvso4sqqmzzuO3IOa9jiSE/b/0TiSXwIl2YYnPF9wIvlx08e9svmrx336gI+aOn5uFswJzVNMVOdU3cj0zAnymrDxisUtgZMDNqbloJJf4O3WSIyAjcVr9YpjJ3afW3BFLVxrevQdGefGZp5Lp6cGNQcj1X11fqZx6v7k6UsNUw/Y/7WD1APG7THq5r8wRv2ZWdbRUVZBwA0rcyy229xKsac624pLWoUKrvkxTldJ2r+PIRiXs0+dge038bz2t5qB7eGX/+EZmA2PPmGIeyIGmh8/KTpO6YtMiY4NIvZnRL/zWc3xQVqlbabWKpSwHjsdgBSrn7yzUkwq5rQo5b0ydda6rWb3NEAe7q/tHCLFWa+QCNKjVlTRCf8cU08RmkcMmg8YjjRDB0J9xCyAY/yPUUDl8wtGAZlpxL/UBODnj/nNenrymB/rycf39Nyrf6ClP2uh1O0MusD60EvVM6AM7vNnXHSUBOxjxb0bhxa/9OTVJKYuM8wt7muElmdwECvoDcLyLbieZvIPwyG0x8IWCw1L/z6KeAqs07Qovs47kE8hR3oiam7KAwWaHAvpJYqb24JK+nm0YYjlQJrqfxaGujqDh0n+ez6G0T7bNvPTPAWsZZJ4S2nzlciQ3+nqSNM/so719rn0guI1PFRDvyxkdEQJmNEoga+CkmdJoeu/896jjNJq2WYsMoF9gz7fXF5+Xblpgv+fMo66CHDSAPs6dW3ycL+OiLudk0tyXqdhe0a/06QuOh9TtxAc5PmFy57Li4saAvb+8bWOr+KJfmFFBq6V0bf3lL2p9h9QSwMEFAAAAAgApXOhTDy5anIUbgAAV74BAB0AAABhbnNpYmxlL21vZHVsZV91dGlscy9iYXNpYy5wee19eX/bRpLo//4UGOl5SNoUfSWZWe0oGcWWbb2xLK8l51hFC4MkSGJMAhwAtKzMZj/7q6tPNEjKdjIz+3v8JRYJ9FldXV1VXcdu9LhYXpfZdFZH3VGvH51ko1mSzqMn6fMkyaM/Lfj3YJzO4Pefp4skmw9GxeLrfvTw/oOHe/DPo1u7fivnRTXLiugvqzKpi0UW/al+J1//nORVNpyn1AY28RXUPssWy3k2ydJx9O3Zk+hFNkrzKo26VZpGc/5R3at0oXhYjQf1hzoqymhW18tq/969YgmlilU5SgdFOb2na0F7ew/3Hs+TVZX2bt06O/7Po/j14ctnR2fRQfT3WxF8Oj929qMH0Z/+FP3xfp+f/Kd68gf15Eg9+Uo9eaWefKmenKsnX6gnz9STR+rJiXryUD35i3ryQD35Fp/0b/1y69bT4xdH8eH5+evjb9+c20M+hCKdvEjqbJF2pFqCz5IlAGKsHo3wEUB6WaZVlerHj7n2qLhST8b8ZLxaLNWjJ/honJXVdT5Sz1J8ln6o07yuOhZkOmk+Kq+Xtelihk+H82L0rsp+1kPM8Gm2WKzqBHBAPT2mp/k4/WDq/xWf/RUWNE/mc/P4JRedZ7muXeGjn9OyUA/O8AGOelYWebHSA615jjVgcMdaMR5lWRS1errCpysYzzytdTc/uKAsEw27/3TfAMjq6w4t36vDs7P4u8PX1sKNZunoXbwoxgRK65daiHS4muKrmL+px9lkwk/xizyEeqt5GufJghrjL/IuL+J5MeUZ0zcFrRRgt/oQV8t0lCXzeELgiwOPVYVZOp/HsDQjWTQqjg91CQBfWvNz+hovk3qm315X0H08SUbZPAPAUDHvmVqgxRKAhyXkmzx/n5bDolKVzS/zusqKnLCfiUusHplV+Pb09AUuQ1fDQ8G6L9AF6rAbHXIDEYO2ikZAA4dpdFVmNSB9lOVRkl9H8ySfrpJpOoAa57M0mqzyUQ39VVHyHrALwRTN0jJV1YH2jKO6iMZFtMD6gCyLIo/qpHpX9aENeCXU7Toap+/TebFcwBaLikn06rqeQVEZz+DWLShXlHUEOJvMU/WrqNS3Uj+rZvP0g/6xGi7LYgQIqp9c66/19TI1P4Ci6DKwA0a1aXBVZ3P9q070mzpdLCeZGU5dJqN0mIzeqQfTcqm+Lq/G+us8qSdFuVC/07LMC/VjnNQpjWVSFguA2ByHQjBWBdK/rdrfngAhzPIpHGiMtvr3GVbLR6l+Yx6cpbX1tOa2szot66KY65bhPMzyPgB6mQIEbtXl9T7hoYEroBc9eX54Fp/9ePbi9Bkg3nkJo00/jNJlHR1T0aOyLMr9ZsmnybxKrYZpGNAsQHmsehHKSO9nSRXL7039uEWlo+eH3x3FZ0cvjl+++UE/bUyLCYSM1qmwtsdlAigHKM54/DD6ffQoukquEeWnaR29LPL0HNDvlvoC7SE6dvE37ciXRZ3uR9/PYPdBhRrWUC9h5C9+H1YLdlgNZLWKrrJ6BlhaQo1qEEXHE9xos6zCA
rCmUHiRvEthZ8A+hdEQKY4AHVUd2LjwK+UX2C29k65xjN4KWeMYJMORAtxf0uvquyy9ooJnR//x5ujl46PzH18dITEyuAdN/IwcC2CgqtFrBeoWzfSamPnXqsjp9y5wdzUMFcCRRktamD18CbzWsEzK6+hqBlxfBKDKcjzXgMuAHSFVXxSFgAkpVkr1sgk0ldSdilocJRUX1gOgQUwiOHuh0SwH0gGD7WLVwbxIxrAURIIGT4WOIiL0kLsL1UAeJVzDdIafMsmAh7RgR28FpIc1LPJwVacWVMOV2tbAnZxsEgOSpDLwbmuC9gfgWt3t/JT/fWdRTXf2ox0uEMlRBnTmb6sMOAsCbVWPYY2o5chdAj4g+lGeAtqnJeyxClYJmJjf7fSjnQmcSukYWq9hr/4CR53qHsjKIP2Q1d0HPXuoZ9d5nXzYOFS7GLwuhkA54QRb0YaiVUPezR7mMFU7KYnw1IVDEs45xsFITu2bDRiIlRli9Vd19MO2IGyJ1YM4HlRwwsIEBlZzgJVWnT9FF50HyBB81bl0kWk3+k7KPBh8CWsyTxM80x/ev/+HvfsP9h78Ec52WCPE11nyPqXFgh1ZjHG6y6QEzqzGVaGNdZXCMqVjp4NWPDi8GR58fQAj/Aro3cvtEOHW4XeHxy8OvwU5Aw6h5/Hhi2enr4/Pn58gszTORnW31yAkcIrMoPtbQhFk9R4O/jD4t7uAt2P6ev8uk0ZcarXZkH/qdjSPFCfzaQGc1WxRIdCtX9ZWNk9hQHACYGNdGUHfNN2P+MSwFtbUdNdyWKbJu1uNQkjvsA1T2Jral3fDA+p2FuMvcfDVLHkgfx8+/EJ9+/Ir+fboj+rZlw8eCv4RaFRjxFoGBty6PBe69GUQMOpt+ymi2brk1tqeUHjh+e1j4QH8/0srCQRwbDF2ghqOG/4OVJW1ZJLYCDpnlQJBpB/kSavB8poPqodfqIEARGJuEXZtH4422H8g2KTvgWtqb6fKPqgGutT5qx8f9uXLI/4ylD9ZDkdljMcQP4D9m07Tkp5U8gj6hP8X8pMZC7tADbK0NNFbPyp48D7VPOgiWSIHOl7hmU+Mfvy3FTBKa9qIsS9Vvy5AWKyz91Abvg6v67Sib1hmHZQToIH5dDAqciCZdTwEvlg1iQLW0eHLs/jp4Yuzo775ff76DfzEommS32Jx7PvT10/ik8Pzx88BB0pSBi2BMHXLzn91v9kf3L3Yi3+qLnvf4LLDA/75DXy7Ksrxfy9nJVDf/76Cr1fj3jc9XWJwt/fN/+kgZAbHQNril29Ovj16jVwSInG9AiLZnWdV3XXWqhfdjS4mwIjUl8RxPklBkh+B+DGOGKmAmp7mcLC9Q/SEfYoMDgCoIPrKABIRD/hOoO1A7KsoeoOgguba8awCqp/CEQgLeeuWO1Z76CjzJUvcLMnS4uoUTx09pJ+rPIPjRjPiL2EQ1h7ShR/ZhREoCgODLQ++YrpHGLKp6YeDL0xhaNraIe3DHgIseV9sN3JTHnqwNxQst73H4a2z5WEhjuBkAol6bJaXVgpfHQM+kFA0nQMTM49mxVxObtTo4JrTGY5yxDmKEPAf7IFqNa/5LbG8u9ECHmSAZOrQPhEtQoJiBJw6UBpxCfHmCvnMNAFuwCkbXRWr+RixpkahGncqYHMyFiGwhlGpFiJawWqGFYjxABRkAWcMfMKoBoSFoz+roTF4luREoGiSxC2ggiFB2ggAQZXYrRg26zES68PXzxAF8SwUDeTj05OT05f44s3J0ctzzRowVStHB8woME1DbRY/wHU56KCmTF4VV3laOoWnZbFaOk+qFOZReo9KIB7eoznqSbxn1KH9aAIyWXFljwbpEFCIcTpJYLEOSNiVwrsExRqEQuBTr4nPQqWGZq2BzYjsPY/aofmcy1QFiZ6rCgjENEexEQouBtQuYAoqTHkUrH06QJlZukU9iQcD4AxG7kTKdAHkPbZgjeMlojPEda7ShRLRSqBsH5Ybi41BoF/gDtlcktCpgK1sltYtPiqW10JXqmSSxqguS6sG2LmSYOwwtZRlgOWoF9O0FPceXReMYjz3qG3N6knDyTxDWnBw0cE3ncsenqF8ujxBVI1fHzUOl4u9y7/f7z/8Bfc1fP+mS+cJHiKs/nuDE0K+TE46EDcWw2KejXiVl2m5qG6dnD45ik9fHb0+PD99Hejl4u7B3iW09+bs6PVZ6P1/raYFFoAmTsIFyqsPP1S1FLtFBeNvj2nj3S/+AB/NoSoM5cFlFckow6yubh39cPQ4dmvef/DgAVZibW6zypOjp4dvXpxTLa7w1VdfcT+yYQwc7Ho25MYo6CyyHKFHeoFUdFdIhcpVTi8SEICuQEAqVtOZJ/3RiTlWWFLguVsSxR/B7gNpZwVdFKMVki/oETtarBaqcnUrBj4tPjl+iacDiIpK+MvySXGx//AS5aPuo370ZQ9LPpSS3Yf96Kte9KeWOn/CKlRByutOYL6qmVui4JBSov0iqU5zsSTeOSIYyATmrZL8GiJfouepNcIP1ZTxkMZx8ONH+vGjwZeD6PGqJAFbHu5Ht6udXzrR7ajTGfy1yPKuNV8Wj/Fmper2WELhf22R+9YtQATirZXytiuyWqfTAfFWK4HU62+Qr0/wwNmPXqAGkQ4j/XqA9YRs1asyNy8Yabp2l+OsYhIAo7W7larYrV2EjnfdPixPo+3o4CDq0Kg6RtRwpBqa/2qJHG46pgEgZ6Mbiv13d1G+LUczlmeXdE2FFw3vV0luqR2YrFpDtdrkixhnrl4vB97v3sX9y8EoWWY10MSf067bj+Cl0x3usAL5+Xo2yCrc093OvbQe3WPI7ImCo+Op1D5x2Bcdbh5o9doRy6g7h4vk5yLvIINlt9scU2BcqnKjrKswWtcCcVmMHbo886i+bujp8Q8nR/vRSQpbcHyClBFvGo7xGAVC9c1Wy47Pu2G4uEP2GiA2DZ/LLrBft+wdpe/aZg8pcvLp2yg0gg0I1Lt4cLkJl3VTnwmntxzaOty2Bv158SU0NsEb6bMdVay661BGFbNQB5hcoHPD0Ry5wqo7mlcGb5jrEzZQscXz7F0aPU/K8RUKPnA2vUxrYLTeRZMEDnFphyUb6Bf4+Aj5aNMFnnJJNM3eAyNOj5iPjq1RxLEaPHEIzKJiE6r5AQtqMNNoihevVyT/FHxBgUUAQdN04MxjN3qtBmQalPb4JDRDPIBWqoE7JNmudRG/zyoQB1AEhcUxRXrSzTneZnEZd+b0/mqWEUvMrZjVpEuXEVLD5jvdbjSSI58nmaFO+or7Ssd9lFNQlIAZQt8kU+IQnWZU4wMu2K1GPa+bQ7IxIT7OXTeAr8wK9uK7NF3CMpKGg27rcFU1x09VnHZpejy/atSAbHPTmvcDNnrpVv5Qnen4hQR/LOAzyuONlOZu9CgQ7fvRnaSc4p87767w2zb7ABjjEgFD1wLmysOVwSvSayC7S/zzCFlbwyJFZym3o+RrFJJIe5wr5sogMqMg4L6eA6uHLYbtlk8bpIRLex2M1/oAxgC8usWdtChA4CWbkQnISEAT05KL
4+hIF6Hv+LEikG+n24xvTFytu0HztcRHffAGZzRoa5ZlCe/9wUGTG4IyBl4HLgDbcQ+ljJGamn7YuEj4qCm1Dic4p0afHzlUqzgM7Ja7U2B5eR+oUj3YpyDJwQbVT2QX4WVUjHJ6LLrGWCmau0CG1NXYQWdVT/b+CDxyihq/6qBTAfkqpkmdxkUZs9jXsdiU1yC4wvn0Pp2j1oG00KSKAmpzXRFg3ifzFZMi7A1VhLf05M7Yugk4K5ZV6cZMZjdMR4nWoNGhMoPm5qgYRxKJ+nHUH4viHj/YG/WNCp4EWP2SL6WjLh0y6mFFd+O0XaybWulWjgS9cWFtrPtugJTWz1o4oni1EEDVFw1R/qMYyUb7OIFm06RgWSTLbvsy9s3tRnfcU/YwXdW/9YQH0GsdAoK3OQQ6OjcNYfwp/dJ6BsBK1wSfs2ebJVMAbmwTahc7kL7+/za5yTax7hqsFd2Nvk/KnHYDWuIZs7TmTsJ99ttsJH+h/wEbqTmE32ojfVrPazYS/4wZrbvF8K8ynp2dHeTqsSjfeMqlEVubyy4gRnic1Am8W42gMFlaYvWngP6I7mRMRgwxaYXTHDhKbEwaWJYpWVAhG0QM3QD6DWAqjKsPeK9vfR28tSAItaCoe5hfZ+l8bG5uua219MCDVXD5qBXbnqznsi0pc6nIuzRGRGzNamgVcVdBXgR4d56Lqds+NLHbXDuqAW+e3keNDiTnTxpgF682+tqQseeQH5JB6VZoVCxSZVEIgqimo0MyLUU5OZnjJVJeV1svm3VHbPUaxJMcZAzCe0AVEj1gh5Wd4L4iCzicCl3Cdjtv8nc5is/GigmxFVXJffgflcl0x0N7ro/L0VM8YMwSpAA8BlI/zkja6dITHBVehsVicEm3cnQbzhUTzRirzfQ8nQMLqs2taYn3YUyz/bdOV29l9+6DkMZbdJ9kY/oaNvakwwetsuhIZCoBAuPYtOMOdh+vG1GkVQ2waRettVU9KlZUCm+3eGymwcZsUUMO1bklvIJGA0zk+PmeNhliY3la1ZbtmH9+svUZTYGuv2s5w1UfaAlbk+mButquC6OzQ3TV0K0KbnKBOAt7oMyMQXwyHjOoQNgHzGTAZgZ1E++0p4tzElphMggQuvrGRq/5QuDhHh0XMPmUL0+jSVZamgm9jfl64e1b6vLtWwstkdGg66YUMU2PiS+bmgAjPCiWfNErzZPqybRNGqMCdRcKlixn7wtXso9zt4pjX9UomSdlXzEu1ms6Q+qrItJmSZad14NBtE8C1P7b/1Gm7wP15S1uq3RUKywjewLgu6aygNgt20Tgtk4rpVYY6OYfDgJNAEl0kZqeQhNz4JPGPF66qhmliI2if1AfhR55YZ2HeIgiHOZp8k5BCz/HDTi5+GHgJVeDi2V9bcrIfn77trFnpDmFjgktQTZO99LJBDV4xcTFamlpMIiumDXc35cNsVpGSj+IZquEH5aF+Aw1mQADhBqDkKZjjmxrS7F9SIkqxAHzEIpw96NZNkUTAjJfsHevYxHKG9i0JwfHvLgKVB1QD6II6rPeVLf09u3fO1QBrffk60N0laFvj+DbBdoiXP4S/QKgpD1H/ZKacWamh9wbgA+tw5GcvH3Ljb5928di6uIAAG69fYhvcVdOMvTiurboTEpsqV340du3A4fYuweenBhb8E670cuUbHsUt0cIpl/zc8R4PiyiA38hQ/2GxHD80Os4q9i6Tnwh7ALQ2qsfHzYZjMAwtDwvfW7H2/mcgVQOC0ShIbO/R2PMj7YdM0lOWw3ZBnGjrQYx8qz5mYfvfHf44s1RfPbq6PHx0+OjJ/Hxy/jlafzi9Fn86vD14cnR+dFrc1uH6FUssjpebGw/MDf/0UAoY1ea7EedO53oTvRHd2YufBH9rdXx29y4UBaMm3W30Q/g7oxl5D7aoJb0hsNduxFsJP7Mo/UvbdsW6VaQXZYRKD8ZV9JqlvM8w7yposJTTxc5X/rR664brl3n4lITqSQSV1iPBjWOOXV7oTln1V7P9KpBot5tgEW9NRh8Ee5TIQCsc7f3zwCEpnS5DhDN0p8DGH//5Z8BEqysId/GriVXBuRb+6xlUnptHwcidd5gz4dOtWzitf0PPh5QIWNKOkNrnpM3Gsy6RWkIAhboHeZlkFUFy1ZBk5GGTP8dVQbeeCXCfVikZ5TqRwq17NsgQSlRvlnCd1i4d9RxdAGtZI6mFELqOJ4XSQ6WJKe5XmqN+UDiP+VWOwEemSVWxUY29g4aLyNh78p8vM6BPhssrjbgK6sQmlNgYwx7l38GXQgPl60DmroDvdggqcUohjFVIIHsIEBBlgVs+End7bWQPlU5RCT15oGu3qXXffqCIjQp5KT/sFZOwYVKrwOLanIryIR6wDFcyAgvhQ5ig2vIslIt2tP4yOF/9NBbV2EtT+JPXB0QaqzNfnySE+gppLl1Oxmv76HN0q5Vx+gqBej0WKHXAmoj9C4nx4wyew970VHgF6t6udJyhpAp6/QjUjVLVyVIntmIFybJYb1+TruyYXi5uNEDcii079QcskW6jzk6Q5OZB8rMaEwuY4FmokVaVckURmfsi8Qed36NdtTohK58XNDnYR/b+POkKO6hHS3IxKzSwgg3+/fuYQk0aOVSpCaDEveguDRBumbyCZmhyydKddGyYL0M3/mRywr6Ji2TklVdMEyZSVIqB28UCmg240qZewzTaZbvY8iHkrWZUkCtF92JsBMPVwDU28d/tipcpct9VGeV0E99lcIy41xpxFxTyi1hxrEMhdWErKOIYFlKskRiHRhG06CRJtA0GuPivpY2Ij0c03nMpWIatNOwaRDnYjcnisYlHzFMY82xgQ8EDRkpieun3zR+tAxLcymGT83U0G8K/9Jj6AB+P7DoPsLK0nQ8zUTnuSzQxSRL5j7Ujc7Ft8fEggc09kFJwXe6nT8Df3a/zwOw2Ff2yyKewXPL5FG8LNQSC9zQYJ81b6wRdMozRAZAeNKy7t4nLie5uL9PvV66ZIRdda0Jn7krQHUIo0JTZvBpa0d5ZC03vIV/9UsGMeK/A2bu+c3rF3tVfc2ufrQT76o975RswNledQfasKkZ3u6gXAhsAL5egFWJ+I/DkzVgimZddFQzp0CgGXvZrB0amsh9DzrWusBsEoz1VW4GCy8QMwsCk46gX3Q3eoTiw0eCQw/i3wEERTZKq2ZBOP9kOjCf8EGlJsa+nYAgxVxTBdJ/A7HooLVZFMQF0wwWBGb2tbGyhqNMiGFjh6jPTXaK+hjn9tA8GpEXnA1EhETmAQdUOrcU+OrT2ECGWtkfpPVZ7mpVA5vqlepNRyn4d2OfOtiCbCDNNsTTg0mjSueOfDobClLbfNQgjt6NHnhNBwi2S+6Vgw0/4MqkcbM4DItnV9VcSYofe3yJY73KJYS30cYVs9UiybsYeK0PLB66aLGjYx8df2vF2PB5hL7MMNxvsa428edKZnyqFD7lQtVqMsk+0ES5Ib5ux4doiLLI6Ma9IhP5rjFwseLvgVwJnPnBPFkMxwlesC32oz38497Iw2i6OGplqsjWjzA
1dOWijihSjCriGH7iw8FqiWaL5KTA47u47wX3sE4Z6I0H/zs4d22rSJrt3QOCBDQQELI1RLCIw4N2bg8eTpRgTY7mtDa96B531pe66t6cls/YS+arxTAttc9qrNfQXVy1oorbfCx2X1zdqC3UJSxdHLKvdjf9sB91Hv6lEx1gTMYv/sggJQ9IAmxSTlfGDOKDmrQ30M6D+ycddKP72n/zADffSafnDHDBfo9MTdA786fqTven8Z2fBt/Avz38dXG495/J3s+Xd3vfwMEA4xdgAO5M5sm0OkBP/2cvT18fPT48O9K7bNG0a2WRw5wb3R1viD282u7U7CUN27sW52HDrpL3HCyhNQwx+LfPNngD8+JVXgzIv7n7QFkwed4gnzQoHgIOKuoWZTbNyLU7RzpixtvDAVvj6Ed65DQKWt2DSBV4qEFobzkzYCluY+Kt9TVQckL2BF/SzaS66aXb6uRKoadCCmvboOtkiWcCQlsgWOKlN0r0MApsE/2lZIM3l4J38kFkEZ0L3cDlx64I+2+y9QDvMFwBgQxAfECWDkINlD0IUqRiIgikdzRW6kcYl4cPC2ugA7TU7KK2VQlhygPXqIaxeqystTvfdryHFA+SqDaMXEmgZK2JsEfCYUJ1BWi+2/qwE3hheshq1QFznAQO0dtN2IIExMyc4ICiXjJCi43uX77tRyffaqRDoQhr9qKvbeqbflimI9S9kjyN/fETACeAHiiVkFa9tn1rjH2DM46qy8IkAp4nHLV3aXVo9+JBxb0T9EEGxJjOJrrQ91VkS9szhtQ0hOkPLvFQMi15CvCPQ9wBV1EnZ4Jrlo3VSdFVBEQhrAsWTyPs7NfojhzO6lDLKiuIaBe99IxuRYx62OMLX0Wm6DfcxwtsLLGMZu5ET1TErWREaiJiQZ6eHT5+IQ5jd6IT9E9BxXmFQbfgmJ/Pr4HXzCmuBqqcop3HCfPjym+KdA/KQR5NRwTM6FC4I+E/tImUDvpVp/M5VCVdSp/tojG4VK5bGjJxrNLa9zk7p6AxbAE4x3LEeVcoX7A5BjzCaCF7FC5qzly6dsDHiXW07gb1UXLEi15cVUZ2iA2QJKwFiC0UNgDDkorLfspjvlZKHDN0tgzSg5cF73YxCOjgLD7+4c3Z6+i/I/3z2etX9s/T8+e96PfonYmPePEv+O15jOEULrXJIHtfUbAV1z01GYvHEftWabMvVm1VtYq5ISFc5tdi6eMu2CK5RnKMxjZiGDeCjQCCGDq6Xkfja9ie2QiQARpc6M7I3oe1MII7EkxVRCY9FKVKkzh3SGxTe9zoy5XlIwmPKJFkYKFH86JCW/064w2KNbwoLcXEDIcCywLaUTPTFfQP/EEaHb46RpgPKaguTQ6QrkshMa/FXkyr2FTAhF40AxqEusYrHtVVhjgGsCC0Lpg5xoamIE+vsrpYVQBbZUUo0JtfM42frNB4eg+gU0x4jgUHSXEtCy1lZITIxVJMxUcEGhWqqbOlUDHNxHMpR9MuHEzCl0HLaySl1xgUgtXHY8+lU+L5OLFt1GnjBrwJ+qINVygoo6K/0YDL/OPxPFxNSRVZwhgrHbgH/QRZJ0yhMxDjFngCp7Za6/B9AVRX+ydUrO3mGL88snxvVU/+aFX5nn0nyoIpAnVDm5xleN4LACanmpyxGNEBKrz3zlkp4TlRq8JhQ+kJ6gAw+LpdDpj9cthpKig0OCfjAQIn4PcPb2gzeK/Cdwm6PavvkJonaM3kNMBfBnRdnXbbL6vTapQsbcsUw5chL9X3gzVZ57g/gTbLMGdSHPEpAKstAMJ1ZWZeE36wJy51q8lDMylW0TQpWmtXmhynNrAcESeooJMrihkbcUtM7CIC8W/O1AoZlJicf2DzoMO40QC1hseUsFmrXMUR4lEJH/N/z05folOtCpz1RhebZFM060SLarxtsYn4Fd4AsO3l1qFIFU1xVtSBXYszJJfpBSCvAqJQgYuOWjA4Lt/IunVsGQbDBq8FuBugFHgmOr8KC+pwEF2l2oKcQG61xcFGWVElgXiA5n7MEiFdA74iMCOk5tQPXak4Mc8+Yun84B+dLdeSOJE0fx9P4OhCetxt9fuOXsCOEEGHhgvVsrLI6fBShxApXcsphfgsbcsSZLf4OdBcqRm0azGvL6DCpYjCyPFLrKCnMtKXRf0UWXDNTqV5BkOJSdBACtWdax+xnZ2dF+qxYwCfkHGu8E/HzMYa83uWE1hC6IutMB7UJQVzAj68WKHC3DXmpf7TsbkHQ5hoyx4Yk5l246ZA6qorbTYMEcmpcUvVFti5rSFHmyoFBHisJ4tN3LGu+Sog1A9iMmd2ZofvaMl1JXfh5bWX4sMdsdu+GrhX5QJLXToTcetZsTxIaxYjpq6dFxVzp0Nidcugm6Ye1gRD89d9qClp6VypTEV9ToW0cEB0lM9oszeNr1/Isy1t84jEasLIBeqts7xSexIdzzyzK9+wAaVL3C9MXtOSvWpFDeS4aPVsx/lsci0WCR7FIQMVFVYagynv0OGLx9Qcmsr3Huz01uwkGbkJoy69BJxeVYDEIMytcVkHxCl6zFEQnEp0x8V83IxRbg4iiaFjZgRYgLdBA39Ta4C60wneKVpGT+ucq8PzDl42vuE6T4ixaLl0bNx3hcFtjGxuDN8tB+MMhNFRqoipzXHOrJEKAauBbxnbUNBBVv21nC/dI+XDJPjGAZrdSnzwd9nxSMrRPo4z1GLFXZB5J319uUDJZ/rR8BrbikmPqO+tJGwm/2qy8pxHJ+OpxarBSm5JFiCVokYA40HPV+gWLy8kwN4YMAOI4ywt+XmzfV0QXsfFRKon43GMIlLM+VywXz1gwW2ZB0fObBu9bj2bOHdz+LH14pI2hpYNKQHUGr0D3nm4yjhcbZLrtAGy14BGcGBA3Ug3mcNvPKpBbCZ3PorWqZULnOIG5HU0fEH2CosIJiM7bfFMlJ2LMkbcu8OkiX2QKmf0Vvn5ZKBUxkrIxMsyfNSNGZQx2YUjfgitpPKAl9lc9B4Um1O4rYLd6NwuHHyCrpzfbtHAKqHw1HzqVnNKu/4z9N5BYZSu7N9uUUZsNE+kL7ZS4DFp0q9mqRXtl+LuABOWfljOs1FWi+SkYhqjcmW5KpdFZS0CHG8t+yMcNaatcMC7SWJaw7Di9lougJqC61aNOF1bixAq2/LGw8QmVcCLsMZDt1KDYtAdpvespQrTDrsCP2kpnjlFM6/YCKOpr5a0aSxOzewz1kcF0ZOSiLW8sjJ4NW2nqACm/gq/0pm5HAOh3eiEla4qCxYiMay5jpVQKY2nLTtxeKtrq5VylROhJRel/H3BlT3gmTKxtBavlsjDkaOFN17xAG0Bn95RwfcEf27Uoz4cljjU3zydJvOYrmipzU6s8pXxfRIbar0joq0yx/n9CsWLKaj0Bx1jzr1uCh1MTUtnYDPea2beD/Hdbr0N7b/jxDx5gN6G9WzNchfv0Cb7vX86OOp/H/NQfhE+pLJnvBuxFEG6BRvG4hGeox0sAvpabieTaYLsPulI27llbzl5EBztJpbH3QZ/pvki1L16hI6iJF
ve35wjTkWCoAsivOzHENpacW60M3wGynlBuQPoCqI9j4wXaFhpZUQdQzNgBacJC2xEEtuTiIDhK7t4QmeoLDIu+GKOTXyDikxPlwtDUuNP07G7po5hk3IM85ZdIO4UdVefF5WvCkhjntBFmwrirm72LPLCbsnoKY93HDYiRWyIzjxy98Xhy2cHj3skWWJkY4qiT+/urXITzFCS8bnj5lOIX9njtd/qs6nbcmYF5mmOK9Kc2vsed6XDawQQWjE1jYOu23zUGDVuQZFcqi4QSI5j75d6/Pzo8V80KYnJkSx+cnz2ChN+HL2OVDJM9elUNWZ9tIeH5v/oNuFyyx2kdaGS+NwrinJfqCg+94pScPpAUfJ8c4vC5gqVhMdeQTIAChWlF15hypcZKIvPvaKYTSFQEh57BZFmABqFCsurQIXtS5NEHQQaxUfzCqPZYKgsPDdFf/lIVNY8ktlNvdbC+hCiGGTbFNQUZ2P/iv/rNp5sUZl5wa73e4uKmVUpm2zesmwsaNOVcZrM2ThAuAsMfLTXlJYUKZZSHtPBdvOaqIfWCwm4Yd1cIi4xIuysjGTZQAIpnIl5SldCZDCdlYFJjq7GbotPCpaVJKocMggqZgmeRU5mtoqvPYZ4NY6HwmppNaRSvuCtVELm21k5JicanbIiWYFQbcewU3fdSU2X8VB7WpCIfpVcD6y2v0+ZoINwtyozOpZ+nlR4gkEr1zqZXMYX3Lacp7nXCccoK9X9b2c+tzpwklwKL8wNjsT8o6qLpSjs2mQquo5+OPi3qCzmFK6K7hisxd9ChiMzDaMCEA6y24FpY4x8mLUfmptK63Q93c6Z5P9cI81ayX3Y4gVNalIxqx4PmuHgrU8neqPBY5QsnDKUVzk6zmVx0cyh76fugc42dJDMYfkryytDJqF1FNUAYCG2GQcdgLhyZ0O9Ga646Mxk8W1NkavFvlKBD+0cSSH4ailIqeJV082isGu6nYvvD1+/PH757FKM4BrFmwJ+QytOu8jKbiHmZnyzJMpx9D9WbVswMOjAgACm1gDM8thrQoRKboSGLfcpiPy9sagdaAwONGyy+c7ko1ax1O3Xv7QC9snRK7RjPj8+fRlZQFbGhvZMezcCt4bZNjCHbmx4k0DmyZJyyPICyAW2Y/Grvy9MVp/KOmZIadEXTVY+SpbVap6gITr3oaur48hQVZOPiW6wlzWeVaMZl2ATO5X9SrfiphkCgqAGJOLhNWkVqxlF1dIWRGoq+oeoRCQRGJzrXebb+s6jcQocqiSjdOxNqXpQ3ybKVUtd0FxT6VxpTdMPSxg8Ev2u++h9UlZsX2cdhUOlzrGsDoCx3DIUw65K46NmkGBWpHmWv+urAwctv7RdOr6BxQVoWE1g/RpQJ9UOZliMwmKRwQwSWoCc7XJGMDOA5Q4AtMy/eOkesLUuz9Pb1nryqjBageIXVTwEZyMHq0IGpVnraw8NH3X8BKSU68wrSM8aJcn03itJz3RJC5CShxtT0NIp19SAkwGr2xo/bHTMedUaZfFhoGzN2bndsvgwUJZDlPmF6WmgtNFlXfBI+zK0vnR76ZxyrLlnOMSLeRWnZBoy9pVVumEd+p9HYMFTOTLYY3CaVwXkddfFGDrBKbIo2lZ2yafXb7HnjQoPpWaEelX6Irske3TVcaepT3PLNqcAj80EzeW7txrmRWNB7NDFjc1xwKSD7rMWFMCXk/rRv33J50f/9r3FoLR+7voeuMt8wH/8ipzpT/72zfwP9Le+nRzOfDUNaScKSXyuLlWY61V7ig+RKjp5cbaXYO6SgdT6njNyECeYJnAgqauzSvIg0NDuERbQCSf1pOEBEKwaj64RCj2SpP4aTUHxDyICtsrdQ8tSmXhXlWhBbamEnfcVEVVYy0uWKqf24bXpemp1PTDnemj/IOa77BOO7fnhd0fx2dGL45dvfggeXO41QqbbBrIcB7cpYviDYFPOLU+As7E7bEzlY6dRpVJTbX40lBlmOVF+olvYvJTxrFpptqp60IRYfbrlqI82a3Ts9lRX1oVFVzcUDApSjtb4KVNbWlWMPNzBzuGwKGsSAeTgXaEeW+E6JrOUxHsw1TEJkt15NpT3e/yuh8bs7IAmydx/t9P7WBz4NdafdzQ7L6hNLenV3dCl4prOGxWwsYE9cuWsKb2HRdYZxZf/5t9Lb95bHU7e0RSkwFJGzfRYD5ZuBdh4M6PAFHII9LX9F1oalkhjJuaizp6tf64pXl7T9vvBuTuz8yHWW7flkNppDVEDI4LLrKavHofMmXhMhGmLpB7NcAZI6gwDtwWry1NuXOWcngXsa1rGllEE+ov7dHrvhZHbr7PLTk0R5V3ExfzCyq6gSX6fD5ikqlVkB1fZvcveHRxbJersd4x/XRVYQRzlg0vO9cjhFh6txzsba5rY8i+HJdZ5iErzmyFKG4oE7vpgcukAGssLxAj6Mjh6eXr08jzE0TnU22azgJCTlIl6AG03l37AaxAQ2JvyS9gtYEMHxjFQJ2RThEag60bm+1+M6cidxmg9wP7QNllkAZtvvCwPoU+SrRFLqNkNMut6eV9kVEukqrnunNztfDl3lRGnUw+qOobvRg61nk+t58rbD8pidOmxpR3C4ydeoNlgvCzw4jlIGfAnRphVieEbzIKlpWPotQfF9dsijqG5Gg1hXy/PTVQoW5MDEznIqCQILFvqJMYZRRRqahtEcaRnHNxuXOuWv2AqbjC/3io4szjqVpwASHNuBMHg0ir7fvy81hHcOZS/Fe+re04mCN450lOaJcvxl7IKRSYezsunZ3gocN71jgysE00q1MSg0y/hXZ8LoKerBNchCCj1f1eMMonLGgQH3zg2JsqvrXMPLS3uUW8oLHdKTwZg/Beb4wl5WkkWZbfBhmNbKDmo2mz2iN2NZG03ddY2tmFTTTEn997cGqzbb3ecvs8wPr3VTB/AzCE2RcvUpxBRPTI6ylNFWiMAyhcWztp46wz2wG484EqILmCVNivS2KewcRKI4SR9cTUebbgQflRDLZyKreOxBx4OtGQvlmC323yvsSHdRbW4K31DG7dzWn2V8WG9aPsxHBS3u4mRW68KUzkCuYYuFWeTWEebEuKulTb2TbQCRLCeAwVVW4bdp2hWDbvpzwuahoTHV5B0L5IMMd8GDtiYBXcDRN8WcQ2sV+U26GjgjB4Enu2f1YattT/mlCXkkuzYQanqSvnOuiSrprriMbCW5DaJXEgLwzYw8+g6pwbuBUPkZV5t54o7v8w7gFwYurM3nazRGQR0tDbAwgaOVEqN/+soayU8lirW1+lar353YC80PGmnUdYMWb9raXVD5cl7yR1FWP/U3oEzMgfVbez83YFdtSHskEnxWgUYZnTnPCYddfmPlcIjxTcXqvila0wbLnPR0ergjjet4FCSCeyPrUfCpdcORIr447CA5h6RQe8ll74gKQmPKqQv0+9GtrxrqZ5dWRe4W5CK2M3OGqRn97lR0MXPBvlSGTl4QuW+3GzbRqcB7xj3Y430wPret9f7wPre55BY8VVSHTSIpEAcAPa7oGp1w7xQ++/NS
YLseByiytXkuTGEDhl1DtJ9SvspKNctzTPQl1Pt7cxXkuvuoBvn3T+VeHvjUzgofAXPYQygFpOAS99EFMY+PI2AFqZofGaqjd3MUjYy4wT2rcPpBskCN7a8GuPdxPIKZEVpdbC8cmR4q4NmPAD1MUtqyYdNLncD9o9maJHG+L5vBZeimNirJQdS4h0uEHBwUQBOMa2y8T/jecK39VhYDfY3PkjMALBvH0Ibz4m2MyKIYagtohXVGE67Ye9B8DjoHp8SbvXVwdBrORl+DVRTGSqkzbTX+2RqS3u7ndryffYNqS2bdfx/avubUNupprZU8dOoLTc2LZdIbaeomZNWB9PS0YxaHfz61BbGs4baMrbx1hAQNMjtlMnt9J+T3LLJkya3PpzVGH49cmsGMP0HkNu9B6Jct4utu4nEz+fHrU/nXBE67aSUbYZuRknJ5O5fl5DaixXjjUzocuZXpLaiCrNNoTntfCM1dxBVxeARaSvX+2MQR7XjY7OFYLNW06LuvV4Mi3k2YgSChStGdTLvaqj5t/Rt/bdwImoFttgu+GndMpsEVJoX2t/Q5FT8XwzxhNNBHaSaKAUY2tmqwXFao8XHgeufGaKP1OvvDnTsTYyuSevWksuH06qgso9qUlSFWUI5dgBEZUIZqGHUHNBSSfLzQgeg/tUAzVBU4UpodBiYYZ4h3aAIqyYKKo8SbZPRSJiCfymQMdKYTYiB+xXe2TDSeIbXn14lvM3Q9X53EPEB8E94hpIlMpbt3L99/1FBNgJq4L/xaRoYCo3is52ru9HT4x9OjvbRaWCZlBn6SCvHbnFuSIaYSYJuAZW1AbtxDVMV3XbstZlSHFiM+LSVsm6WVGhvCvS/H3XgRIcp+F5M6sNHPhTQR34LPWtNmyWEfBurd6/jbfpd3zd+djG6GmbmVlEcVCjiSmeNYccAsi4cNjHONDRKynSymqsgsxKcttHeKh+n5fw6lInGtEVOIXwDD8u0rqxpzjqHgzYSnwBD/KBm8uM7o4jfTmVFk5QWvuV1+9IF5+DQv5YmP0Yd7MSQtXGU7kWaZlGvjl6f7FPEG0p2IPFBUgrcqc5K8ippOWuq5gu6FlE9oZuhbYHVVx2/OD191WvpeVgW79JPGcDa5HdrOahfWZcCM1TMfj/IWWxkSXQIiwMdsngg0RLhVddnTdYyvnZBRO2bns34QcKoqsIO0UdeE4YfIc8Yf4J2qcbyObihbGM5aPzrSji/ovBCto54llu2+dyuCV8ZkqFUPePtwrEv0YCn00MsaYtLic9HixZnAFgHeN10ApBKTYQzrV3IVxjAAetpbKzhOTTveD2EDdHazXynFNye98TPNvxns9yF7VmEtVoXInz+3YQpNb2vZUwbxRpjND+DANZGI5tuZVsFXfzY/h8h9w9Bj7VsgdxUohQJraw/7tkNWB8qXQ7HrPNIsiu9tQlUchqMq3yXnFRaeMTt5W38hI4j7wjTJxOFjFVH0zZyMX6CJ9gWR5SGUwvtD1GaoA2kTt5m4d5aIjInmUG7svrHQYOOBGnI3vtxh4fiEo0gDn407m1yPCrJCBeaFmPAMMowhC7s3X9JFtoVWmiXKfnpdDt7TJwHeFwtNzSlnOxVO/cvt6rmbfxmiOfQUEMscIg9s9hALz0fPvozxUZdpPWsGGska9N6jeZV3/BN/cgp1mJ/e87u9GjPVhm+lfk9EYgxdCZt+4oYrFRnbOVSgAt7y2RqHTMc+iSnACXoh09+YpjweD/aWR2UV3sf7v7Qnx6U9LfgvzvO6PSPj2HuMJfnlQT8pAy/alZYzkAbbbxYSZy7kFImqn1fKN+NXqVlhLDggALpgKeqAnSw6TLaMnfuIlbudZDmdg46Xitn5NGQUUR89N90+V4QZSUuOM40PgVB5/D89HX8+kgGFuBlQcjOx+LXJdl+imVKuUxdKQOfVqGmsS5UDTeOucmIn6t6KtcxBTaDrpIhkn7JxcSph3TY92bK2N2oo+bX4Xjx0TmGmuEUOBRdTfJ4slJR+XMMGrONUKVwzfYCCOSkI162HRhVxy3PcW4ONGgxjX33fs8vE68WSfUOynWlAuapbDBt5l3SiXQQHSobMMGQnjuradHxocqhWtmaveRkTtmcQhFaPipqr9FEKS57Y2k4FAVGKsKr4xw6w147trfMNbVOLAmikD+nN2dHr88QC8gdjqcf0A0104UNk7HZXpZmVe0txR8EsQp3qU4dcVXQ7FCHk/5tBSCYp/mUjJby1OwsNtP/2yqtmko4O70VxXnIU5O8alSUQPCXBTmtmr3h4glZfY4/9EWdlCFQVwsyiO0q3AmA5YYraY0zdHKiXsNaCxpKi57us66HAgAhWsbp1Fts59WpkyyXcwzbCufOIEZuhY4hPpQoW4AmqzQL+1aGHfrpcd/svfAxbJ0B1BN1G/MawuRwJCQfcptE4S5gGS/77kD7uqGmkb16I4cujjEb+afu5n4Rpxq9igW1fwijvkuqEPU48MiHkBp6twqQFqFV6lR8/f0Pb6zsaWdvjp+4DMjcbnC6VYPP7AafrW2w2KrBU7vB785/8CkC1cDzZNWPpn1y4XG2NZ0JNjxpozMawhK2bq0MMxcCksmI6M9/0U6Lvz0+P2tTJ3Wdrn7vNNODqTgLbZQPc39p73rAsRHa7mH7Fve2bHGvMQenh4Y8s9U++Cw73XL0+6jLtSrmkE1FeW3VO3ty/DpUT1WbJVX8IbZRClbZDOD30dEPR49jjRhoxm9iUvP+/8Gqjjfc9kBgdRo92HzpM7n+IBD0lSMbndcdDtGBzDgyNuzWmk4mGHgko9B+ULqb9ChbkdWkpMelu5OstpIJolpa8Q6E8IoDSKjRdOwFFASpsuAQCFhlB+93S4y9TZV3+ASBb/ZNieKXimpAX22eSj/zCDvCWtVbNTaiNaRXBsqcdbYgQ4QaXZpH5GmC0RoskWo3mtX1cv/ePXhfDTgUxaAop/ce3lMJBwgpZvVivhukkyhKN9bY3Wj8hgJ1N+gdkuno750fMGipleLyl0C0t2mz5LPXr0Ili2bJ0/PnXsl1QcC2HPH99cMMvi5aX1vxzck4j4agDszmWHgczQ4wwK9BmN+bowQzh/LxI+9w3s7rwHCvWlr7fn1r34db+9DS2g/rW/sh3FplLTDiZKAIBga+H3iOsLNJmM0RtKxpt6X8s170pz9Fj1qWuq3WKdX6ykMBRp4brCmmf12zpvD6Jmu6vrXvw621run61n4It+as6bMbr2kbuN/AqfR1cJGm7Zjw7KPW9JG/psUN1xRI1bo1hdc3WdP1rX0fbq11Tde39kO4taplwWp7rYGz/ag1/erm27UFE4p2TDi11vQX+8A9zitM763OiyzH0L9N4u3ozyi/m87IJ1XDWSACTV1A9csBhxdWifV0HWQ1izIu0/FKGx9i9fD9n/DPoU7w2eUFPrxsyH3Suu5HM6r3PQ/jSbXhSpeV/pTwcfNdrgG6Mh6o2N2lsp1AkenjnNO6grnYW+8z7ao11NAuOPboZd9+ZDkeekPXjfTW9R/wObtB7+Kt4vWs4LXdCAJ+
GDcYgRhwf9IImtbLNxgA27x9Uv9tqHmDUdh3HNuPpe0abNNVvrVnvHsxOySAlTELt4VtxkYXxFXXtOBFRlAi2a+xa/2oBWtoQ1v7xjnFpTLuzeE/+YgxYRDHmcknhQzPSTyJHzuwM54YZVphUgEjp0pYZ7LSNenbyTSX7xZUCGcjpuV2uBYyPiE9Lx5ylVeJXq+PzcxDdmIz248+PjYzt6LffLLxjYf7QXMYE2+pxTmqaT/Fw7zoQE3tNhh6P5X3vo9N2PdU4gkr51NoFIfWu7jvXjxvdISShgAeVH9zzyowsvbEgvF2px/TtWoJ+2442SioWO6WlWfXrgFnXITwm1NmfdAtu5WAabSjLavqC/59jrmxj7z7510rYjLS0etUZ1L1LlJa7TCbAFJDo1thHhtWcK87WXmpWwTKfKMGNSUPt2qDDdV9OVkSf+1HCm1pfJaUY7/dkAVmoCoSl44POCdwjE5dsSFqjNOBEwVCR1poD/ESxpUq+5mHaSHF2fF/HpkN0JxnYI58dJtZunTNKISdpFVeCFSb/r+p0srOuCUx+TGRX8qJ1ZT2en5NnLGXIgsvw6gUZtgi+L54HD/GTFFOFkAxU7LS4NqDaBCNXW3XZA0NxtTpcPBbfCphjPwBcfWEcplfUeKyoTqXgEEbXkt5pEPSgs7sZTchpWAY8loewOwOX7zoO7fQQrWkRIBy7UYq3Z3KkNh53JHydkwevhuWhL5eC1lVYWq1Id/sgyAJzIHKhUCpma/J4AphJpfMXgMF5X9f5ZwXLQC1jVN+7N28W8nsO7j8jKGPO+2FqKVtip0cnZ0dPjs6a5TdwmgtGEPZzDxl67mkshMlizVdwm4RhHgF545TeWQYGHJZ2yAW1uem5mtmz3opCZl7wwhKwkhS2Hc/N8ou+8LQtrAyEvIGmFFUYsAaMupUeXcxLyGmUHTSEuoGpftYcYVoDkdbCp/vQ32rrErtEOa5+JXwPByz3rUwxrS+wZqS8LdqZJt0+NYuJr7ska0QvAqrONgl0U4mqTPVu8hs8kO+FwtXftCIqi/LRbRHF1ZR/kOFVVIvU1o9aVoOCmRU+34oKlUxZPBA90iUC5KCbk2yERGLWnJeJdG0KMYcSZs9dTjXYta0eGgYnGa4SfJkrrJM6hnhmNRYUXBQmQet3Ia4UGzg8K4xTQXz1lhXwWzsTTdYaacfnR39x5ujl4+P8Pzp4dVfqFDXCkraj0hRQ/FJW61rDCQ6PiTUDJR7aCIWMyWHzvQIJuXCpSXCXGhctSVYURBlqUrTIMPbsRf0Gwnnu0ZRBXQcAO3INv9aeMWZXfkrN9nQ1Xk9N2mZm+xzO4r2iZThE2gS53VFao/cFPYEG2VsxaXHgxpfycUtJfp2FrecUlK0Pn0rMKvRWuqEyyEFmTJwm5ouhPb504yzHYmOl1ObqdF0xjzDRj1ZiGL4V7y9lmWlPs2Ym6RLxuhUDmNMZRIGymorJTKjikIBpynfv6gBDkn3BtsgVnbCvQZRVBNYg9PVDVKC4UfSgu28ImzpoJeHk5NuEJ2ljCjCL+P9tlh0lawpwSMeOkLCpyEc7sukGds8+UYLv/QaTL+f2Ksly17LRuw7mZf9vak8/CtcxEkyyuacJbzz4vRZjCaLhlNb5VqYja30jH5u4N98t1s1nRzTwQa8TN9N0nzL2f+GI6GAnrzJZOf3mo7m6zIs6tTcdm8BXUwQyoNk3GBwSDx/h9J5WVeonOuatOUhV+fdiGm4FsX02bcs0fCpzlKx/tIvpLkIncqa8nSKiPLOeAmY3vsNo14BENZRSdS/PT19cdZGfGpy3mZ81ynX6S6rz4uGKYDTJO++DzjDtLt4rm/4fcBcczfihPJk9qvgRYjX7GGczm3MhPPWYY/DS+s2A3QKgLrzxhS1U6ESSt6uekKltGdQpbN39jHms9JoUP0uYW64754foUu5CfrZ7AOGhzDQuzBSGNMqJ2+F29UAB7MT7X0d7Vg6Fb+tXmPC1M5ZaMLi4KAn2pgcHcWwergf/cToTbERUyg6hKrlBoR1QTygKm5zLqNSmHqdO6jeZXAE8b2AxMhAil/rM4VWTueKkOaFb8fWcYZmJZ1zgGJe12T2Z50BLfwWlQYsuv8ZiCrn/yuRJnCfDX5HvW05qXkwsMIPfF6T3jQOu0D6c8NlfgyDyb05E2KQCyPnllaw43Ww4W5DvLFpuFpQJSo72kLqsGClcNygOPUYJKNb7lHVvb9PP2Kb4qdtQ/lL6GfO/pdZv7A3nSwgcqcS/MKk1IQRaxH+f9/66bTpv/kKIm920VzDi0mWzseXahU5MQE+0uTJvX7Kizz+OS0LbG3EHVNJ7gLXHY2PL31swCDjqiqZJwcjadw3TbUvXXPna12Lgu7WiKMAvT3yqFF8JgTCz42RyJdeNmkLOp1OlOaUFx31mUY1ZbhpeAWCG96buDmN0ZiYAhJcXG6Hmb+mOLKtQvMjtImOuk7LFS0HsEAloCUVTJcCAUQXBFZwbS6FQt2dvsIk1dhn5Co/AllvjKTZFqeUhZhkWGHta1YkjdDzDUdFB7sFrTC63oRmfhoaVsswqQ1uGHqRfIjlZdzkJmlAlZzukZ81SoaLmFUtKaPkF4G7WLFp7CswiQbDbrZyb/Fbbo2DDXFtp/Cu1TYAEZn0CBYRZDtU7+apXT+qZnLR6DUhy9hnM1aLPHGvupoqOPChoscQ8uRqwhyBaPfQJMnEfeM1ct64vA+BSxefzzsN7YUI54xNSFpEawyyMS4jQLnZoHOY20MNH0jt7JnwZ70gh2YNch2npkHpkrzAceqTPpytSwsPmgvSyhreJmXTbb7MvV012UTi+LkpLcoazEUA2HK7GsS/IOuoD9EbXRB4R7456dlt2fZHZiV5xQnujSdwXqAFUXJt1d2a1P4zMAGjWZGN7MtKeRC8fyRFI5e/8UWbdX8mbfiXbEoHEip5g5s2pCcbLqV2o0PcIER9lab9apbmEaUc73AAA4Yu2R7KMNoog5OrkK+7+lGLO7n6oNVlLJEgDBNzgWvEnur4Rc0C78+wH3wmPJcM6TJMrmRgupP1YYGkLTRLdEdjDBBGPUuI2dg1foRC8dUS0KXb5mrTBjscSKi/i54VaLoWkVs8dmWoVd8eX9/MaX33NxdX7IF/RrFFfTYpBe0PKdj1yrsr3j6B3ejV9Y/JYo5b8H1aolcl/IuJUTnUC4c5LYo5gBszR4sNxypPFsNsuipW1fxa1Y3QvASADdStUDYfRGjXx93Ul4l2D50aROUpc6+Yy3HFcZtRMZms6hndrUccv4G0+IPWDjCQLzLNhmjpTNahjw1E9KsmdjHgyr6+ixiO5wwpM70aJZUmS+sXHJOszRM0McX7hqPDl2fx08MXZ0cD0uZX6YjsALZqSpgGaTGULD302Y2OMFzzSKKtJHgHsbGSoWBoYyw9Ni8nrJE5IEbudgOEcTIekDdmKFOfX3V1zl+/+e0W5xPgvC09wM8/hLQTSe9H06JupeJqMv8LiXhY/hGY+VY/CpHF5INzLN6utDt/VrNtZgCQ/0pMOtoJxikgjPD
lhDp9toSshEGXG7fYmB4eKNMV3eEunkhsnob8G8euIPNDOtz4VHEu5j2GUjrmA5FYSD/8DponYr7e6wgNOOcUEQ2aTiJMkMsyQF2uRjWcZ+RPOEuW1TcNZrcxl4BFmGSAlSGFrP2oAL23p1SiZW1SjmbdsvPT1U+Dn67u/oT3k+xj2bgm+/XG4q1GtqBby7ah8usIRvuPGGvDMJwNzVDVQXtszugpfqqfMCxuNzSuMGHQvqpYTb/dwjj548C1Dla+TI3bA8m1vWndq5cb7awWHHpZ1EDeKNTeiLRY9awEbEqU9bqYWUNvVLNTRTEOKo67vcgKXsONlZJwPC/yvaQaZRkwos9Jli8iQr9VnQH/q520JN94og2cb3mjxenbzss+eMjG4QbwCYiFzTX4VOhKKMGd/o7lWjAPtogZXzzzVnkzmRdJy1AvDFRsS04ycT2HsXG8sg6cY8D+I4EYpkqwUNSUZezbJHFLUy0QHmejG0EYy/8qEFYYaBs+7fx9JyDktwb9lYHgKTkA8I6rEMGh1WqJKIofTWKgkHYENCesBYZuL3iskv9omzIDym2OHE1z8Zd7lSsvEhzHCt0dVNKNioaDFxTldUuU5zAdJFgg5nYOOsii0OQCymC8iq2a1wn6XTxcoQ9suESWx39boblM6EZBCqTVKFm2l9ActMIQijsbBh4RbmmwHbz2sLU+eY22Z+MQ8cOpoUlS++mnNXKa3VowobBujIURgR+FN6DcCp2fOqhW3mnLfCKdKLCP2jtwGjdVoM5WLW+ARdvw+zj6aN3o+fYfA4Myq2yv11qlHBVcL8EwOqtVp1/tTW7Gb57tujQuLcgW3ktbT3r9ZLeapFAFImYf1LF2AHLsA5ZaP+BycUtbMVsezdoxZMlo+vvChyBucJzh/3t2+jLigCgHtMF3eh957GF/2x17qKi7ybGH5T/TsRfmB5q54ezAA9oq1gv0ciPoYCPbQSfLb8QTtI798zBdOJpPmTi1sN3EiSu7ydTXsHHbTL7l4oVBGm6Wh/hJiEBNbAcQip8fhgcrqPSdryvTeIyXis6+VZaTtQNC5i4ppy1j2o1ei8MTcYEZ0C4Vih1ElbMCJJpsIX7JqG8pi/kc/aJJquE6VlvCX1E0JyYu9+hOqSqAB03yCp1H0HuTHIapFDYQzewok+Flt1bawoCWFXeYHkvi8ElwuCsccZ+93IR7D/nPWbSGAXfdWMKbYBjBgcG3HZ6VyVXLkm6WoVnWDNduyAo0xdlqkVAMYq7qzXRDBuMbAeJb6EAk6+3Opaz+9KmgvQ025Ikin3VegPabpiWuhaLflFk5N/G28UDDhQmtB8ZpMudLYmmFPGLLlKSf1ZDv+D2LAKeH8OWHW2QrG4GWaxR51+Kn6JoJOB2E7QWuEgrNqs0FELJttgKqLAX6QL4HO7Mesg4AuW5pTHILqIxMumKAHghcpKLAPjgQGYwNa+LlHOvFCh9p55n1F1LawqE5qJC63LJ/DI3LNx6oyHogoMRQHwUk5PV1jSbT387wWw0E3IXUZ6IAQpHy5+tsq5pqdqq4dhYaXF54heCl0oQ2MDWH66RSCeBvcQcP9iEuhhgSS4JmVF3LzLIlLYysnvHi52b8kAp2O2t6D3svbzsIz2mQlY2W41F0l595g1av1w0s5OE5WOvgKSPuB0fX0teu2OgZL/40Ka3g5/Yns9JqDa8xdw4Pcs0Vpz2VgP+OUIfmG0UotllBijjHkQkqjNUpblZ0OthGu+ta+sSphUzaN6GQ34am7STHfEL9EA7fbBbau8MzO9cvGstzo+bF+cdvnB9/WtNZs9ms2eRNkIn0UzfEJu1Pu4Bmsmievk/nmv3oItchQQfWOFI0eJ91+BA+1zDPzhpDTMGzz26HSUeBE+rvHxfmYVsry5twTZvcKsLsh5J1lWGF36qE8d1otenXw7Hg/Tqq9oV3C9tcyhTXck670UlRYfivq5TKc1AMDvpSkyFt2PZrN3oKgGbCw1mmC+bjQCafZ6OsxkwsIobRc2RE0mQcJWGbzV0JUInX1ihVUxhCDIicY+OJcbBoI+bGKnOT5VLQHFZ9NFp0YOqdRpHWayQSxGi7pTp3X/z4+dHjv8SHr5+9OTl6eR6jTe1Z/OT47NXhObx6fcGdBRjFTZES8RNi1TBUGLGFFJ5CWbSo4Fi0TW9X1sY18XsEkfrAuW2jtd3VmcWdiUsANIWdfYlrhGUFsF6JThVRdI8uuhbj8Mimlhyj0UiRaA3RqGSeRCfJNBudFCF3OA/83FmjlF7cqbj+q0l3YnKzjjFagTHDIhFVNpjPuQURwYqsYw8odKMoS9zVonPfkrF7Ldk6Q0tumyhllWbMb3OgCdzVaBdvLgKVNSl8vV1tEfEMP0hPLXm9Hym42eHQHDHeOVT5wPH4tH+mGEHq3bZnx40ihOFwylT5GYW2EtngYvR4dhySiBwKfJi0UPuP0XBCtL4tqNimo0uDhNFWmtmKAiwwkaC4wJEtM+pcMBZwokdT1RnwDtOUk7YHLYO3OF5bxuhimyVP/pMHotoWy3RYSY1m6gngWZem5tFq9R616UmdTjEmi3rmR+PVZSnkVeOSUr/m4KNRI2dzY90I3ZojWGu4YLtV6IE+2L9sv6U3Cg2st1Gf0ZwGVmvRway9jvXAFWymlTuwsLcBoe4dp+1+dOeON+o2W5TokK3zn0rxl0X9FA1Ht9GR0b5BexeOAFMFwshGaHMpwcExdTLH1+VTXzR0Oqj5wEtGK2agGZvR4uBgJmM0J1ss4cAYUmipQUSJTqeoSVFucqucbn91a3R6UagsylMHePJw8IAuU5BdHK2qulgoI1O5JNbDtkIm2fPS33cBCsPVlEy9y4xS5xZcmxaZTPQppjlakizG88wiX9bOJpN3C5I9B8JT1LNzTC2hTGiH69Ch54dn8dmPZy9On7krJx4Z5NFJC73HGelNXBhvy+qIXYq/4X774chemK8YHwxUeC/Pb5pfFss0p7FDczwgOP/v93VvwUpqvqb945dPT3nqBjoE/TagqKhuUCRwg4Gtdy7o7WXEUHHbdsCNZs1T2l3qFLB70mog1gg2I3mpyu2ihS5xIEZetz5lIV06x5XdO7+AMbvqgr8MxikaanY7q3qy90dU3ktkrk4jK+xXX335ZbQnBsNk9D4acbI3k0o1JILaQ0QQ3zjypmXzgYbvxmE6UZeo3WmBfG1vR90X4Ro3JvC9SK5/LVZkx493TZwguqZUxxiCHWt71Rg5qUwV0UUYclrL64d9U4WfPFq3Ojj19Usj44rZ34AD/ymVHTxbs1T9QOTF7aSz89Mnp/uRznAglmMUKXaMSSZKunDG6b3yprfFiMPD8oH06sdHAQmGSRA3bHWzxaTaqg4w0vM2qA5jwmSRUrV9nRRD1N05OX3y5sXRTl/bIAyTKsXt2o05dUjc6wWsG9lxhML7CV0In8h2h+pKCiN2DlbwvURLUSS6qo0L+OfSj2uGn1auQ9ofVNjwCj3TRdZXJMWCYg/5DqJd9qhC8fSY9Tg+XaOa4JDokYqJLjsNNs0IQ21PpxKCW2
1YlGgD0jp+JCaje4pqRNhmL2xqwT2rs/x9wUFEQwwR0QvOnM6xEMsktwOFuswF78FpmqMvQ/Yz0bUUeYSaoKB5HWKsSJhyR4A6sMjKHb4bnSV5VmNLy6LiAeC9xVVRmjAs7DQtQDZsz5oDyrlVtNgaF5QUGlyLRHKnhUKJXEAF1NQ6Sm5IkKK61GgfJAu3IhMWqLY2fG5jcztGb1w4QI31dqIRUwT6l6fnMfAoz45fPotfHb4+PDk6t2OdMuxhl5H6JFnWSieugF/dw2/LWQlAIXXWWFk/Y/QGTkzgxRNmfEUqeXh29v3p6yfxCeoElWuOhLRSAXANDERLTcrJXvQ7GDwb6ClHfbe0CRzQHnEYIxVGAjVqBlOwU4h5ncaeM/x01HQ7lAelKclvhCzPtKlUpYUD+SDvdk4k4m6mYi/WChtYVYlcUwDVwlufCuL55EriMr6QJiJ0Zx5TXI4tra+CXQMR179DAUoDJgexBANRTNTmXvR37zxsdthcp1m6KrOqzkZEAyuhMvZI1h74fCRfdG5XB7xCXaMXhI56tp4QQdBTRyTZK9nnpFKEOCG7oHlPLqL+OsdAKd9hdgc09eGOOzpsin04NLHDbcG6t9LChSs1qC++inN0NfbPicZpDGUiSjkEWxLLN7SDkmYN2e6q6kIR4jeexqd/if4bv72Gb1uErw96kUFrBgwBB5Zd1BCPC61r50FoLT2OZVkWw2Q4v1aZR1BUr1bjYuA1dA4Ekk6ygn27kihPVzUcfeRTSgcaenyxmlOScLnyK4rZV+T2yuH+6sVynJX9KGDu2fk/z09PjpBDRpdQ5MUQvPgd8x31AgqkVi4pm3wM9NUHSoxm2CfUbL+5DqzGulVRH6S41vGPynxaKna/Qw6+L7lAMMHMHUCkOyw/4ZrN00Xfqiy3AbzW6qZvWKbJO60+WeV5ilBIysyyB7Eu8Qz2Y0p6IIO2qS/tZ6WoVpfocBZhbsCQnZ5hQDDmPbBkqAdMP6SjVU13FRQa+vy5QbPTJcdTs0LO2VDbs6JvsteUaYp1kOxVbafawDc13Uk0U6VQk2r8+8oACNFZ2zpxpqyMFUd8dJPN3phDv+FTnIJNzHgMyix7RVxEPfv3qEAjiyvc0Ta8Q+BSQyKdLWGveqBM6SggmXpq0Ta1YFS1cw9/Iy9xb1WV7g9yAudHhhLrmuYR7ljarxpMPm0bNy4kvKR6oatr6kkJRNau4hHcPbDyCTGfg0BmA0XxR5FOgN02tdXs/RAleJGRTwFLsBVcMnHPiwgAEcHQnjDHz9OwbEx5abTiONzAnJeb5myNWoM43Bl2FdD3Bq+4ZfJqMHROjmnfNo4kf8CY+ayREpRzy5l3WRWbPddtyThnrQL+ab5HimRvGCe+pa4dvrVpXo92npJsictKZEa3ZlGH25WBr/ARXSJmBosYWPTDlsBlq6pRGfKopBBNGRuadXE4wII6ywcqDZp2yWtTXsJ7s8EDDvU8PhmNs9Iix2sl3Ha5tpwLXzNfZf7P88WwDOs4IsvlFitRcX9cb9jT/GZDozR9zsBQhy8R2UNDazr/8mB0fUy5SjH/V0vS+Ki7bBe5M0lOKhuf7TatWlVg8M57vfHdzLrjwum8cUnDons9u2mnfj+xpHHhxCYUSj91ksu6/LGbh1bdUdnQ6BgtRkdBhcu5g1I5EK3iKI/8vcMsCfHcmOjdCG+/uP2g1Ih3z522HlzZSvWnq7VHhUPYXplW7Tpr7HFIir3aWi9FxZsduMBkDZZ6GwagqauNj9QjF2B2mpwbAs2puh5wYxtwbr1trnbHoTiEGNFpTB4IDzeY6OpUPt3xxX1M5cEJdg7GFw9aohutv/v1G916eU2dMCxC62yXCK+124Zeb/uxaVffe7tafH68/n5hWaITZeen3LqsUjRbNr1FQ6zcF0Q77twJpabWnC/erRrNaV/bjrBo49iSMih9Uthz3zdJmH93DhIGJejo3rfPB32EfPSo0VeUR71A2WnatISlhFPryCCL8odVhYl3ilwuxULGfNHeHikvMKLDh+U8kdTcEo/FJCCwoowotGEtOyEMxQ+wZcPxONIZLNmaiO7FQcbMpjPcP8OiwptlYvPIyEji0lqNcNCYYTKeX4sSNLGSVvalHrB3GBBbLuiGaZpHHd21w/p0dF0PdmyIQKs54iOod/Hwkp52rVtjHD7/NOP/+iB61AvvKtPbpeVN38jrWQ+7ja7tK8nPjawPfGQtch2I2LLcMJJ33O4Ydy7ZInWcZryH4RzxwhWLQQPdIUgsSrI2JoeR91miU+cpXdCVETHyNB03MqThdUmOfUoW2ymwjrlKKC+NhY0zRLjxphViJI0e0oFLU1R127KCVIdbt00D+D1Jm26d0FW8MwzF2nnVHAWnU2Ejo7tFDP5G/Ghp2ybY42yaVnVMHLLF2uJXyXI4nxYlELiFh0biLT1LP0gbqAwhlQFby5ByVzUvhkCStVTuQqh5KMWG4ROuJooCFS09hAyeWKqGGvaAdoT8zE+J3lI3aGHLeYPFeBnv6dhRaLWwtUDXCva6ZZvC0qxKNGYmE4dZ8j4rViVutJKdKepZVlrZriiFcYL31TOzDhEnVrQnBQXI0EeXgYWHhWHg+06a7pIcmIaN8NXgZYJqU7+hw+8Oj18cfvviKH5+ePY8Pnzx7PT18fnzk7ML3cWlp5/eaOIeWonHOggIAYbQhvI38jms4YTPBtHhe6hNEr5+s63BM366gZ1gR2ZvnbRjKz6EjfEO08MDmL76IroTPbj/8AuzgjnN4SBCKysd1gDPSFKsalRCo4ahdZVDraIVItXHCuOu7skU47zX9GINJqhcnlTOXaUt+5HXo3lR2anWdaQYuzONnbad3GL8pUd+wjTn5MmX7XRnRUSxQdZ6lm3iEw6OuKok8LG+gl/lc7z3uC5WfA+SF6yRlcieFP7agcyDntFHt5g3OuUf9qLH9ktBWbPv4dGyLOpiVMw9W0o9yCs0psbxXxXlO7TdYaU5dzu/xtlTq0+PX53tPfji/t5Da+ro5sOJxMnU0m6XQk5XdsrTQTVLHpgC4rvTelB3YP00k9a6L0I8r/F56HZwcekOW+3bQRS9YkOH66hc5TnH1aDpUY68TgPVmPvyMcBsZBqoHQQVprkV4p09P3zwEZjnAGrbMeKgvEE+/PKrbYe5B2V/w5FCb/ZYcSsA10s9qhHn7ljJ0oVit6Z7i6TEG1yupWyADa+AXWkvMsrSAkwD3SwR9q/4shLzfgHGrMrUEby40XFK8fxdfPX5iNw7KHelciWCjDL7Grw6fjL4ET57Jyd7T578+fnz/ZOT/bOz//GONuwRY8Zg/JUJfunu3P5x7/Zi7/b4z7ef798+2b999j87fS5DsKJC9JO++WZe7lxuVwP6jzTVk7wvl8vLbIw2YxQOePMZzmytsCXjeFQsr6kt01XYYamaYbzOgfgsiRnYDRyWOuYQJ0wwa3+7Mk5JempmOGFvIwtdTVGDj0rhyahY81Wxq7n1kKFZphWEUBPOjSx/pyuFIHZ61qbEJgiBoFfVYxDgB1fAYgCmm
EhnMnjjpxWpjto9r7wVFV+YctSPaEnNAHZ2gJ1aXmNkJyQMdHDoylFxladlNcuWFFCZhAj0ZKZwfOxrDPVtBlfQAjt92OX+bAzahf/PsgXQdXIStEojymKqHGAPElS+RxR0dpmx/uAqhSNvj91DRnXfaa8WcfavGJPCaVGy7bC0gY/QYAmJnJKAKx6K2xzbPLzJsw94oC5orstob2mOvd3whKn18JwJxOSLoKEYYZSdfjTHXFPK3CJDckpP4FwD2qfsJTI7rzi2Rz4280R5J0Cn5LBb1QNypaClpAWiEBG0iuyEOS0LwKSkTJ3mVnkymYBIgUnPsS0sOcZxYZoyutFIMPIVS+2wHJK1wmIrAutOpMPq5kx8RgRxGipP+CfLVx/ilHwUx410OFxNR1WV0vIY+wzkeyPjHC4RZ5MYM5SApJXXXaYk8qppSojBMdBV1jK7bGx9bCLGNefLVLX67ihgo/plmjSVNG2qMdJfqWpQI15lY7Tw0yXUM7wttIpNA8XgWUAxQbYqMDsBgt9wv9mGEQw30TI0uBgAFQPGGQZDXwZHr45en7QYLoUQRHsTmQv30arEZYv5lTYlRfMTMsA2VdwlaNbznrDhAP6IaT95aco1EpkOgnjkNtqPOHiWpsZJXSyyUYyad58U99G1HSMEE9k3gfXVCICX4dqkLyWjKrXbBYWXxPGbATJJ4Lcq6HZNTJPmlczeU7JABUIdeWCjRlklNMwqMSvCL7nMAmXUMmFdbimcpe2yBYXNnilYRklKMneZA7llLXaFAy9zFKwolA8bnFK3srH1fkcyneBZhLerYu9sM6mGLjiaHnt3Oi+GMcLvINJhz2gp2A7toKNdJeKiRNc8inFl1RXeS1dmMG9Xu8lkcHvb8BghYiO1g/np8bzR6cv1KR4mBVCuS1Dxdj7V/32Euzf+9vj8rHlnpilJqHoLKQmMVtlU46D5VLPWulHeUncVgEed0Yx3Ll8Pqle6Y7KSls3dYsDXahOo50jVg7OkNy23idswfuqD2lJAI2RxOkenr16enp+9efUKydERf9+cu0dNnaguISWDRBHkA+MQaBVZ3yx+XFucxizX3pnihwm9U2cbyNzgKAn3siVjgZ8NzIV7ZAejZ35iT+LM7vVoiByGLeS0rAEFeIMQNBB6l7RUCzLupoMEt1wSlSLBV8X8PcZvKRZpky6jWih5XwB7AUcFqceQpvc5GYR1tae083xMuGbB+nxRe0gN2cOIrgiSfYUbIYnSQgzRNV1Y2NFXqPLDk6Pv9I/Dx4+PzvSv8x/Ovz37Uf/89s3ZjwGDhF04MQVUOGM+xCq1U6H3B3+MuqMS6BSQhPfZCPWzD6Iu3V4gm483evAoevAo6lp8/xjTYgWMhNk29+FX7GPAPPxwVV33JJWwpMHAY/x9Mi3RdLe6zkcUvmU+plTiSOtJXdlJPxSw1KiMy5GSgixD7Ymm8IYCujjSURP7Tfnc4mUccbS/ibJo1Dlo3K/Cq+5WEWgYkdDq1LJphF+CbE1mm0H9pkrF2ROvXjAnHt9F4im5pMuCx8d90Y8CWDioRWjJXAs1Ep89d8vOWGf5oyuxXAXiCTQH2xyj33ZN4BtcczImFvXdm+Eqr1cPvhrc/yL67qQ5NQa/DRQDobbC1WoyAQwx8NP+jW0AlIrLMuWKw25nIM5wMQglAW8TYpHEazMYAARlGRoosaFthVrjLqnak3G/0ZT2DVi8Q/RfdnncB84sMIZEeeCBD0gjwebAgVSrN2RXqNZGhZgPkQ5K7dhplrPIr68To+7tqqcuRVFGIOQcsjZB5A6KkjWIBAUTszP9/UCL6e3Strloi9A2387vKcgXjJJ9IZUahlEWzyplHLC8rmdF/mjwxeADkqiW5oYpOp5zUkoJoocbVO8d2ANq+XoYMUJRjJbmroh6pZw+DQ82Ck4DIsi71gho+JF7uNi1jkZEwOA9dzlK1V3EijDPt2vBTSYDR/oI4MPUhmOpbcSHrjJY1gc/4LR1rc6SGEqDA7kdWlUUSFOfEmS7wWr+Vwz/iM5uMsSQJ4MvB1HTDa/x6bzCjOgp3Q1xzYewkjAM3Qz+mFJk5XIQ2PsTwGoYW6vJn555Ox8JpRw5eT3HydY1ToU2xsP/bGZnA2elnkB/izMtxOg6FCvc/TBukDUlhDovNkmjN5eA1r7ED8ibeOVrcFTtX85Oy7FTlNagst2/XE5HRk4czrD4MKl8Bqd7leVjZDpnxaYswyi20T20dTKsr7ELGyRBNlhPY5jO0FmAthK6umnXh7Io1udr3Qgy/IjOlLRCGjudtVw/YFeK2tzfrr5JoBtk4S3gKOmUiw6pcchtbpt2UM3LrWiLFhIelPZ+27mzvrht8psE3m1lrkbna1XDmwli1NiNTVXyuspbYUdAc3wj7MDPb6xYDn0sFZEHsZvonUOfG6lXLIjcRKngf5pKBv+z1dra4rAHlU3nE37Cd7A34T3tj3+0+jojS0jeDkjh0/cj5klz3XgeO916MuwbHe9S1NufW4QNDvlGYm2z5udY2PU+cL+WQP/pUFjLKuqJqev89hMDk+kJ6+xrwux4laniV1RanxLFLvuu26uMkvE4KnJH/NKZEVaLpHIFEnrCJJy+oteB/V4/p38b75RynhfjydHTwzcvzmOkV9Hvo/8J1GkzUFA0mBtiQ5F0xZYi8muKv9aZLoQ0ZN+nHYwf9S4RSzboH0UONGod4lWJ0q7wHRFFc2ZZNqT8AK6KIgCjcSyqVTDhGUUfYXd3dGlX0WtxOs2I1OQob+PANhzCrqINi2w6q43xMTI1/l01fj76Yplc7Fyy2G6XsRtV5EJRk2kwXsQTg1Vl9UrYaDYaZvN8HLSK0k6yIcX1Q3sKUmFmE6tdsxB5hepjFRJgqq5fl8uyWJYZxidihxJ4ybahZsfwNKzwAA3EK1a1ui3Lcrlza+h0wuhqapI9LIO0czUMiJa6aSpJgHRNZPWqGYYTJ1AM/9rlun3dnxdEVSgRafkqAC3iNYkUbJBWcRDKL7SxJ1rPJM38GXjrJx2EqZp627CbtVrgobYI0PSuUfvjz5L1ymDCXHKgZW9qmDapqOjWGb/pY4TXjUC8/TGypfws7qrJmC0Sl5nJaFDSD/g7GVdsKIngHZXZsi7s6y6aAqku3Ttarwb5h0BTnvEHVyZGLhl3vToDMo4skLj+2/37HslHppxqS98BywiawYBd9fy2mwZvJHAZqNDhSC6zxu/ccd61HVq4tE8QJUtWwr5/RHTFeVLd9ivLKNRGUXwydKLBNN6kZENFUN9rky8xKBNdRTFcJVo0auRMCC3jW0cuXuhb6hKMwpofhwZrRKV89WPAM9T1LGWvx01xlfzOtMKlGY03zKaGOl0fJLKlW7oTbPYaav8GEar8jqrZPFWpXJ13nijuVPICRmdVTIs5Dmb4tWIi2sGhJBIUOYQ5/baAVLpooYdrR9Ccg/LM6tyRT0sa7NZ8ESp82xNM8RC/PhoAsRrNum64B/tTpcs4G39g9B1gQIpu56ClV2JiuPjX0d6DdsY4MKHb1YGeExBkeHexL421
OECvnSd+1otjNuiDyalx7VvjjFGwjU2RTsNTbQQvsYgbXq8oZx3CcM4pTXVshAyiu20Cz80ZSluu8ljooEVp+5FkJhqp4Ed0MsdwhEjeAxNzRALTIwGXr7Jj6YlUpxALcmPFhUZXY/4WXAggx4rLrGbpfK7bAcq6rOHQnKaqIYmeEzN3Jw9JqtAFRsUYyPSBjqq6VrnsmK3p70c0XzxOBFrGLg15g6oew3HA5qliAm3gv4+LQ57SFKPJOnlQjF25d6h3UM5QEZqTSLKXWnVIYwliBFSUq1oDoMG6piQQMbYRBi9ayVHrRD25JgU/oyhRFGsq0OfHdkk+ENANtFnZjeJzM4/9d1caF/ej72eUwIuDV87nxuWdcD/hgG5oLvBzWhbR68cuPJ5IJgd32NSDwu796CzFwGKjlXFWx+1VrYaAemh2OHhFXHpvoJtziAS2ZvbGxzTnyBfYHO6jfYQrmS73XZfkQnhZsfgG3Mt8/sZpzNqb1KbsKyZBFOfuCmPDq/aw3GAd5KyNbY9RhQ1LOCaYjllkI3LmCb90BQgVdcg+CmIl23vBOoqsJEauKoD+zpdo4sONoSDj8lgIDBSIzSU02QDNoeow5dBlLgrAUWxNAI1l2L1IVee96s2gysYuPHw0Zwx4i1virXGJXQ9Si8btR6/xjw4iTrZJJK2RGozKKNsanN/Q3Y6rinVlY+gUVULUcqV81ploSW2+azah2+jauwFSRm3BvFmCQ0rJx4nubYkplwwLyv9Be0D13D3iUO19iuSL+MKAvsOP71jR0VzjFQI10vj96A1pp2paVpovXXUpHztrudZsMXVEwGpRYOcrHbqOuTt1gd/nIOp8FQ2FKE5AXThgQrMYhpU0Sqm/Kt5SOms2iQj6Op0S3pGG6bpYOa2p6IYSZAL1G1wLhco+tavPN5yW5KEwKXSc1nboBNxR9+1oocPmbuQ5YYbHxAWDY4rhgvKacFoj5sWFIx2snwOKBlKCpeZkbYUdAp0f274htA9yCkyvYjFiwbdvFdzevjVISkdtbpxqaUJOU3wtXPKuUsmZodKiAXnoIsBgvH3rri9tvgXwZJUyQVGTUAc9x9QfiyEEvHeZRC8uvsTTlFFi2DXtOt41XzFS93zuNKQWRIRk9FeCxpcoD/csuuAZi9VBExbNo+BV03g7dKssfHMa0x47Hr4J/7UfHUaP9ii/O7agwpOi3XcXBH+gkGVPMWhAQG1M9LQ4Fop55WAGpwplH/YDKIm6zWHhWdfZpj92C4/aWiBLArXldMJxd4yjUVEqquJQGbrxYtsDK1mMIipMSHBELvZIZ4U1tgZNoa5ISZMaikZjLSaTtnQ0KwkWzuILK0JQpRJZYUldAcdEZcVPM4K1qAaYI94uQQcbIu0cqoDyHQrCJke5knt0ImgOhloKUHbW6RNBMHr45R/6mErngMeFGkYThJ+q4PnfkN/xptLnEfwsIDyAbHItyXtQP0Xl7zE7Ik2vMELTRl1KS2gz0XzsRDssVV7YYuUHFio/SNb26rLnD1FFNLdTZHJoHRL2a/ZWzOuymM+9RJdOPNvWgV2YMv2oszfqsGTqKmoo3rgkBCKQiC01ZxSLOvcATe5Vs5B+UnVj1V7XTzAumiywIwA0i+7qWw0K+IRfmjrBoBZs+yw0uxZ9GnzF8UXRFrHP+jC6HJRAyiJshVPJeG09UvXxiEaBTtdq3lu1KS0tYDu6x209jgxg2xKxuD1oNWOgAzFmbjYeUB4GdIa7EYft5pXPqkWTH0KUcmN84zVVt2iG/f7Q8zYZQvCDHWP40vSuZQL/AooyMlrywhqkssut1xuvA7Tdir2kbuvbQ16WtQVxPrK3VkQKpzq1gFumA5GuvD7gxcmbF+fHL45fHgUvmEsyN9k2/E+Wv0/m2Vj6RkFuhf7kgB9lyi4WzPa4R9WOrdXDG8P7+mfAKr2q46x5sO5GJ0meLVdzdReqBay0g7qfNNeZbfOUpFHH27KYj2OogXpNL5MiWVOjEMqJ9TK5B0G2iD0vxbmTqBPJ7jYLilvhXXqt0ygQ0Kypx67EGE4zaQ/uAlq7jBoBtqmPZmZTU0rVgzbsXeYJrI0bAXvs2wz104a71ZBt3Uw7mDjiuNu1etgGIlNn53a1L/EcrO76gcKOZ/rxRIkzaVXvScoh4mCBnwDWvS9Z2RcFJhjKhqxFIrECzRoWKMMMBlY0A/j/Xr1Y3qtTdvrEiLvVqMRLjHsUQhE9Ie6Je8c9KYCXytW9YVJlo8HyOjQ8lRyJfF443d1ynlwPi+Kdinokg5Sg/8FBKa+S94ur9MV/6J8wCpja4OdsuWFgVouPiVlG2YFPaInMLhwXNQO76mcbDTqvfjx/fvqSVoEC3OulcbFiea3C4juLZypfysnY2feoqql58cGcafK07aJFGPwP0NW4Qlh2OwHIsEdsWxuNBvRSd3prkNeaEl6s7MvFigy4LalJsH7Iy3neWtoRb0gH60ljTLBt9fDxq6NABFxKfOWdQvpCxuKc3eZJv86srvPCXO7ob15NVCsf0PAaLwAXD7wBN8rAwdhexiEMVV2IUdvSSjCCpvbGPU4nn7HqGYO4K7LmEi1PSeZQDWWuvQyYpsVObMAxFTEFihe7Q+W+oVLJsKKwdgFeD6u6OKTissIbwjg/d0q7wVswG8tNHbWasqtYxrLUaoUixLuI6DZwzkLSKYXM5/aHdFgJZVvoM7Ws3G2cWg1TRDEf1AUboFyXflUDh1Kw8hUfaYE76B5llBNsiBJIIgjiv7dd6TYnkdzHjXDy+NnVOulVroLjQP/ZiNNYotKxwqihrFLHGJ06WHNSey3JRU9sWksxYpCJdRdV1cw5QWjKrHzyzYbkFSqjQq/YoAdpPMx6oC485TtUurzlw71J4OQFopp9ERXEKbpAuAvk+ae86d/lxViHopstYcTcyYS9cHI1qI9MKMslhtWmUspkzdfqAtOAeoGAYRSZdF3Rvyn8y170QJcG/KerbL8uLo0d2IOAMSCv4l0VM903JHNNyMyaBVvCRb9ZSxh2IaA6gIVhAuwEZZaLp340LvDKNy+uQiuqBa5WJztdQiUXlClpp6Z2dJIW9E3PWjqmd4ihfQrhtWZUfdHiJ//5JK881S1/aS0rOu4uqSDVwHYOlTCJ8XZxdKu8Tku6EENsVDdilgUD2ta6F3ZYVV+G7IQWmGxxOdcWJTGnKz8mDajamqeTWqceL5p+WEz9THkK95nWKYbFpGzlAWZP6jCJJRMKNG2HdQkkWoXx0CWpUCvAPfoFmMtIgti7LObzbs/WtLR4KwZjhmB0+LYJI0CtcWLz1ygMt48XlW1sIZ1LkCLU89K9fpLVOp2OapSvOSdZngEbF2rt5en50X50OKrZk9adrlwikk6fD5uJAhQeO4A7TdNf0s9YEG0CsR2AWA6nEbAH5tGet63rQK4QNW4pCNNJ6F/vmQZxNfpib04LoC4X2V5EKCygwjTxzQ/cVfePeSGeQftmQxHDpwFparAQ71u8mjLq4hvwcppV2eG7N82wRI9PnoAUbhIoKpE8zMM0Ii42OmlnFu2
afP2xiU+SGeqx/UYTa+E+N86WaGpzppvY2vWwsASV1ymLOCAfotReO9edviLJVtSE1UiwOals2zb0pFFUFLmyRPBQCquWbP5cM+Mej674cv3e9WeCrfC7g+g+UxFl1uW0wHrMFutK2WUlWWB3t8gp761v+xoBdzM6MJZ8B+pUVcIr/Qnd8u2SQqpuiKxafFOyqwsIfW3begqpQ951PdiaDQkLak5DNKONDTUNSbu2xaNiBW8xtk3ElAwzlAczMcDS2Vm/5LklWVtXJAHhWseMttIKzpTrjBXOOenYBYSXx77tpw61ppDPKBjg0GeAuyqPDsWH3/eB4BelIPK3GCNwG68LpA4oUdfXLIagrOU0ZUZDP2MT449Gk68WQwyEmlUYe7URalFG51V1K6lxymi/9UYqL+XtMToI3e9TnA45ftnszU4WL/cFdQG83PuUldBOCkV0oLJ/37p1S6VeFZUnaVNkIjIJP/JLI6dAzIEzY0CG/wdQSwMEFAAAAAgApXOhTOIPaPZOHgAAqHkAACQAAABhbnNpYmxlL21vZHVsZV91dGlscy9zaXgvX19pbml0X18ucHmtPf1z27aSv+uvwCnTMfWewkvstHeTqTtVbKXRPX+dJDft5Xk4FAlJrCmS5Ydt9c373293AZDgp2TZnkxEALuLxWKxWIDg4g2br72EOaHLGfwmacwfA/bopWuWrr1glcCPnTI75iwIU+bypRdwl4UBu9mm6zA4YUYWeIg+7L1hfhishoynzoAtshRQ+ZZQbd9nydrjvgu4iy174HHiAQ1nzZ37xGSCCfhn+0nI7ABIZRHyYm8ANnDDGPCWns8FN4/8SPKzCoFHloZsE7recot8hVnMwkckEW19L0g/MtdL7IXPT7NAsv/2wY49zOoB1FkYbWNvtU6ZAWwfv3v/7i3891/sEw/+sDceNJSnwG4IFAH6hscbLyHmgd01jzk0ZxXbQcrdIVvGnLNwCe2y4xUfImN2sGUR4bNwkdpegAzbIPBoC+QANsWmJ+EyfSRJBS6zkyR0PBsoMjd0sg0PUjvFGlECCTNArKw/kxj9AVXjctsHesAulqpC6sgQeiLmIE3PQSpDAHL8zEU+VLHvbTxZB6KTOBIgB4SzBNqB3A6ljOGXU+OibOF7yXqI8gXi0OGQmWCmwwPEgrb8ZxizhPvIGtDwgHtqccEhQWE9EQo2laJKMOdxHW7KrfGQp2UWB1AtJyw3BNFRrX9wJ8UcRFiGvh8+YgOdMHA9bFfykbpvDqX2Inzg1CTR7aBHwLHgA/siKrpYFiVrVOAFl5KDqkHOdqlVMfKQpKAHnu2zKIyp0mprTcHElzGbXX+efx1Nx2wyYzfT618n5+Nz1h/NIN0fsq+T+Zfr2zkDiOnoav47u/7MRle/s39Mrs6HbPzbzXQ8m7HrKRCbXN5cTMaQO7k6u7g9n1z9wj4B5tX1nF1MLidzIDu/piolscl4huQux9OzL5AcfZpcTOa/4+j9PJlfId3P11M2Yjej6XxydnsxmrKb2+nN9WwMLJwD4avJ1ecp1DO+HF/NTagX8tj4V0iw2ZfRxQVWBtRGt9CGKXLJzq5vfp9OfvkyZ1+uL87HkPlpDNyNPl2MRWXQtLOL0eRyyM5Hl6NfxoR1DXSwhQgoeGRfv4wxE+scwb+z+eT6Chtzdn01n0JyCG2dznPkr5PZeMhG08kMxfJ5en2JzUTBAs41kQHMq7Ggg0Iv9w2AYPp2Ns5JsvPx6AKozRBZNFSBm71ev9+/TT0fNA50YglK8RjDM+mhK21XnKFaKPPJjknvTgCx11vGoPCWtczSLOaWxbwN6hHoaxL6MLgske71ZP4yC5w0DP1EZXhgp0oZIaiynYaxSifbvCjdRjzp9SzLzoCPGGo7Zf2ayWM/LmTWzxHxa4bx6qc+oEkLLvDem+/fm++gBSCN24QvM58aDzBbaLkdJzy3+GBBlmA0cZjgCDF7N78fAwngzFQkvWAZfnt3x05P2TEUn7QXn2Dxh8byj8d37KdTZpwM2YcBiGzJAPRjj8EfGqtgZZEEEDeNh5QPcwVf8TgvgLQocHywyHk2/or8lD+llC2oUN7CC+x4q3IX2xSljAWXo99mk/8bS2Y39lPi/cV73E94I1cLO+Eiq5k5A7mj+XbQwKNBTIpONs+waA6PgxrXcupu4BzbIypeEsORb6fQpxsTbFycJjhxGP0/7Ae7PxD8498b9j9Cq23/0d4mOHck7OQYSKeJmUMVkoA2GMZ79uOP7OT9gL1l7wWHhVAE0Ul6BFY5BJO8QA8gZGsbDDjKL1waJAL2H6cqfbO1Eny00kFRJUmH/WaENE0Ax3kJ/oFXAMPO56DNBkxWS61F6i/mMCYDJnnNi9N4W4YFIsZvxmCQZ/Inh0cpuwbtXMKcNI7jMC6jvAEJvQUJlTJ3yqguJ0Hrhw970frhpELL5T77DQYwycJ2XQs8DwMtzBB9ECkRsFIjt+qUoI8jbBGOZzRkCIoZpoVUyETAryIuDJAFzkTmcyOwN7ygPhHGSZQNpdTJw4NJVOQyewm2iTKgU8EjDdO8UksRtwTdntZ1NOqIQvINC++AH6EW1oX91/acJ05c0Q+hF+CxpVIxhkxjF/8w08Q8aCL+6HgrXqABXRiNkYYJDlnmpzjQkIQFydB/4MZAIw3yTYmlYVHPUCIOaFwED+E9jDDLSrAus10t3xQONg5Glq188OPQRYeE/RB6boITkxQ1gq3AUwXzVaES8034kAO5KDMvggnGrIwnX7EOKiAsk6U1ojY6Rqn0HxuGRwTYmtSoL4UM8g68BH/OvRT6VHTmjl6ETvHBYw/44+lVGJQ6NYNp09CICtYHZk6l3Ah9btHygDIKHInX7QkWSpXRs0lEoKVYxh87BroGCM3QGqoUqWLHpOAqg09RGVT0FnsvFxYmNEoSt0NzH2w/w3JJyLDUcCZKNQUvahkK1EGVa8otD1fZ22KCEwma4fYfudTJGq0dnSwam9szmZS6rQr0yl0vrk8n2Eqcn7/1JQb4+vCIVcHzXQXu7wCIT8LCoEeFKVx7iOo3qKKWrQZPclcVHJERTIGbmi2IXTAYCSz6fFjEw6QUe+SYemKM1Ugir3flgZYP1meONVQDGm/iAXOwHhp9Ij9PNg/GvOKDxyPW3DkmLTn2usclQjVRF73TRh6AVJvbgRQfBNXACP7VrUEDpqqq3gxFWsLubWSaWr27QTlEm1Sr/O40Zbn9aTVmClKOAmWHNto4N4Utym3KzHu6BLAbO10LH4RXnQHlY4zYBgBhWkrXcoGG3kioFmuJ92TiIEpoXQd+L+4LSL/DFJTEhhvVC0g+R1cKJ+ab8c3Ju2O29AIXSCK6H9rwCEvsVA3ZBW5cbKBydIWJGm3VqcWk+b3YQPJ9mFq9hJaecl1EexFyzy5vUNughVZIsVodHk8FqgxzH4SPgSxGM/Kvf2u1oYOpdRttLQ3Z32Dh6COlRKsQzZ7KR9OXwzToaqnObwWrf2d9sw//K1xYPGKNGkPorJUYUqD1OfR5NRV1YL821zEkbapaPhheervr1Z
YFoHFX5RgJVz3TXY2tuZDtrS/aqmClW/cPvm1w6GLbSzgTo4yKjT6NCKHs4M+DuuDWLlaiBrsuUs1ZQZTnt4R2SR1YX+OIsKFhSKaxrdqS4ZmNLHmtwoRKl0ETftEirc+9xAtwH9HhBg0KzQ+trEsFWfi/yQmrW3MCtISUcx+mGLNNbZXDpCKW0sjxEiuynXt71dEDynTi31TQSOMM1N4Tu6MI7qqupjWKJGkW6/SvAMa53KEFC7wOaf8XpAkGMY5x8zeJuMOE0U7ILObI0jyemB8YcMnR0n74Xlv86gzKRq7tJPdOW3qNXDccueC6aUqJoLip0imPaXVkCtn8mXkxvkfwdMGiSPKJgrs6sx3coZ7zB8DIYDbYilGX6MOu0UpgGqklYRY70omn1iC5BE2RneR5vZ6Vz4CnzXOo8m21qZYUepLyTaK739o0i5k0tHH2giFKPqnq2GKpLyRPDioyt7Hje2ROCg3qa3RmCbnix/adGe20Ta7RHS8lvBD/z3MGw0b8pedDYwlebcFiYpFBPoxmKihg5FMnraUN47dGsJyoQOrJFtpeEGWpWHJI3sQKRGc0th+tHE48tFKD+oIGcmBL2nA2dtQtJgmAPy0kQPucRxeBwqT4X+RmxWM3+qIZv3hadOCHWSrlA77YBhwuogB+XhSHDk9aGx/bwYrvEP9TDiQe2khxbe5rICnGpe8t+nI59IFmBCog4oTfTt3NnCay+SuHNsxk7fMn688sTAk98mB9TsLBfHwQRS3Y+sirjMIWjNuEx+eeQ52hPzuh73PakWxlFcEvwEdWqOp5T1TBn0IuUnugP72iIvzl7RhQEgB/2klYuIvORfs7LE4FUk/qtOXmTIkPvaVNsE4YLL1VhO+KyEieUfpGpJsRom3MpcijrYXPTXDuYmOtAmEY4Bl/4cfErCZwy802m62VrmNukzmopssATSTWaRrB/Bjee/wPOxYMYgJHIySw2CyKd1BIhDDwsYKctKBufAtfr6USFzMovQXnJM8xc5BWIkVffJlfXtzkKUKP2jtGMO97UIPiuNRyUdKEyTe251sbDxwGfP2FKJRlXk4ux59KOQhkEtAOQjChrCqUJuUsIiXAdtDaZKDA0PK0TO+ynk00C/AddIMwaCENPlkb9RLSjgrwlV+Z8LyUQwQJqIkQSv7LfH4z4/GDUIF6DnVtIpJNNM5+mZQRahm7KMzIBS7jNOXtouPceM69mDO1x0g8NSH8mfGMYP6XHppAYh7FUsfxsREmCZ17niYF75SetTOq2RzN+nTYnfSePDIEm8vHDjDL9Ww/JPt5nj/JMlOWdaHjWaSCxGdI1cloMF2kEifGSdNVajqTaaWkilwJrpMg6UXBndCTOn8luC6CqUfOy1z8KHTM7cRK7wlc/igsSHZhwUxI54gS0ZGlpKJRZHb2cOAKGvIh792gU4GAAz+MnXUYSouf3p9hzpnKqW3yanxpmN1VbDZ4NKvokTPKUX3UVYWG2Sn9+7KKpvcvUtJlGEhl/Jw/SQJhy3ymUDewJoDpZRFK7bkU6U9hSZs0qO5mVbU7vS/rd7vsdup7Fvtgx8TUD6TV6p22NWlX26xASAzhD3SR5Ljl0ElSQSiSIt1OsotYZ1kHl3G4CNPC8akkJWN6bhOtp40fR47mAokMOUGIRJcbJPGTyjT32+XF9OasmOgkoWKqu8NDd67LHr3ADR8T2g/zlp7D8tcPlbM8eISqD9Anx32xOVXfMfm72jKpcQl40v225KNsy12vp7+qrNGUx53Uy+FiO0i8BJbHHIp3xeU9UfGeuHlTNN+NMvUXDAKjL1QAN5LzWgY9PPUi3jppfNRfrOLuVjWv1xOveU7LO1o1resPes18UalirD/QXrLSGxXxcgWKLH3IHbJlRm8P1Gup8gDGV0C9UgX7b5rRimBKJzHk4NhtFPS1PqwK0sPRBbN/Ji/B9Q9BBhhYSMX26kDkP0IvOBBVh38uboLiPhA3C15ScxYcXHe+j5Rb9f3RrMjPkufjZsGBlUrEl1VrpaFFZ0Y7KGBaY7KK2N4RPMBN++fzRt0Hq6B4eyBumg+W52JmpZn3Wah0aOiAjkjA4uLw3sjJ+9kajwQCnvqhczA6QNibg0wboet99XzsGE/veQ87h/tdeZrvnETKc37nDNfgBRSzdCdm87zdyVevZW7eMQfvcIsHdRdcOuLNPrSeq+S70x0g9/jV3AHhbJfcAcra3x24nV6Myx78caczr6HiBs6huGew+IKROg/D2Vq92Wwc9TmVZr1tbu1+eku4B+ktYXbqbTNfz9VbqSxtelsW8S69zRdqTSR26m3MwTwl6atprqRX1l2Zub/2Al4Y8aBZAVUVre9BYW3i+xYS4C06vIMEvhtxX0IAX4Zj5x4DeKP678AHqGNF4xD8FU+jOHzyWjyXHdhTWXxIy69JaOceHgFpsyA7SKD9OedLG5YiZD++2IHrH9gRSGvKXWLnpXTEa58b8VL5wLYB9tP2JYzQXv8LG3IDBuExjN3L1avQ+Oqla9lfU277m4NojhZJGttOCu3znFGWrl/ayFchRN31KpRU+849fD/7Gg18HUrUwtdj6qX4s5cQwA3lF+G/jP8z21nzF9K4Degw50vFSHbzZZYKoGIOKf5w0AwEUI7P7SCLDsEG77U6+z6jG+3A2b6IAs6dW1jA47HRg/BpdUNv3n3vwIlU0Ljnfz7sR6PZj27zvPbzpCX2Qb60xO30ptu4e64/nTuxrR51Rdi7fGoFXvWqczJ7+NVJFAavuEOsCFY9a5G7v2sN4lSnORrUWtbRopOA6/hhwtfgCR1KAL8Hfwluu0ddoLeNhRZZ7TsYBPqBo0Eg7xgOLQw+fzwo5esYEGVp7x4REr4+JBSh3WOieFP3esNCe/tXHhlFwf6DY4pI6EYU56v2eunYonAdLOypcwWFw9SuwO/WvA5On618ei+361/9re1OFSz3RDu5XYrY+p0qaNBZzO2UM7umafRZRBLZjgocwhO+WeDHVPjNhPqkoYDa+2Q8AhHnxTd0KGntI4KmfUoZmgH9vD3x1I4Q4sl5bE/MYtYTuGLA742c2wfCLjpqXwLlrkUiHZ/0yq8nvh0RxtGQHVG78UE2QzwKpui5oH9090xlb1fwPZWalBWbI+p6EO+hy/Ed7ABDyWzw+5pCKZV6Nb64RxBpKYicrIPCBYjZpRLoYUolohoKe1OvqPTdlgopUKq1+HZqRxyB2idgaMhEbdirTmpZ3579BVu5MqMfgLQyZ010h+y7uM++Y9Tq4QDloX+VbOEHTBYepsfoORY9WVZfK0MlE2X4hGWikCDRJcpiLsplIkcXEPjFjiiGp3KZK3ZOElGuUmWYlR8ubF+CyARCFF+VlZvgbeixqQVQhI9tDdDTTS3IE21NKGW0tEFP45Sd64PtPuCpEguPuWOgJPou+intye6/gt7T+l8MmzKG4aV1Y+ClJpIxBj38AaJVLJ0Hx/Z9jEUHYOqxo34Fgl8n1yu2g62BnY7b0VYfXYN7nJdyLaevae/Fp8cBhQciOlC+iUPxlZaup+pztixYh
FngWirKiyEz6gzIAtHZDk1tlsCV3+ydyqBEl5TEubCwsBJe1SYwZBAax0/qtWGRQJ8UHSicN01VD26FKdW6xmEDf43dUW1qAUv/FQEkBi8QQkslIqwCYsjOoF5XcmqIhISVk9Y2hEDSqjJU7AWEVdNhr1WT81hCTT2gzVhg9X/hqYjeJ0sZRiakT3Rl7Ket6pgcBLBQZ2kqJwHkpFERZPQz+tYfQOh7xNxsDXQsaazaMUQrEUNVoBmxRjTdsFUxhW3rQAOACo5m8trxFFAFt7CE7agSpskCoNG659vEwE/0/3b/2GTvgI5rEhBBaPqMZXQWYze+BGui4NHpul0EBFQTPu5o7cYXUDr+g8cfsVm67ITOoIYDUh9L+xq0aEQ7vCjXMYjrdgQqBviyQdvdKSSOuOiT53WJwNZ75FkdItC1/nhWdwhsrTf27QwF8awOyWGe0Sk5COLkNk4JHBaLUzUhs9y3AL9QxC2jNpBhc0VIVjvemnhOVKcjWNpBSbZtFy3iVDe2XTQNYE9FYoJ1ohfvpk899Tz63wTrdx1V1C3RwmiY/xJTHGwz+nhuKXj7Xl+0ZY0YlIGxD9foMMD/lKEiuKRx5ogIel6QHuOpOhEPEXLNGf0Y/Z8+9QcmLqhlTb6OhijHgKurD3aBNLjv5LnmwOVPdGZPhwMYBBUQAK0A8Fnn0gsppb6SRYjQVCkq+oSYqkQmhHuMQaDi9Axm0nT8Z2b76CJX84QnLQ+Ml0Jrvr9jP56y99qxa4E7pQgDU77iTwVBLTPq1zAqsAR1aafOmktPvhzFYs+K9qqnX7GnzapFGW/Y1zC+t2PheWCk39QOXNsHB4stQAUS8KzWO1ROBto0EjPmkW873IiP/vnPoyHDH3gY0FFOgrF44tiR2r7ItVQ8VNUSs4o2SL0zFg0chLEL+d/e3emmOFdBY5Eth8xrQcuW37y7QU0n86/B8dAaxj028s+F8YPKaIjYA11rSwqq6W6hqyqzrMsdKkt7AZrKPk8b99LE3NYthuinIrPMJ8PmkwOal2dUjp+tlcrlhkulCTI+yN/seJWIiRCfZA9UQlcJ0JoUBkYVu1SX1tyDK9No7KrtZfW01qBPAvyJO5YWWlDs4ajvzEH6CKDb/5jTZo2RRnJKG7J0UY20VNsegipFEMPWsGYqxmEaaTF3dFRYIOHxC44GAkM5i5hG6aIhtCztJgkkjFpT4BnpokxcA83zl16AAV7KdBV7eUyXvKkLlVuxfyRZQ2wXQafgciCxZGw+yw8dmShHsRkDUpZyeXsAzPfFbrSph6nxlopis0SXsQwoBnMN7stS2nhfbryicCrAzaVas1TlL9ht7ztZXtCBdJUMzuhUXOwI+jtI52Qlo9peIncMEAf+MiFhemkjwJR4haVQ8BYi7KXAJeUVClKC7TUqSaEH0kI1zfQYQ/tUxNA+Hnxs4A1rs3D71pDV4bMlXMcG7jCiWQ7QLMQmDdfyxF5xQaS5cSXtp/ZR5zW276fXa96r8KmPyT0YqMqs14tg2kw7bSRB9MUO0QA7XqKUOgTrF/k1m1wyAXihQsAf3ybp1ucCo9jAQWcpD034gUITHpvfl0zDMgJWBWEzCiOKf4TvfLGzktQNs7QUHQ3Am/WGppbyZhbG3ueGa6f2oGbe0RhrXxUi0FCLud4QARxBxDpAkCwBvGETEcqM7ihZ2xj9kdGyxKMLUWiBopxAEbLRS80qU4bG0RLGMBIboNRqzBBClX1Jvh1jGZmKJzUhVWy6+qNXWImmRMhOX+TmmlNjaKnwOsOb5rT74kqSfpuo8Uet7TTWh5JCmQEA0Po7L3q0A9x29OSW22eMjpUXJryqfJBTax5aDh7pAqtpk9YVAFr0RF0AFX7mcVaen8lQVZSTSCalIMzqTwx93PWVL6GQ002WpBgoFFnFi0lsebdAX4sOGLiVlnMKEVBrOQLu2XIAfe2WE8k9W46c7tFyqEQ0u2ne0ch5ARhUz8W9Gjx1zAAlE4Fa8a4aNHPGoEwWedcbWXFxkJ14RV5Srfa6MAGmS5j7CFT9LWJu3+uMtjMJhtz3Au1iCKP/z6BfHmjiRIIGwfpdUScLkkiqkRJQaBhsdXUTw5WwGlS0Di+UXDJQTDjQEd5Q9QUPoFdjOyWJJ/V5wmtQZDIzwExZLiIbqGjGh7KAjUGre/UjuR8nsmJLzJ+ncuYtljE7Z2J9GgWb3TmNLv0sWVdnXcwDeDKQBahgqL4q0yQkqOHUvuwwkmCeCdDQt0ulbzsUBwLEEMR5k15+ysskBs2u6btCeB8GhcfyGNtRYuD/EYbRhKWltwq4e1rsVHydjm5uxlNrNJtNfrnCG4xm1XMTWeTi3VsNSLc356P5eKbJXVWKQcqrL82WpS2SFtaGqroB4JfRYQkpwekwj3xucHtYLeKvZEnzJ6n6OkNyBU/rToxrTW8JDXyC7iZ/qDizkZ9Uwnz5PpFcGZvlqLlvJ2+ciEVIU3XvRIgLzci3A7o65CO5TAv82oF5LrdRffAqN/sea6E4a5JWTp+GL1p3nz/AEg2vWpNvs+XdW/IOMzovRRttCQbhxtd7eUTYN1St7WBkVI1zKhPUCkmk2mEt1d/46vPRsgzHT1R4e4xKSzHLIMcdNLmmRFMcB2E5YEH3Z6pOvGOoVBZBQ2w84NFcYXttpo6r1Vy6FgUbaKoW5e0esqOU45Yd3ghEOUdDZgyG7F//Lp0fKmkMPWnqQqJ0uROKFwDYc/RyW16A160/+sAqv/YOY28FayByIaEkP9tgYhhA/QIVPxSvSxU8GcYjy6ICyzoqu3gEva+Th8Btnomq9xs93JXHNO4eU/XAEB0vxUSdRsEzWuccY1AXAgEcKRkcVV24GtwjOAMxXzaAapoj+lQIVxw3G0pRkwJhMidc0iZld4SKiCvDrGPlbFhF6Hrjvqws9DvStIWGsLiuES+4URQsmmmgD/GCEhkHOmEZRczPV5pE7FbPO8F7blRkcbzWUoXixzNbEW1Nl+9jU4oJfb7y5QYXtn0oeYKiMhOaFOh+HNwDplD8UeRv1XU5eePENYWa3VEioM3O49KWyZGs6Eh4x9UjPU1+7K+4FyAd2Z87eoE5doBEwWVGPj3usvpqrPTXB9a/SwDBsbOEK6kGR6qvlFSMgdln33XTUu1QEaKr+UWfn+Z5RLwGKTrilPn2ZuHaFBX7o4qNnVMBluRi8ihLl2//+6ikuUSqJ+4DxWhLKZd3TmGMnzz6tryz7k350lRbnFtBf1Pd5ymDmeMJygjDlmeRypKHzYkIGWANHHytUI9+XjvKK6dT8RrqZnzD8MIIVDN8/vD9e8IgZELKD4ti8G4R95yd/PAD+3nKKWyUOyIvBFuGXpbcTwVBkQsJco04nksc1O2iKjPz2y2shNuxs6bNVHH3pWJa8sZ+vlX3r/6qrl99w+RBUBiVMFoT76nhdg20tF5Ax6D5lgnFi+IQ8DeJKXuCroxdo/EJyJoDIfI+xHl+ccK0iHAv7/rkgQz+z12TGTNQhIhcI9JpvP0USZPJDjccQO0EOj/35ZFR6h8hEbmuyAOilxYX
JfjShXlfeMyPkrwGH+Z3vLaDJ8lHNgqEWPpq4umr+0RJUEJnNniHqUYQxvIS2iRmWXpniTKCWoBBMF+PNOhhvNLwLaa0AeoqXceLLdHo5dXV+kXrFq3/FEWwoL5PN/VpxKCQx3ivLZ4MUJIiU5vfC6lbRWkByQszFPwgNxkUv6wp7Hy/cQssP2ctri8phkdlEqe3+XqHffPKs3ixokZQrdPxDlWxyTtE/4gko98RQxalIkeG3zaZvVKFJimya+RHwwe9/wdQSwMEFAAAAAgApXOhTDSE8t7lAQAA8AMAACwAAABhbnNpYmxlL21vZHVsZV91dGlscy9wYXJzaW5nL2NvbnZlcnRfYm9vbC5weYWSTY/aMBCG7/kVo12hOBIbYC+VkDiwW/aEulWhvUYmmcCsjB3ZTgv76zt2gIbSjyjyx9jz+PU7vodn0xwtbXd+Co/jyYchzLWjjUL4bM0blj65hxXtG0U1YQVPq4+wpBK1QxAOEVQ3cSN32VRsXJX7gwdjYed946ajkWl4l2ltibmx29Eli3kPjw/PSrYMzJKktmYPslOQ703VKixaT8rljg7ARxjrYUNa2mPhjw0OwePBx+E/couw6ZztTZwmSfL0+rpczD+tivWXrwuYAQPegywvRHpMh5Ae0YXO6NBOQuNti7HnZsJ/Ph7CmmNZ9ov2Ml+ufsdFgjaRVtehG4emlspFXggxahx4LyHYAzLqSmneajJaXJ/H3iUV1rAxRqHU4rtULbvjvKXSz6LEaQL8UQ3kSDsvdYnnbSHrtB4+i761GuJiEqPa2L1U9M7VjVHW1K3+FSkuhRn2C5b1jvkD9FScMwStNdbNUtfyYCs9FsYW3Z3SLFfmB1qR5SHQiCw5i7nhkr528OamwZ8YQ/W//K66/LS18Sd7b3Cxgp0cK4lf9ppvvgh3EXfrHXbWQTpwKRsXQTLEqDqXLwf41p87llCqtsIpDNwdDNjdvlEZvyD+8zdDWlhsrKAMatZIfekZv6mfUEsDBBQAAAAIAKVzoUwAAAAAAgAAAAAAAAAoAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9fX2luaXRfXy5weQMAUEsDBBQAAAAIAKVzoUzGcREl6AYAANYPAAAiAAAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcHljb21wYXQyNC5weaVX227jyBF911cUPA+Sshx6ZpJZbJRMEFqibSKyqJCUvcZiIVBiy2oML0I3aVtY7L/nVJOUKI+9GSR6MMnurlNVp27tdxRtpaZ1kQjCcxerkooNObmWq1RYtKpKXo9zknkidgJ/8hLHs12R483uvasBWFCuqzRWtJGpIJ3L3U6UFiQTKvlEd5mKPN1bDHwRTiiVa5FrkTDYTZFUqdC0Lyp6UrIUVGmZP9QQB9CnrVxvWVxkK5EkIqFkn8eZXMdpuqfVvrUfgLqUaUorkRaMUgBIUFyV20Kxn/yVGZW1pVm8p1hr+ZDzlsSZp7y1D2CNPLufCtj2VKivsBo742K3V/JhW9JgPaRPHz7+aFFU6K0s6F+Vissik/T38mvz+s+4ts8G0j9ek/5s0U2sZKXpSiSZzGNttAQikbpUElGRRW4srjQCl5MuKrUWZmWF42pPm0JlGlTJcktwlp8Fgglv5QZEMYAFyFgJ2gmVybIEjTtVPErms9zGpfF1U6Rp8cQhWBd5IllMG6FMlCNjFH5/emGZZm4bk0xqZZUuSYkylnkdgVXxyFuN2zUKUV4giRAKE+0UgIzT1WuSqWsUtK7TWGZC2W+ZApUdSlpT4GlSwbxXrGnNaK36X6yhxtGkWFcZCsXw3QJC8JzzDwcUUq4USsapPpJvYmakO47UeRZdeyGF/mV05wQu4X0e+LfexJ3QxT02XRr78/vAu7qO6NqfTtwgJGc2weosCryLReRj4cwJIXnGG0B0Zvfk/jwP3DAkPyDvZj71AAf8wJlFnhta5M3G08XEm11ZBAia+RFNvRsvwrHIt4zaRgx4R0HyL+nGDcbX+HQuvKkX3RtjLr1oxtouoc6huRNE3ngxdQKaL4K5H7rErk28cDx1vBt3woH1ZtBK7q07iyi8dqbTV31l+088vXBhqHMxdWtd8HTiBe44YpeaNwM+BoOwcWpROHfHHr+4P7twyQnurQY1dP+9wCFs0sS5ca7g3+CPiQE0ojNeBO4N2w02wsVFGHnRInLpyvcnhvDQDW69sRv+jaZ+aDhbhK4FHZFjVAMChGEb7xeL0GPqjNGRGwSLeeT5syH8vwM5sNOB8MSw7M+Mw+DJD+4ZlpkwQbDo7trFesC0Gr4cpiQEb+MI0J2D0Akio46nNHOvpt6VOxu7vOszzp0XukNEzQv5gFcrvnOgdWHcxhnAwrL6o5PBlokpeZfkTG49Nr0+TsiD0GtyxhA3vm5ot3s9iemDOaX3utfrJWJDD6Jciue12HGhDIajHlfZ2dnZlaib2LpSiufW4QxA+Mhx9uVCJJr7O7d0jCea79Evc/pk/wUIqqgetvRn+9lCV6MnAMY5mgQ331pVDUxui2+RGJ3RINyj8p9dpbjaD5jAGaLHqNdFMX/+SBgGvf9kfx7aRtrLdSnihE0yfY2nwWhUO8e/l+Cjw47ZpS8vqeu11JknOnalcibaxpmlzDfFYPjLx197vVLta6x3R8N+/MGsbFSRwQncG+owpZjiKk6X4jFOe41BntkyvrUwMTflNF4Lbpfcqk8E64HEwdF1e9wd6LCNxhZmW5Y7PTo/z2KZ2vUhu1AP5zvJUw6L5/Xie27o558+fPjr+xAW8UVCnX/+/PGnnz7Y2zJLG7jjTUNr2CXNBSNGQpXnO/goGm9zErFKJdr5ADwM6VEozbHEwKjV9XXrpi5h1pEovkvgZqRatrBv8W2qyawGvr4t1DeVJcZBqm0tn1uZerwty/0Os1PmpXgQynxh/vMAyR/ar1I8l+a9jjPXTpflQY5aWBZqWUsNj+nSZgT/wngjwAILVBhexvvnnQJD7DJD8IUjblTzxOS5z69xkyyd/GzlbK5G0cpA3uAcRiJfzPjSyGi6GcVc2cfhe8Bs8rHxixGrNdJY6FGDDh7yiuPNhFS4yeHJ2YBHItf8oFVRpAK0WwdUHvYz3HntVylZanCyxBUUV9cv9FufT/ZHRsCifqQq/uIHvi4x6/nTPH8/QMgN0kyinuN8LV4E4jSMw9MqPj0K9SZ7vkFA8ogvfQ5af/idSjlX3UOE/pva0wWbP4+NiDNtidihMEqj5wXat4bU6scIdzk07L/Ytzn9cGpwyGnrtBA2aRGjlurLOjruD6el8cKATrs7op92y/QtIyNOorfxTI4Nsng3aBmwah38Rw+H36llihR9Wwkn8P+vY4L8f1sHV8fgoGDwdWgdY/o4HJqm/dWiR778GuUowUwPvlv7DPXzinYcr13BNkN3iu3bwx1zO+d+OQD8+p22LDiTwmr1NhvvT/LZ5lZ26qiKJcbxLSeSmXSDfhan/D8I+lldJJ1KbG3uYnY6ca+3XGL2LJcotEH/ZGj30VO6TRyg/wFQSwECFAM
UAAAACAClc6FMLUFLn2EAAAB3AAAAEwAAAAAAAAAAAAAAgAEAAAAAYW5zaWJsZS9fX2luaXRfXy5weVBLAQIUAxQAAAAIAKVzoUydxfFrNwAAAEgAAAAgAAAAAAAAAAAAAACAAZIAAABhbnNpYmxlL21vZHVsZV91dGlscy9fX2luaXRfXy5weVBLAQIUAxQAAAAIAKVzoUxnrogIBCoAAAOaAAAVAAAAAAAAAAAAAACAAQcBAABhbnNpYmxlX21vZHVsZV9hcHQucHlQSwECFAMUAAAACAClc6FMC0mNiScyAAAkpQAAHAAAAAAAAAAAAAAAgAE+KwAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvdXJscy5weVBLAQIUAxQAAAAIAKVzoUzCst0/7w0AAMkwAAAdAAAAAAAAAAAAAACAAZ9dAABhbnNpYmxlL21vZHVsZV91dGlscy9fdGV4dC5weVBLAQIUAxQAAAAIAKVzoUw8uWpyFG4AAFe+AQAdAAAAAAAAAAAAAACAAclrAABhbnNpYmxlL21vZHVsZV91dGlscy9iYXNpYy5weVBLAQIUAxQAAAAIAKVzoUziD2j2Th4AAKh5AAAkAAAAAAAAAAAAAACAARjaAABhbnNpYmxlL21vZHVsZV91dGlscy9zaXgvX19pbml0X18ucHlQSwECFAMUAAAACAClc6FMNITy3uUBAADwAwAALAAAAAAAAAAAAAAAgAGo+AAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9jb252ZXJ0X2Jvb2wucHlQSwECFAMUAAAACAClc6FMAAAAAAIAAAAAAAAAKAAAAAAAAAAAAAAAgAHX+gAAYW5zaWJsZS9tb2R1bGVfdXRpbHMvcGFyc2luZy9fX2luaXRfXy5weVBLAQIUAxQAAAAIAKVzoUzGcREl6AYAANYPAAAiAAAAAAAAAAAAAACAAR/7AABhbnNpYmxlL21vZHVsZV91dGlscy9weWNvbXBhdDI0LnB5UEsFBgAAAAAKAAoABAMAAEcCAQAAAA==\"\"\"\ndef invoke_module(module, modlib_path, json_params):\n pythonpath = os.environ.get('PYTHONPATH')\n if pythonpath:\n os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))\n else:\n os.environ['PYTHONPATH'] = modlib_path\n p = subprocess.Popen(['/usr/bin/python', module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n (stdout, stderr) = p.communicate(json_params)\n if not isinstance(stderr, (bytes, unicode)):\n stderr = stderr.read()\n if not isinstance(stdout, (bytes, unicode)):\n stdout = stdout.read()\n if PY3:\n sys.stderr.buffer.write(stderr)\n sys.stdout.buffer.write(stdout)\n else:\n sys.stderr.write(stderr)\n sys.stdout.write(stdout)\n return p.returncode\ndef debug(command, zipped_mod, json_params):\n basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')\n args_path = os.path.join(basedir, 'args')\n script_path = os.path.join(basedir, 'ansible_module_apt.py')\n if command == 'explode':\n z = zipfile.ZipFile(zipped_mod)\n for filename in z.namelist():\n if filename.startswith('/'):\n raise Exception('Something wrong with this module zip file: should not contain absolute paths')\n dest_filename = os.path.join(basedir, filename)\n if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):\n os.makedirs(dest_filename)\n else:\n directory = os.path.dirname(dest_filename)\n if not os.path.exists(directory):\n os.makedirs(directory)\n f = open(dest_filename, 'wb')\n f.write(z.read(filename))\n f.close()\n f = open(args_path, 'wb')\n f.write(json_params)\n f.close()\n print('Module expanded into:')\n print('%s' % basedir)\n exitcode = 0\n elif command == 'execute':\n pythonpath = os.environ.get('PYTHONPATH')\n if pythonpath:\n os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))\n else:\n os.environ['PYTHONPATH'] = basedir\n p = subprocess.Popen(['/usr/bin/python', script_path, args_path],\n env=os.environ, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n (stdout, stderr) = p.communicate()\n if not isinstance(stderr, (bytes, unicode)):\n stderr = stderr.read()\n if not isinstance(stdout, (bytes, unicode)):\n stdout = stdout.read()\n if PY3:\n sys.stderr.buffer.write(stderr)\n sys.stdout.buffer.write(stdout)\n else:\n sys.stderr.write(stderr)\n sys.stdout.write(stdout)\n return p.returncode\n elif command == 'excommunicate':\n sys.argv = ['apt', args_path]\n sys.path.insert(0, basedir)\n from 
ansible_module_apt import main\n main()\n print('WARNING: Module returned to wrapper instead of exiting')\n sys.exit(1)\n else:\n print('WARNING: Unknown debug command. Doing nothing.')\n exitcode = 0\n return exitcode\nif __name__ == '__main__':\n ANSIBALLZ_PARAMS = '{\"ANSIBLE_MODULE_ARGS\": {\"_ansible_socket\": null, \"_ansible_shell_executable\": \"/bin/sh\", \"_ansible_no_log\": false, \"_ansible_version\": \"2.5.1\", \"_ansible_diff\": false, \"_ansible_selinux_special_fs\": [\"fuse\", \"nfs\", \"vboxsf\", \"ramfs\", \"9p\"], \"_ansible_check_mode\": false, \"_ansible_module_name\": \"apt\", \"_ansible_debug\": false, \"name\": \"tree\", \"_ansible_syslog_facility\": \"LOG_USER\", \"_ansible_tmpdir\": null, \"_ansible_verbosity\": 0}}'\n if PY3:\n ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')\n try:\n temp_path = tempfile.mkdtemp(prefix='ansible_')\n zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')\n modlib = open(zipped_mod, 'wb')\n modlib.write(base64.b64decode(ZIPDATA))\n modlib.close()\n if len(sys.argv) == 2:\n exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)\n else:\n z = zipfile.ZipFile(zipped_mod, mode='r')\n module = os.path.join(temp_path, 'ansible_module_apt.py')\n f = open(module, 'wb')\n f.write(z.read('ansible_module_apt.py'))\n f.close()\n z = zipfile.ZipFile(zipped_mod, mode='a')\n sitecustomize = u'import sys\\nsys.path.insert(0,\"%s\")\\n' % zipped_mod\n sitecustomize = sitecustomize.encode('utf-8')\n zinfo = zipfile.ZipInfo()\n zinfo.filename = 'sitecustomize.py'\n zinfo.date_time = ( 2018, 5, 1, 5, 29, 10)\n z.writestr(zinfo, sitecustomize)\n z.close()\n exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS)\n finally:\n try:\n shutil.rmtree(temp_path)\n except (NameError, OSError):\n pass\n sys.exit(exitcode)"
},
{
"alpha_fraction": 0.782608687877655,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 6.666666507720947,
"blob_id": "72553195615a19a917c9b58ab6c49008b1af6361",
"content_id": "cc9c5a8be88e1ff0c3dc5a0dedba95f1a402d575",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 23,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 3,
"path": "/sample/requirements.txt",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "rpi.gpio\nbottle\nsmbus2\n"
},
{
"alpha_fraction": 0.5150943398475647,
"alphanum_fraction": 0.5320754647254944,
"avg_line_length": 17.89285659790039,
"blob_id": "77c6f1472a137af149ee9522d6c94101689b5799",
"content_id": "0d915b4c56e8d9bb3cdd99fbecd72bc7f6e09811",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 28,
"path": "/IoT/shutdown.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!usr/bin/python\n# coding:utf-8\n\nimport time\nimport RPi.GPIO as GPIO\nimport os\n\nGPIO.setmode(GPIO.BCM)\nsd_button = 8\n\nGPIO.setup(sd_button, GPIO.IN)\n\nwhile True:\n counter = 0\n\n while True:\n #ボタンの状態を検出\n sd_button_status = GPIO.input(sd_button)\n if sd_button_status == 1:\n counter = counter + 1\n if counter >= 50:\n print(\"シャットダウンします...\")\n os.system(\"sudo shutdown -h now\")\n break\n else:\n break\n\n time.sleep(0.1)\n\n"
},
{
"alpha_fraction": 0.4406779706478119,
"alphanum_fraction": 0.5762711763381958,
"avg_line_length": 10.800000190734863,
"blob_id": "62dd7ae1be5d2c06d45659157d7bcf71e0b1bd09",
"content_id": "3aad91778b37d3d7252157176db1d8d7791160a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 10,
"path": "/IoT/RPython00.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "# coding: utf-8\nimport math\n\n\"\"\"\n0J02017 杉野和馬\n\"\"\"\nprint(3 + 4 *5)\nprint((3 + 4) * 5)\nprint( 2**3)\nprint(math.sqrt(2))\n"
},
{
"alpha_fraction": 0.5059880018234253,
"alphanum_fraction": 0.5508981943130493,
"avg_line_length": 17.55555534362793,
"blob_id": "42a4459059676c1e84530713ce0f43b9a19d8990",
"content_id": "b5e1ab700b86fe734dc8d7810caec570c0b4e689",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 668,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 36,
"path": "/IoT/0j02017-2.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!usr/bin/env python\n\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\nLED1=21\nLED2=20\nLED3=16\nLED4=12\nchannels = [LED1, LED2, LED3, LED4]\n\nSW1=7\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(channels, GPIO.OUT)\nGPIO.setup(SW1, GPIO.IN)\n\nstatus = False\nwhile 1:\n if status:\n while 1:\n GPIO.output(channels, GPIO.HIGH)\n time.sleep(0.15)\n if GPIO.input(SW1) == True:\n status = not status\n break\n else:\n while 1:\n GPIO.output(channels, GPIO.LOW)\n time.sleep(0.15)\n if GPIO.input(SW1) == True:\n status = not status\n break\n\nGPIO.cleanup()\n"
},
{
"alpha_fraction": 0.6377171277999878,
"alphanum_fraction": 0.6972704529762268,
"avg_line_length": 13.925926208496094,
"blob_id": "abfe3b4864f09d4f9de891574d890e0a26f02225",
"content_id": "b605996ecebd555b6cfcdd4c95a65e975ab7d0d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 403,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 27,
"path": "/IoT/sw1-1.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "import RPi.GPIO as GPIO\nimport time\n\nSW1=7\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(SW1, GPIO.IN)\n\nLED1=21\nLED2=20\nLED3=16\nLED4=12\nchannels = [LED1, LED2, LED3, LED4]\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(channels, GPIO.OUT)\n\nwhile 1:\n\tprint('SW=', end='')\n\tprint(GPIO.input(SW1))\n\n\tif GPIO.input(SW1) == True:\n\t\tGPIO.output(channels, GPIO.HIGH)\n\t\ttime.sleep(1)\n\n\t\tGPIO.output(channels, GPIO.LOW)\n\ttime.sleep(1)\n"
},
{
"alpha_fraction": 0.5356768369674683,
"alphanum_fraction": 0.570304274559021,
"avg_line_length": 18.4489803314209,
"blob_id": "e7ec98d2f7ea1ba74ee403ad32f968af61f70a65",
"content_id": "b939a953900c15e5389ee4167bfe30e938af65ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2280,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 98,
"path": "/IoT/sound/sound3.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8;\n\nimport RPi.GPIO as GPIO\nimport pygame.mixer as pm\nimport time\n\n#PINの初期化\nSW1=12\nLED1 = 21\nLED2 = 20\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(SW1, GPIO.IN)\nGPIO.setup(LED1, GPIO.OUT)\nGPIO.setup(LED2, GPIO.OUT)\n\nGUIDE_SOUND = \"Guiding-Crosswalk01-1L.mp3\"\nVOICE = \"20180607030425_5666.wav\"\n\n#mixierの初期化\npm.init()\n\ndef manage_state_LED(LED_id, state, second):\n \"\"\"\n LEDをtime秒間stateにする\n\n @params\n LED_id: LEDの番号\n state: HIGH or LOW\n second: 秒数\n \"\"\"\n #LEDの点灯または消灯\n GPIO.output(LED_id, state)\n #スリープ\n time.sleep(second)\n\ndef turn_on_off_LED(LED_id, second):\n \"\"\"\n LEDを点滅させる\n\n @params\n LED_id: LEDの番号\n second: 秒数\n \"\"\"\n count = 0\n while(1):\n if(count > second):\n break\n\n GPIO.output(LED_id, GPIO.HIGH)\n time.sleep(0.1)\n GPIO.output(LED_id, GPIO.LOW)\n time.sleep(0.1)\n count = count + 0.2\n\ndef music_load_play(filename, loop_num=0):\n \"\"\"\n 音楽ファイルをロードし、再生する\n\n @params\n filename : ファイル名\n loop_num : 再生回数 ※ デフォルトは0(1回再生)\n \"\"\"\n pm.music.load(filename)\n pm.music.play(loop_num)\n\ntry:\n while(1):\n #赤色信号の点灯(default)\n GPIO.output(LED1, GPIO.HIGH)\n\n #SW1押された場合は信号変化処理に入る\n if GPIO.input(SW1) == GPIO.HIGH:\n #2秒後に赤色信号が消灯する\n time.sleep(2)\n GPIO.output(LED1, GPIO.LOW)\n\n #青色信号が点灯し、2秒間歩行者誘導音が流れる\n # 音源ファイルのロード\n music_load_play(GUIDE_SOUND, -1)\n manage_state_LED(LED2, GPIO.HIGH, 2)\n\n #青色信号が点灯した状態で警告音が流れる\n music_load_play(VOICE, 1)\n manage_state_LED(LED2, GPIO.HIGH, 3)\n\n #青色信号の点滅\n turn_on_off_LED(LED2, 4)\n\n #青色信号の消灯\n GPIO.output(LED2, GPIO.LOW)\n #赤色信号の点灯\n GPIO.output(LED1, GPIO.HIGH)\n\n time.sleep(0.2)\n\nexcept KeyboardInterrupt:\n GPIO.cleanup()\n"
},
{
"alpha_fraction": 0.667664647102356,
"alphanum_fraction": 0.7215569019317627,
"avg_line_length": 12.359999656677246,
"blob_id": "bcd09d649cf7b25c90772bf886304fa66b9af06a",
"content_id": "18ccedfa260fa695b901ca77db6c41fd273c65c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 25,
"path": "/IoT/led2.py",
"repo_name": "sugino0708/IoT-kadai",
"src_encoding": "UTF-8",
"text": "# coding: UTF-8\n\nimport RPi.GPIO as GPIO\nimport time\n\nLED1=21\nLED2=20\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(LED1,GPIO.OUT)\nGPIO.setup(LED2,GPIO.OUT)\n\nwhile 1:\n\tGPIO.output(LED1,GPIO.HIGH)\n\ttime.sleep(1)\n\n\tGPIO.output(LED1,GPIO.LOW)\n\ttime.sleep(1)\n\n\tGPIO.output(LED2,GPIO.HIGH)\n\ttime.sleep(1)\n\n\tGPIO.output(LED2,GPIO.LOW)\n\ttime.sleep(1)\n"
}
] | 20 |
carlosqueiroz/tt | https://github.com/carlosqueiroz/tt | 6b7ad5a1a27a66cd865474c2df3e0268f1188e62 | 6957d924cdd2a99fdce0b55822571bbc0bef8728 | bb5bfa3effd4b92c48a85e4a55ceabb6ef4f62ae | refs/heads/master | 2021-04-28T18:35:53.373437 | 2018-02-17T17:55:28 | 2018-02-17T17:55:28 | 121,877,257 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5030364394187927,
"alphanum_fraction": 0.5774291753768921,
"avg_line_length": 29.399999618530273,
"blob_id": "6ffb5222d11c49d440bf7b0eb3a5658778531580",
"content_id": "7b0d54957a9439b9399f3d88a4518520fd39f836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1986,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 65,
"path": "/coordinatespt.py",
"repo_name": "carlosqueiroz/tt",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport numpy as np\nimport numpy.ma as ma\n\n\n# Receber dados do PHP\ntry:\n data = json.loads(sys.argv[1])\nexcept:\n print \"ERROR\"\n sys.exit(1)\n\n\n# Função para calcular a rotação HFS\n\n\ndef get_coordinates(gantry, coll, couch, SAD):\n\n # Rotation no eixo X (Collimator)\n collimator_matrix = np.matrix([[np.cos(coll), np.sin(coll), 0],\n [np.sin(-coll), np.cos(coll), 0],\n [0, 0, 1]])\n\n # Rotation no eixo Y (couch)\n couch_matrix = np.matrix([[np.cos(couch), np.sin(couch), 0],\n [np.sin(-couch), np.cos(couch), 0],\n [0, 0, 1]])\n\n # Rotação no Eixo Z ( Gantry )\n gantry_matrix = np.matrix([[np.cos(gantry), 0, np.sin(gantry)],\n [0, 1, 0],\n [np.sin(gantry), 0, np.cos(gantry)]])\n\n\n #order: collimator, gantry, couch. \n #00852 0x300A:0x012C IsocenterPosition DS 50 [100.959176612292\\52.8287469269864\\-6.4116182178251]\n # Rotated Source point (1st gantry, 2nd couch)\n sourcePoint_bev = np.matrix([[40-100.959176612292, 30-52.8287469269864, -20+6.4116182178251]])\n #sourcePoint_bev = np.matrix([[40, 30, -20]])\n # projeção daria 4.0 x e 1.89 no y\n\n resultT = sourcePoint_bev * collimator_matrix* gantry_matrix * couch_matrix\n x1 = resultT.item(0)\n y1 = resultT.item(1)\n z = resultT.item(2)\n X = (x1*SAD)/(SAD-z) \n Y = (y1*SAD)/(SAD-z)\n result = np.matrix([X,Y])\n # Convertendo a matriz Nympy para lista (lista pode ser convertida em JSON)\n resultList = result.tolist()\n\n return resultList\n\n\n# Chamando função e Passando parametros recebidos.\nnew_coordinates = get_coordinates(data['gantryAngle'], data['collAngle'], data[\n 'couchAngle'], data['SAD'])\n\n\n# enviando resultado para o PHP\nprint json.dumps(new_coordinates)\n"
}
] | 1 |
apragacz/functoolsplus | https://github.com/apragacz/functoolsplus | ae978d9e60bbb07e487d46459b912f7cae1ab7b6 | 9d26666a2d017b25dd0e031ddde1570f5a4bffd3 | 4bde0b9130f99f9a72301557823321889f01194b | refs/heads/master | 2020-04-02T07:37:14.909295 | 2018-11-18T20:26:39 | 2018-11-19T00:19:17 | 154,205,865 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5990961790084839,
"alphanum_fraction": 0.5990961790084839,
"avg_line_length": 25.939130783081055,
"blob_id": "01104dd37d56bd2da5648f6babb305aa43b5fbbf",
"content_id": "4a4d6f596d31d515b7859f7926b66bfabe0c2793",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3098,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 115,
"path": "/functoolsplus/hof.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from collections.abc import Iterable\n\nfrom functoolsplus.abc import Filterable, Foldable, Functor, Monad\nfrom functoolsplus.utils.implementations import get_impl, provide_impl_for\nfrom functoolsplus.utils.singletons import Missing\n\n\ndef map(func, obj):\n return _generic_higher_order_func(Functor, 'map', '__map__', func, obj)\n\n\ndef filter(func, obj):\n return _generic_higher_order_func(\n Filterable, 'filter', '__filter__', func, obj)\n\n\ndef fold(func, obj, *, initial_value=Missing):\n return _generic_higher_order_func(\n Foldable, 'fold', '__fold__', func, obj, initial_value=initial_value)\n\n\ndef flatmap(func, obj):\n return _generic_higher_order_func(\n Monad, 'flatmap', '__flatmap__', func, obj)\n\n\ndef _generic_higher_order_func(\n abc_cls, name, method_name, func, obj, **kwargs):\n\n if isinstance(obj, abc_cls):\n obj_handler = getattr(obj, method_name)\n result = obj_handler(func, **kwargs)\n if result is not NotImplemented:\n return result\n\n obj_type = type(obj)\n try:\n impl_cls = get_impl(abc_cls, obj_type)\n except TypeError:\n pass\n else:\n cls_handler = getattr(impl_cls, method_name)\n result = cls_handler(obj, func, **kwargs)\n if result is not NotImplemented:\n return result\n\n raise TypeError(f'{obj_type.__name__!r} does not support {name} interface')\n\n\ndef unit(cls, value):\n if issubclass(cls, Monad):\n result = cls.__unit__(cls, value)\n if result is not NotImplemented:\n return result\n\n try:\n impl_cls = get_impl(Monad, cls)\n except TypeError:\n pass\n else:\n result = impl_cls.__unit__(cls, value)\n if result is not NotImplemented:\n return result\n\n raise TypeError(f'{cls.__name__!r} does not support unit interface')\n\n\n@provide_impl_for(Functor, Iterable)\n@provide_impl_for(Filterable, Iterable)\n@provide_impl_for(Foldable, Iterable)\n@provide_impl_for(Monad, Iterable)\nclass _IterableImpl(\n Functor,\n Filterable,\n Foldable,\n Monad,\n Iterable):\n\n def __map__(self, func):\n cls = type(self)\n return cls(func(item) for item in self)\n\n def __filter__(self, func):\n cls = type(self)\n return cls(item for item in self if func(item))\n\n def __fold__(self, func, initial_value=Missing):\n obj_iter = iter(self)\n value = initial_value\n if value is Missing:\n try:\n value = next(obj_iter)\n except StopIteration:\n raise ValueError(\n f'Empty {type(self).__name__!r} object'\n f' but no initial value provided')\n\n for item in obj_iter:\n value = func(value, item)\n\n return value\n\n def __flatmap__(self, func):\n cls = type(self)\n return cls(_flatmap_iter(self, func))\n\n @staticmethod\n def __unit__(cls, value):\n return cls([value])\n\n\ndef _flatmap_iter(obj, func):\n for item in obj:\n for result_item in func(item):\n yield result_item\n"
},
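A minimal usage sketch (mine, not part of the repository) of the dispatch above, assuming the functoolsplus package is importable: plain lists have no __map__/__filter__/__fold__ of their own, so the calls fall through to the implementation registered for Iterable.

from functoolsplus import filter as generic_filter
from functoolsplus import fold
from functoolsplus import map as generic_map

nums = [1, 2, 3, 4]
assert generic_map(lambda x: x * 2, nums) == [2, 4, 6, 8]    # list in, list out
assert generic_filter(lambda x: x % 2 == 0, nums) == [2, 4]
assert fold(lambda a, b: a + b, nums) == 10                  # first item seeds the fold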
{
"alpha_fraction": 0.6433120965957642,
"alphanum_fraction": 0.6578707695007324,
"avg_line_length": 21.428571701049805,
"blob_id": "4e19586ef4205cccd2da24924e3e81689ec04b4f",
"content_id": "2d8890d3fa89ac0d22909abb664cbd4083718f28",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1099,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 49,
"path": "/tests/containers/test_lazy.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom functoolsplus.containers.lazy import LazyValue, Unevaluated\n\n\ndef test_with_value_evaluator():\n lv = LazyValue(lambda: 42)\n assert not lv.is_evaluated()\n assert lv.raw_value is Unevaluated\n assert str(lv) == 'LazyValue(<unevaluated>)'\n\n value = lv.value\n assert value == 42\n assert lv.is_evaluated()\n assert lv.raw_value == value\n assert str(lv) == 'LazyValue(42)'\n\n assert lv.value == value\n assert lv.is_evaluated()\n assert lv.raw_value == value\n\n\ndef test_with_value():\n lv = LazyValue(value=42)\n assert lv.is_evaluated()\n assert lv.raw_value == 42\n\n value = lv.value\n assert value == 42\n assert lv.is_evaluated()\n assert lv.raw_value == value\n\n assert lv.value == value\n assert lv.is_evaluated()\n assert lv.raw_value == value\n\n\ndef test_with_value_evaluator_and_value():\n with pytest.raises(ValueError):\n LazyValue(lambda: 42, value=42)\n\n\ndef test_with_no_params():\n with pytest.raises(ValueError):\n LazyValue()\n\n\ndef test_unevaluated_str():\n assert str(Unevaluated) == '<unevaluated>'\n"
},
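A short companion sketch (not from the repository, assuming functoolsplus is importable) of the laziness the tests above rely on: mapping over a LazyValue defers evaluation as well.

from functoolsplus import map as generic_map
from functoolsplus.containers.lazy import LazyValue

lv = LazyValue(lambda: 41)
mapped = generic_map(lambda x: x + 1, lv)  # builds a new LazyValue, evaluates nothing
assert not mapped.is_evaluated()
assert mapped.value == 42                  # forces both evaluations
assert mapped.is_evaluated()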
{
"alpha_fraction": 0.6514435410499573,
"alphanum_fraction": 0.6530183553695679,
"avg_line_length": 22.8125,
"blob_id": "9167b4e7b6162c8ead860047ccc1fb4ee6f1913f",
"content_id": "ec680bc374b6f239ba5ff78e9e72a6326522fbaf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1905,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 80,
"path": "/tests/strategies.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from functools import partial\n\nfrom hypothesis import strategies as st\n\nfrom tests.functions import always_false, always_true, identity\n\n\ndef build_add_function(n):\n return set_function_name(f'add_{_integer_to_id(n)}')(lambda x: x + n)\n\n\ndef build_mul_function(n):\n return set_function_name(f'mul_{_integer_to_id(n)}')(lambda x: x * n)\n\n\ndef build_mod_predicate(n):\n return set_function_name(f'mod_{_integer_to_id(n)}')(lambda x: x % n == 0)\n\n\ndef build_lt_predicate(n):\n return set_function_name(f'lt_{_integer_to_id(n)}')(lambda x: x < n)\n\n\ndef build_gt_predicate(n):\n return set_function_name(f'gt_{_integer_to_id(n)}')(lambda x: x > n)\n\n\ndef build_expand_function(container_cls, function_list):\n return lambda x: container_cls(f(x) for f in function_list)\n\n\[email protected]\[email protected]_strategy_with_reusable_values\ndef integer_functions():\n return (\n st.just(identity) |\n st.integers().map(build_add_function) |\n st.integers().map(build_mul_function)\n )\n\n\[email protected]\[email protected]_strategy_with_reusable_values\ndef integer_predicates():\n return (\n st.just(always_false) |\n st.just(always_true) |\n st.integers(min_value=2).map(build_mod_predicate) |\n st.integers().map(build_lt_predicate) |\n st.integers().map(build_gt_predicate)\n )\n\n\[email protected]_strategy_with_reusable_values\ndef integer_expand_functions(container_cls):\n return _expand_functions(integer_functions(), container_cls)\n\n\ndef _expand_functions(functions_strategy, container_cls):\n return (\n st.lists(functions_strategy)\n .map(partial(build_expand_function, container_cls))\n )\n\n\ndef set_function_name(name):\n\n def decorator(f):\n f.__name__ = name\n f.__qualname__ = name\n return f\n\n return decorator\n\n\ndef _integer_to_id(n):\n if n < 0:\n return f'minus_{abs(n)}'\n else:\n return f'{n}'\n"
},
{
"alpha_fraction": 0.5992063283920288,
"alphanum_fraction": 0.5992063283920288,
"avg_line_length": 18.384614944458008,
"blob_id": "7b57bf2d23ad25d56b54efa0da5b70828a5f19b7",
"content_id": "78f75597122b9ba838a76ec547676c278aacbb1e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 13,
"path": "/functoolsplus/utils/singletons.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "def new_singleton(cls):\n if not hasattr(cls, '__instance__'):\n cls.__instance__ = object.__new__(cls)\n return cls.__instance__\n\n\nclass MissingType(object):\n\n def __new__(cls):\n return new_singleton(cls)\n\n\nMissing = MissingType()\n"
},
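A tiny sketch (mine) of the contract above: constructing the type again always yields the one cached instance.

from functoolsplus.utils.singletons import Missing, MissingType

assert MissingType() is Missing
assert MissingType() is MissingType()  # cls.__instance__ is reused on every call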
{
"alpha_fraction": 0.6777873039245605,
"alphanum_fraction": 0.6843456029891968,
"avg_line_length": 27.055999755859375,
"blob_id": "866e6907ccbb3b544fa6eabe8d61002fc608d49a",
"content_id": "ebbb223270b457c5e11285baab6329cfa41dc199",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3507,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 125,
"path": "/tests/collections/test_stream.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom functoolsplus import filter as generic_filter\nfrom functoolsplus import flatmap\nfrom functoolsplus import map as generic_map\nfrom functoolsplus import unit\nfrom functoolsplus.collections import Stream\nfrom tests import strategies as tests_st\n\n\ndef test_empty_stream_is_a_singleton():\n assert Stream() is Stream.empty\n assert Stream([]) is Stream.empty\n\n\n@given(st.integers(), st.lists(st.integers()))\ndef test_cons(input_value, input_list):\n stream = Stream.cons(input_value, lambda: Stream([input_list]))\n assert stream[0] == input_value\n\n\ndef test_cons_invalid():\n with pytest.raises(TypeError):\n Stream.cons(1, 2)\n\n\ndef test_cons_invalid_lazy_evaluator():\n stream = Stream.cons(1, lambda: 2)\n with pytest.raises(AttributeError):\n stream.tail\n\n\n@given(st.lists(st.integers()), st.lists(st.integers()))\ndef test_concat(input_list1, input_list2):\n s1 = Stream(input_list1)\n s2 = Stream(input_list2)\n assert list(s1 + s2) == input_list1 + input_list2\n\n\n@given(st.lists(st.integers()), st.text())\ndef test_concat_with_str(input_list, input_str):\n with pytest.raises(TypeError):\n Stream(input_list) + input_str\n\n\n@given(st.lists(st.integers()))\ndef test_isinstance(input_list):\n lst = Stream(input_list)\n assert isinstance(lst, Stream)\n\n\n@given(st.lists(st.integers()), tests_st.integer_functions())\ndef test_map(input_list, func):\n stream = Stream(input_list)\n assert isinstance(generic_map(func, stream), Stream)\n assert list(generic_map(func, stream)) == [func(x) for x in input_list]\n\n\n@given(st.lists(st.integers()), tests_st.integer_predicates())\ndef test_filter(input_list, pred):\n stream = Stream(input_list)\n expected_list = [x for x in input_list if pred(x)]\n assert isinstance(generic_filter(pred, stream), Stream)\n assert list(generic_filter(pred, stream)) == expected_list\n\n\n@given(st.lists(st.integers()), tests_st.integer_expand_functions(Stream))\ndef test_flatmap(input_list, expand_func):\n stream = Stream(input_list)\n expected_list = [y for x in input_list for y in expand_func(x)]\n assert isinstance(flatmap(expand_func, stream), Stream)\n assert list(flatmap(expand_func, stream)) == expected_list\n\n\n@given(st.integers())\ndef test_unit(n):\n stream = unit(Stream, n)\n assert isinstance(stream, Stream)\n assert stream.head == n\n assert stream.tail == Stream.empty\n\n\n@given(st.lists(st.integers()), st.integers())\ndef test_getitem_int_key(input_list, i):\n n = len(input_list)\n stream = Stream(input_list)\n if 0 <= i < n:\n assert stream[i] == input_list[i]\n else:\n with pytest.raises(IndexError):\n stream[i]\n\n\n@given(st.lists(st.integers()), st.text())\ndef test_getitem_str_key(input_list, key):\n stream = Stream(input_list)\n with pytest.raises(TypeError):\n stream[key]\n\n\n@given(st.lists(st.integers()))\ndef test_str(input_list):\n stream = Stream(input_list)\n assert 'Stream' in str(stream)\n for elem in input_list:\n assert repr(elem) in str(stream)\n\n\ndef test_unevaluated_str():\n stream = Stream.cons(1, lambda: Stream([2]))\n assert str(stream) == 'Stream([1, <unevaluated>])'\n assert stream[1] == 2\n assert str(stream) == 'Stream([1, 2])'\n\n\ndef test_empty_head():\n with pytest.raises(AttributeError):\n Stream.empty.head\n\n\ndef test_empty_tail():\n with pytest.raises(AttributeError):\n Stream.empty.tail\n"
},
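A condensed sketch (not part of the repository, assuming functoolsplus is importable) of the behaviour exercised above: a cons-built Stream leaves its tail unevaluated until something touches it.

from functoolsplus.collections import Stream

s = Stream.cons(1, lambda: Stream([2, 3]))
assert str(s) == 'Stream([1, <unevaluated>])'
assert list(s) == [1, 2, 3]            # iteration forces the tail
assert str(s) == 'Stream([1, 2, 3])'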
{
"alpha_fraction": 0.7868852615356445,
"alphanum_fraction": 0.7868852615356445,
"avg_line_length": 19.33333396911621,
"blob_id": "9e93a31d7351384836c5adb8460703032ed7d6cd",
"content_id": "19cdd3141c80743142749bb57c3f2384419c0b2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 61,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 3,
"path": "/requirements/requirements-testing.txt",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "# Here are packages not imported in test code\ntox\npytest-cov\n"
},
{
"alpha_fraction": 0.8222222328186035,
"alphanum_fraction": 0.8222222328186035,
"avg_line_length": 14,
"blob_id": "61a6e06034e3e64e5c2f7ca7def1b48e8409b923",
"content_id": "c14a96280e5a18dc80e71347848bf4961e56e207",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 3,
"path": "/README.md",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "# FuncTools+\n\nFunctional programming goodies\n"
},
{
"alpha_fraction": 0.5949628353118896,
"alphanum_fraction": 0.5974401235580444,
"avg_line_length": 27.494117736816406,
"blob_id": "430b0a029d8d6b8821cc810ebbeef0881529359f",
"content_id": "20fe6e1362f24ac0d75532760a0233728a77ca2e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2422,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 85,
"path": "/setup.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os.path\nimport re\n\nfrom setuptools import find_packages, setup\n\nROOT_DIR = os.path.dirname(__file__)\n\n\ndef read_contents(local_filepath):\n with open(os.path.join(ROOT_DIR, local_filepath), 'rt') as f:\n return f.read()\n\n\ndef get_requirements(requirements_filepath):\n '''\n Return list of this package requirements via local filepath.\n '''\n requirements = []\n with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f:\n for line in f:\n if line.startswith('#'):\n continue\n line = line.rstrip()\n if not line:\n continue\n requirements.append(line)\n return requirements\n\n\ndef get_version(package):\n '''\n Return package version as listed in `__version__` in `init.py`.\n '''\n init_py = read_contents(os.path.join(package, '__init__.py'))\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description(markdown_filepath):\n '''\n Return the long description in RST format, when possible.\n '''\n try:\n import pypandoc\n return pypandoc.convert(markdown_filepath, 'rst')\n except ImportError:\n return read_contents(markdown_filepath)\n\n\nsetup(\n name='functoolsplus',\n version=get_version('functoolsplus'),\n packages=find_packages(exclude=['tests.*', 'tests']),\n include_package_data=True,\n author='Andrzej Pragacz',\n author_email='[email protected]',\n description='Functional programming goodies',\n license='MIT',\n keywords=' '.join((\n 'functional',\n 'monads',\n 'functors',\n 'streams',\n 'immutable',\n )),\n long_description=get_long_description('README.md'),\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Web Environment',\n 'Framework :: IPython',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Utilities',\n ],\n install_requires=get_requirements(\n 'requirements/requirements-base.txt'),\n python_requires='>=3.6',\n url='https://github.com/apragacz/functoolsplus',\n)\n"
},
{
"alpha_fraction": 0.5710623860359192,
"alphanum_fraction": 0.5750498175621033,
"avg_line_length": 23.725351333618164,
"blob_id": "2d6cfe2987b9868fa1824bd4b1f1d9ccedf42545",
"content_id": "3233ac438ddbd51c23c62dd7a825b19b0e2ccb3a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3511,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 142,
"path": "/functoolsplus/collections/lists.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from collections.abc import Sequence\n\nfrom functoolsplus.collections.base import SingleLinkedStruct\nfrom functoolsplus.utils.singletons import new_singleton\n\n\nclass SingleLinkedList(SingleLinkedStruct, Sequence):\n\n def __new__(cls, iterable=None):\n if iterable is None:\n iterable = []\n return cls.from_iterable(iterable)\n\n @classmethod\n def get_empty(cls):\n return _SingleLinkedListEmptyType()\n\n @classmethod\n def cons(cls, head, tail):\n return _SingleLinkedListConsType(head, tail)\n\n @classmethod\n def cons_simple(cls, head, tail):\n return cls.cons(head, tail)\n\n def __len__(self):\n counter = 0\n lst = self\n while lst:\n lst = lst.tail\n counter += 1\n return counter\n\n def __eq__(self, other):\n if not isinstance(other, SingleLinkedList):\n return NotImplemented\n\n lst1 = self\n lst2 = other\n\n while lst1 and lst2:\n if lst1.head != lst2.head:\n return False\n\n lst1 = lst1.tail\n lst2 = lst2.tail\n\n return (not bool(lst1)) and (not bool(lst2))\n\n def __add__(self, other):\n if not isinstance(other, SingleLinkedList):\n return NotImplemented\n result = other\n for item in reversed(self):\n result = self.cons(item, result)\n return result\n\n def __map__(self, func):\n result = self.get_empty()\n for item in reversed(self):\n result = self.cons(func(item), result)\n return result\n\n def __filter__(self, func):\n result = self.get_empty()\n for item in reversed(self):\n if func(item):\n result = self.cons(item, result)\n return result\n\n def __flatmap__(self, func):\n reversed_result = self.get_empty()\n for item in self:\n for result_item in func(item):\n reversed_result = self.cons(result_item, reversed_result)\n return reversed_result.reversed()\n\n def __reversed__(self):\n return iter(self.reversed())\n\n def reversed(self):\n reversed_list = self.get_empty()\n lst = self\n while lst:\n reversed_list = self.cons(lst.head, reversed_list)\n lst = lst.tail\n return reversed_list\n\n def appended_left(self, item):\n return self.cons(item, self)\n\n def popped_left(self):\n if not self:\n raise ValueError(f\"{type(self).__name__!r} object is empty\")\n return self.tail\n\n def _get_repr_items(self):\n items = []\n lst = self\n while lst:\n items.append(lst.head)\n lst = lst.tail\n return items\n\n\nclass _SingleLinkedListEmptyType(SingleLinkedList):\n\n def __new__(cls):\n return new_singleton(cls)\n\n def __bool__(self):\n return False\n\n\nclass _SingleLinkedListConsType(SingleLinkedList):\n\n def __new__(cls, head, tail):\n obj = object.__new__(cls)\n obj._head = head\n obj._tail = tail\n return obj\n\n @property\n def head(self):\n return self._head\n\n @property\n def tail(self):\n return self._tail\n\n def __bool__(self):\n return True\n\n\nSingleLinkedList.empty = SingleLinkedList.get_empty()\n\nfor cls in (\n _SingleLinkedListEmptyType,\n _SingleLinkedListConsType):\n cls.__internal_name__ = cls.__name__\n cls.__name__ = SingleLinkedList.__name__\n cls.__qualname__ = SingleLinkedList.__qualname__\n"
},
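A brief usage sketch (mine, assuming functoolsplus is importable) of the persistent list above: "mutating" operations return new lists and leave the original untouched.

from functoolsplus.collections import SingleLinkedList

lst = SingleLinkedList([1, 2, 3])
assert lst.head == 1 and list(lst.tail) == [2, 3]
assert lst.appended_left(0) == SingleLinkedList([0, 1, 2, 3])
assert lst == SingleLinkedList([1, 2, 3])   # unchanged by the call above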
{
"alpha_fraction": 0.5985915660858154,
"alphanum_fraction": 0.5985915660858154,
"avg_line_length": 9.142857551574707,
"blob_id": "14e87963a093c0aa3bedc1820af846e7c4e639bb",
"content_id": "790d907ce96067c105d27b4ff18d5cd849620ec7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 14,
"path": "/tests/functions.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "def identity(x):\n return x\n\n\ndef always_false(x):\n return False\n\n\ndef always_true(x):\n return True\n\n\ndef add(x, y):\n return x + y\n"
},
{
"alpha_fraction": 0.5608999133110046,
"alphanum_fraction": 0.5989139080047607,
"avg_line_length": 24.780000686645508,
"blob_id": "2151606a7838768a84f1acbf76bc200dd4ea8fa2",
"content_id": "8b7962ebd082d087061d42c2a58bc86508770b34",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1289,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 50,
"path": "/tests/containers/test_pipes.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from functools import partial\n\nimport pytest\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom functoolsplus import filter, map\nfrom functoolsplus.containers.pipes import P\n\n\ndef test_chain_methods_cons():\n pipe = (\n P\n .filter(lambda x: x % 2 == 1)\n .map(lambda x: x + 1)\n .sum()\n .to_type(float))\n result = pipe([1, 2, 3, 4, 5])\n assert result == 12.0\n assert isinstance(result, float)\n\n\ndef test_operator_cons():\n pipe = (\n P |\n partial(filter, lambda x: x % 2 == 1) |\n partial(map, lambda x: x + 1) |\n sum |\n float)\n result = pipe([1, 2, 3, 4, 5])\n assert result == 12.0\n assert isinstance(result, float)\n\n\n@given(st.lists(st.integers()))\ndef test_concat_with_another_pipe(input_list):\n pipe1 = P | partial(map, lambda x: x + 1)\n pipe2 = P | partial(map, lambda x: x * 2)\n pipe12 = pipe1 | pipe2\n pipe21 = pipe2 | pipe1\n pipe22 = pipe2 | pipe2\n assert pipe12(input_list) == [2 * (x + 1) for x in input_list]\n assert pipe21(input_list) == [2 * x + 1 for x in input_list]\n assert pipe22(input_list) == [4 * x for x in input_list]\n\n\n@given(st.text())\ndef test_concat_with_str(input_str):\n with pytest.raises(TypeError):\n P | input_str\n"
},
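A condensed sketch (mine, assuming functoolsplus is importable) of the chained-method style tested above; nothing runs until the pipe is called with input.

from functoolsplus import P

pipe = P.filter(lambda x: x % 2 == 1).map(lambda x: x + 1).sum()
assert pipe([1, 2, 3, 4, 5]) == 12   # odds 1, 3, 5 become 2, 4, 6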
{
"alpha_fraction": 0.6330451369285583,
"alphanum_fraction": 0.6330451369285583,
"avg_line_length": 27.135135650634766,
"blob_id": "21ebb31945442baaa03f16a6bbbfabfc7e98482b",
"content_id": "51bb351333d0bf7b26ab1ebebca49eaf6d5fc825",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 37,
"path": "/functoolsplus/utils/implementations.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from collections import OrderedDict, defaultdict, namedtuple\n\nImplEntry = namedtuple('ImplEntry', [\n 'base_cls',\n 'impl_cls',\n])\n\n_impementations_registry = defaultdict(OrderedDict)\n\n\ndef provide_impl_for(abc_class, base_class):\n\n def decorator(impl_cls):\n\n impl_reg = _impementations_registry[abc_class.__qualname__]\n impl_reg[base_class.__qualname__] = ImplEntry(\n base_cls=base_class,\n impl_cls=impl_cls)\n return impl_cls\n\n return decorator\n\n\ndef get_impl(abc_class, cls):\n impl_reg = _impementations_registry[abc_class.__qualname__]\n for test_cls in cls.mro():\n if test_cls.__qualname__ in impl_reg:\n return impl_reg[test_cls.__qualname__].impl_cls\n\n # For \"virtual\" base classes, which may not be in the MRO.\n for impl_entry in impl_reg.values():\n if issubclass(cls, impl_entry.base_cls):\n return impl_entry.impl_cls\n\n raise TypeError(\n f'No implementation of {abc_class.__name__}'\n f' for {cls.__name__} provided')\n"
},
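A hypothetical sketch of the registry above (Box and _BoxImpl are invented names; assumes functoolsplus is importable): a third-party type gains a Functor implementation without being modified.

from functoolsplus import map as generic_map
from functoolsplus.abc import Functor
from functoolsplus.utils.implementations import get_impl, provide_impl_for

class Box:
    def __init__(self, value):
        self.value = value

@provide_impl_for(Functor, Box)
class _BoxImpl:
    def __map__(self, func):
        return Box(func(self.value))

assert get_impl(Functor, Box) is _BoxImpl
assert generic_map(lambda x: x + 1, Box(1)).value == 2   # dispatched via the registry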
{
"alpha_fraction": 0.6956914067268372,
"alphanum_fraction": 0.7033611536026001,
"avg_line_length": 28.164474487304688,
"blob_id": "d5a6a0b9c0abbeddc77ff4766212a1b20e531012",
"content_id": "3b00425be2df1e5638a292ab39d7a65f5e74660e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4433,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 152,
"path": "/tests/collections/test_single_linked_list.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom functoolsplus import filter as generic_filter\nfrom functoolsplus import flatmap\nfrom functoolsplus import map as generic_map\nfrom functoolsplus import unit\nfrom functoolsplus.collections import SingleLinkedList\nfrom tests import strategies as tests_st\n\n\ndef test_empty_list_is_a_singleton():\n assert SingleLinkedList() is SingleLinkedList.empty\n assert SingleLinkedList([]) is SingleLinkedList.empty\n\n\n@given(st.lists(st.integers()))\ndef test_len(input_list):\n lst = SingleLinkedList(input_list)\n assert len(lst) == len(input_list)\n\n\n@given(st.lists(st.integers()))\ndef test_eq(input_list):\n lst1 = SingleLinkedList(input_list)\n lst2 = SingleLinkedList(input_list)\n assert lst1 == lst2\n\n\n@given(st.lists(st.integers()), st.lists(st.integers()))\ndef test_neq(input_list1, input_list2):\n if input_list1 == input_list2:\n return\n lst1 = SingleLinkedList(input_list1)\n lst2 = SingleLinkedList(input_list2)\n assert lst1 != lst2\n\n\n@given(st.lists(st.integers()), st.lists(st.integers()))\ndef test_concat(input_list1, input_list2):\n lst1 = SingleLinkedList(input_list1)\n lst2 = SingleLinkedList(input_list2)\n assert lst1 + lst2 == SingleLinkedList(input_list1 + input_list2)\n\n\n@given(st.lists(st.integers()), st.lists(st.integers()))\ndef test_concat_list(input_list1, input_list2):\n lst1 = SingleLinkedList(input_list1)\n with pytest.raises(TypeError):\n lst1 + input_list2\n\n\n@given(st.lists(st.integers()))\ndef test_isinstance(input_list):\n lst = SingleLinkedList(input_list)\n assert isinstance(lst, SingleLinkedList)\n\n\n@given(st.lists(st.integers()))\ndef test_reversed(input_list):\n lst = SingleLinkedList(input_list)\n reversed_lst = lst.reversed()\n assert len(lst) == len(reversed_lst)\n assert len(lst) > 1 or lst == reversed_lst\n assert reversed_lst.reversed() == lst\n\n\n@given(st.lists(st.integers()), st.integers())\ndef test_appended_left(input_list, value):\n lst = SingleLinkedList(input_list)\n assert lst.appended_left(value) == SingleLinkedList([value] + input_list)\n assert lst == SingleLinkedList(input_list)\n\n\n@given(st.lists(st.integers(), min_size=1))\ndef test_popped_left_nonempty(input_list):\n lst = SingleLinkedList(input_list)\n assert lst.popped_left() == SingleLinkedList(input_list[1:])\n assert lst == SingleLinkedList(input_list)\n\n\ndef test_popped_left_empty():\n lst = SingleLinkedList()\n with pytest.raises(ValueError):\n lst.popped_left()\n\n\n@given(st.lists(st.integers()), tests_st.integer_functions())\ndef test_map(input_list, func):\n lst = SingleLinkedList(input_list)\n expected_lst = SingleLinkedList(func(x) for x in input_list)\n assert generic_map(func, lst) == expected_lst\n\n\n@given(st.lists(st.integers()), tests_st.integer_predicates())\ndef test_filter(input_list, pred):\n lst = SingleLinkedList(input_list)\n expected_lst = SingleLinkedList(x for x in input_list if pred(x))\n assert generic_filter(pred, lst) == expected_lst\n\n\n@given(\n st.lists(st.integers()),\n tests_st.integer_expand_functions(SingleLinkedList))\ndef test_flatmap(input_list, expand_func):\n lst = SingleLinkedList(input_list)\n expected_list = [y for x in input_list for y in expand_func(x)]\n assert isinstance(flatmap(expand_func, lst), SingleLinkedList)\n assert list(flatmap(expand_func, lst)) == expected_list\n\n\n@given(st.integers())\ndef test_unit(n):\n lst = unit(SingleLinkedList, n)\n assert lst == 
SingleLinkedList([n])\n\n\n@given(st.lists(st.integers()), st.integers())\ndef test_getitem_int_key(input_list, i):\n n = len(input_list)\n lst = SingleLinkedList(input_list)\n if 0 <= i < n:\n assert lst[i] == input_list[i]\n else:\n with pytest.raises(IndexError):\n lst[i]\n\n\n@given(st.lists(st.integers()), st.text())\ndef test_getitem_str_key(input_list, key):\n lst = SingleLinkedList(input_list)\n with pytest.raises(TypeError):\n lst[key]\n\n\n@given(st.lists(st.integers()))\ndef test_str(input_list):\n stream = SingleLinkedList(input_list)\n assert 'SingleLinkedList' in str(stream)\n for elem in input_list:\n assert repr(elem) in str(stream)\n\n\ndef test_empty_head():\n with pytest.raises(AttributeError):\n SingleLinkedList.empty.head\n\n\ndef test_empty_tail():\n with pytest.raises(AttributeError):\n SingleLinkedList.empty.tail\n"
},
{
"alpha_fraction": 0.695652186870575,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 45,
"blob_id": "5d49ab09b5d2a4c56383cc9133307bf429369fe0",
"content_id": "ea43a9a6e83e818ab70c923812682bdcb037bdf3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 2,
"path": "/functoolsplus/collections/__init__.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from .lists import SingleLinkedList # noqa: F401\nfrom .streams import Stream # noqa: F401\n"
},
{
"alpha_fraction": 0.5978835821151733,
"alphanum_fraction": 0.60317462682724,
"avg_line_length": 22.625,
"blob_id": "b4e2c5bbb13e94302ca4cdf8dc4d3ca185920148",
"content_id": "0a602d3399719642d95d3c9dad68730e30213143",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 189,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 8,
"path": "/scripts/run_pylint.sh",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nset -euo pipefail\n\nreadonly BASE_DIR=\"$( cd \"$( dirname \"$( dirname \"${BASH_SOURCE[0]}\" )\" )\" && pwd )\"\n\ncd \"${BASE_DIR}\"\npylint --rcfile=setup.cfg functoolsplus tests -E \"$@\"\n"
},
{
"alpha_fraction": 0.5879712700843811,
"alphanum_fraction": 0.5884200930595398,
"avg_line_length": 27.202531814575195,
"blob_id": "e11ff804c1540b46990556455005eac55a0f649c",
"content_id": "62fbf0449c8e3b047c20efdfb3dffcd002346bd3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2228,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 79,
"path": "/functoolsplus/collections/base.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import abc\nfrom collections.abc import Iterable, Reversible\nfrom itertools import islice\n\nfrom functoolsplus.abc import Filterable, Functor, Monad\n\n\nclass SingleLinkedStruct(Iterable, Filterable, Functor, Monad):\n\n @classmethod\n @abc.abstractclassmethod\n def get_empty(cls):\n raise NotImplementedError()\n\n @classmethod\n @abc.abstractclassmethod\n def cons(cls, head, tail):\n raise NotImplementedError()\n\n @classmethod\n @abc.abstractclassmethod\n def cons_simple(cls, head, tail):\n raise NotImplementedError()\n\n @classmethod\n def from_iterable(cls, iterable):\n if not isinstance(iterable, Reversible):\n iterable = list(iterable)\n lst = cls.get_empty()\n for item in reversed(iterable):\n lst = cls.cons_simple(item, lst)\n return lst\n\n @staticmethod\n def __unit__(cls, value):\n return cls.cons_simple(value, cls.get_empty())\n\n @property\n def head(self):\n raise AttributeError(\n f\"{type(self).__name__!r} object has no attribute 'head'\")\n\n @property\n def tail(self):\n raise AttributeError(\n f\"{type(self).__name__!r} object has no attribute 'tail'\")\n\n def __iter__(self):\n current = self\n while current:\n yield current.head\n current = current.tail\n\n def __repr__(self):\n items = self._get_repr_items()\n if items:\n return f'{type(self).__name__}({items!r})'\n else:\n return f'{type(self).__name__}()'\n\n def __str__(self):\n return repr(self)\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n return self.from_iterable(\n islice(self, index.start, index.stop, index.step))\n if isinstance(index, int):\n try:\n return next(islice(self, index, index + 1))\n except (StopIteration, ValueError):\n raise IndexError('list index out of range')\n raise TypeError(\n f\"{type(self).__name__!r} indices must be integers or slices,\"\n f\" not {type(index).__name__!r}\")\n\n @abc.abstractmethod\n def _get_repr_items(self):\n raise NotImplementedError()\n"
},
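A small sketch (mine, assuming functoolsplus is importable) of the shared __getitem__ defined above: integer indexing and slicing both walk the linked structure through islice.

from functoolsplus.collections import SingleLinkedList

lst = SingleLinkedList(range(5))
assert lst[1] == 1
assert lst[1:4] == SingleLinkedList([1, 2, 3])   # slices build a new structure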
{
"alpha_fraction": 0.698300302028656,
"alphanum_fraction": 0.7082152962684631,
"avg_line_length": 20.393939971923828,
"blob_id": "65adf970cd3e1828926f604e53c8f3af5da715dc",
"content_id": "651c1f0950ee94fd7109c698064614b26b49f2ba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 706,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 33,
"path": "/tox.ini",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist =\n packagecheck,\n lint,\n py36,\n\n[testenv]\nsetenv =\n PYTHONPATH={toxinidir}\ncommands = py.test -v --cov --cov-report xml\nenvdir = {toxworkdir}/venv/{envname}\ndeps =\n -rrequirements/requirements-testing-base.txt\n -rrequirements/requirements-testing.txt\n\n[testenv:lint]\nbasepython = python3.6\ncommands =\n flake8 .\n isort --check --diff\n pylint --rcfile=setup.cfg functoolsplus tests -E\ndeps =\n -rrequirements/requirements-testing-base.txt\n -rrequirements/requirements-linting.txt\n\n[testenv:packagecheck]\nbasepython = python3.6\ncommands =\n ./setup.py sdist\n ./setup.py bdist_wheel\n twine check dist/*\ndeps =\n -rrequirements/requirements-packaging.txt\n"
},
{
"alpha_fraction": 0.6568602919578552,
"alphanum_fraction": 0.6622991561889648,
"avg_line_length": 23.664634704589844,
"blob_id": "def1d7382eb1664565cee34af9726ff3bfc866e5",
"content_id": "eb0228b89671b3483b2c701e76493cfacee2442b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4045,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 164,
"path": "/tests/test_hof.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom hypothesis import given\nfrom hypothesis import strategies as st\n\nfrom functoolsplus import filter as generic_filter\nfrom functoolsplus import flatmap, fold\nfrom functoolsplus import map as generic_map\nfrom functoolsplus import unit\nfrom functoolsplus.abc import Filterable, Foldable, Functor, Monad\nfrom functoolsplus.utils.singletons import Missing\nfrom tests import strategies as tests_st\nfrom tests.functions import add, always_false, identity\n\n\nclass FallbackImpl(Filterable, Foldable, Functor, Monad, object):\n\n def __init__(self, value):\n self._value = value\n\n def __fold__(self, func, *, initial_value=Missing):\n return super().__fold__(func, initial_value=initial_value)\n\n def __filter__(self, func):\n return super().__filter__(func)\n\n def __map__(self, func):\n return super().__map__(func)\n\n def __flatmap__(self, func):\n return super().__flatmap__(func)\n\n @staticmethod\n def __unit__(cls, value):\n return super().__unit__(cls, value)\n\n\nclass NoImpl(Filterable, Foldable, Functor, Monad, object):\n\n def __init__(self, value):\n self._value = value\n\n __fold__ = None\n __filter__ = None\n __map__ = None\n __flatmap__ = None\n __unit__ = None\n\n\n@given(st.lists(st.integers()), tests_st.integer_functions())\ndef test_list_map(input_list, func):\n assert generic_map(func, input_list) == [func(x) for x in input_list]\n\n\n@given(st.lists(st.integers()), tests_st.integer_predicates())\ndef test_list_filter(input_list, pred):\n assert generic_filter(pred, input_list) == [\n x for x in input_list if pred(x)]\n\n\n@given(st.lists(st.integers(), min_size=1))\ndef test_list_fold_sum(input_list):\n assert fold(lambda x, y: x + y, input_list) == sum(input_list)\n\n\ndef test_list_fold_sum_on_empty():\n with pytest.raises(ValueError):\n fold(lambda x, y: x + y, [])\n\n\n@given(st.lists(st.integers()))\ndef test_list_fold_sum_with_initial_value(input_list):\n assert fold(\n lambda x, y: x + y, input_list, initial_value=0) == sum(input_list)\n\n\n@given(st.lists(st.integers()), tests_st.integer_expand_functions(list))\ndef test_list_flatmap(input_list, expand_func):\n assert flatmap(expand_func, input_list) == [\n y for x in input_list for y in expand_func(x)]\n\n\n@given(st.integers())\ndef test_list_unit(n):\n assert unit(list, n) == [n]\n\n\n@given(st.integers(), tests_st.integer_functions())\ndef test_int_map(n, func):\n with pytest.raises(TypeError):\n generic_map(func, n)\n\n\n@given(st.integers())\ndef test_int_filter(n):\n with pytest.raises(TypeError):\n generic_filter(always_false, n)\n\n\n@given(st.integers())\ndef test_int_fold(n):\n with pytest.raises(TypeError):\n fold(identity, n)\n\n\n@given(st.integers())\ndef test_int_flatmap(n):\n with pytest.raises(TypeError):\n flatmap(identity, n)\n\n\n@given(st.integers())\ndef test_int_unit(n):\n with pytest.raises(TypeError):\n unit(int, n)\n\n\ndef test_fallback_impl_map():\n with pytest.raises(TypeError):\n generic_map(identity, FallbackImpl(42))\n\n\ndef test_fallback_impl_filter():\n with pytest.raises(TypeError):\n generic_filter(always_false, FallbackImpl(42))\n\n\ndef test_fallback_impl_fold():\n with pytest.raises(TypeError):\n fold(add, FallbackImpl(42))\n\n\ndef test_fallback_impl_flatmap():\n with pytest.raises(TypeError):\n flatmap(lambda x: FallbackImpl(x), FallbackImpl(42))\n\n\ndef test_fallback_impl_unit():\n with pytest.raises(TypeError):\n unit(FallbackImpl, 42)\n\n\ndef test_no_impl_map():\n with pytest.raises(TypeError):\n generic_map(identity, NoImpl(42))\n\n\ndef 
test_no_impl_filter():\n with pytest.raises(TypeError):\n generic_filter(always_false, NoImpl(42))\n\n\ndef test_no_impl_fold():\n with pytest.raises(TypeError):\n fold(add, NoImpl(42))\n\n\ndef test_no_impl_flatmap():\n with pytest.raises(TypeError):\n flatmap(lambda x: NoImpl(x), NoImpl(42))\n\n\ndef test_no_impl_unit():\n with pytest.raises(TypeError):\n unit(NoImpl, 42)\n"
},
{
"alpha_fraction": 0.590745210647583,
"alphanum_fraction": 0.590745210647583,
"avg_line_length": 21.79452133178711,
"blob_id": "751d426a04b7b795e006295ed7a0a61ae5e5e420",
"content_id": "0ca655feb682c367a3f9b14ef88ff4170959b260",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1664,
"license_type": "permissive",
"max_line_length": 63,
"num_lines": 73,
"path": "/functoolsplus/abc.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "import abc\n\nfrom functoolsplus.utils.singletons import Missing\n\n\nclass Functor(metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def __map__(self, func):\n return NotImplemented\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Functor:\n return _check_methods(C, '__map__')\n return NotImplemented\n\n\nclass Monad(metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def __flatmap__(self, func):\n return NotImplemented\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Monad:\n return _check_methods(C, '__flatmap__', '__unit__')\n return NotImplemented\n\n @staticmethod\n @abc.abstractmethod\n def __unit__(cls, value):\n return NotImplemented\n\n\nclass Filterable(metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def __filter__(self, func):\n return NotImplemented\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Filterable:\n return _check_methods(C, '__filter__')\n return NotImplemented\n\n\nclass Foldable(metaclass=abc.ABCMeta):\n\n @abc.abstractmethod\n def __fold__(self, func, *, initial_value=Missing):\n return NotImplemented\n\n @classmethod\n def __subclasshook__(cls, C):\n if cls is Foldable:\n return _check_methods(C, '__fold__')\n return NotImplemented\n\n\ndef _check_methods(C, *methods):\n mro = C.__mro__\n for method in methods:\n for B in mro:\n if method in B.__dict__:\n if B.__dict__[method] is None:\n return NotImplemented\n break\n else:\n return NotImplemented\n return True\n"
},
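A sketch of the structural typing the hooks above enable (MyBox is an invented name): a class that merely defines __map__ is treated as a Functor without inheriting from it.

from functoolsplus.abc import Functor

class MyBox:
    def __map__(self, func):
        return MyBox()

assert issubclass(MyBox, Functor)   # satisfied via __subclasshook__
assert isinstance(MyBox(), Functor)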
{
"alpha_fraction": 0.5979178547859192,
"alphanum_fraction": 0.5990433096885681,
"avg_line_length": 26.765625,
"blob_id": "dd563e320fd8d15039fadebe04168f83ffea08e3",
"content_id": "71d209b219cea38704da8532b1150eb35129dc76",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3554,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 128,
"path": "/functoolsplus/collections/streams.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from functools import partial\n\nfrom functoolsplus.collections.base import SingleLinkedStruct\nfrom functoolsplus.containers.lazy import LazyValue\nfrom functoolsplus.hof import filter as generic_filter\nfrom functoolsplus.hof import flatmap\nfrom functoolsplus.hof import map as generic_map\nfrom functoolsplus.utils.singletons import new_singleton\n\n\nclass Stream(SingleLinkedStruct):\n\n def __new__(cls, iterable=None):\n if iterable is None:\n iterable = []\n return cls.from_iterable(iterable)\n\n @classmethod\n def get_empty(cls):\n return _StreamEmptyType()\n\n @classmethod\n def cons(cls, head, tail):\n if not isinstance(tail, LazyValue) and callable(tail):\n tail = LazyValue(tail)\n return _StreamConsType(head, tail)\n\n @classmethod\n def cons_simple(cls, head, tail):\n return cls.cons(head, LazyValue(value=tail))\n\n def __add__(self, other):\n if not isinstance(other, Stream):\n return NotImplemented\n return self._appended_with_lazy(LazyValue(value=other))\n\n def __map__(self, func):\n if not self:\n return self.get_empty()\n return self.cons(\n func(self.head),\n self._map_tail_lazy(partial(generic_map, func)))\n\n def __filter__(self, func):\n stream = self\n while stream and not func(stream.head):\n stream = stream.tail\n if not stream:\n return self.get_empty()\n return self.cons(\n stream.head,\n stream._map_tail_lazy(partial(generic_filter, func)))\n\n def __flatmap__(self, func):\n if not self:\n return self.get_empty()\n result_items = func(self.head)\n return result_items._appended_with_lazy(\n self._map_tail_lazy(partial(flatmap, func)))\n\n def _appended_with_lazy(self, other_stream_lazy):\n if not self:\n return other_stream_lazy.value\n\n return self.cons(\n self.head,\n self._map_tail_lazy(\n lambda s: s._appended_with_lazy(other_stream_lazy)))\n\n def _map_tail_lazy(self, func):\n raise NotImplementedError()\n\n def _get_repr_items(self):\n items = []\n stream = self\n while stream:\n items.append(stream.head)\n lazy_tail = stream._tail_lazy # pylint: disable=E1101\n if not lazy_tail.is_evaluated():\n items.append(lazy_tail.raw_value)\n break\n stream = stream.tail\n return items\n\n\nclass _StreamEmptyType(Stream):\n\n def __new__(cls):\n return new_singleton(cls)\n\n def __bool__(self):\n return False\n\n\nclass _StreamConsType(Stream):\n\n def __new__(cls, head, tail):\n if not isinstance(tail, LazyValue):\n raise TypeError(\"'tail' should be lazy value\")\n obj = object.__new__(cls)\n obj._head = head\n obj._tail_lazy = tail\n return obj\n\n @property\n def head(self):\n return self._head\n\n @property\n def tail(self):\n value = self._tail_lazy.value\n if not isinstance(value, Stream):\n raise AttributeError('The tail evaluator returned invalid type')\n return value\n\n def __bool__(self):\n return True\n\n def _map_tail_lazy(self, func):\n return generic_map(func, self._tail_lazy)\n\n\nStream.empty = Stream.get_empty()\n\nfor cls in (_StreamEmptyType, _StreamConsType):\n cls.__internal_name__ = cls.__name__\n cls.__name__ = Stream.__name__\n cls.__qualname__ = Stream.__qualname__\n"
},
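Because cons accepts a callable tail, the class above can represent unbounded sequences. A sketch (integers_from is an invented helper, assuming functoolsplus is importable):

from functoolsplus.collections import Stream

def integers_from(n):
    return Stream.cons(n, lambda: integers_from(n + 1))  # tail built on demand

naturals = integers_from(0)
assert naturals[3] == 3   # only the first four cells are ever evaluated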
{
"alpha_fraction": 0.8260869383811951,
"alphanum_fraction": 0.8260869383811951,
"avg_line_length": 22,
"blob_id": "b4910b99c067841d1906abc4145aa9f4a4a62c41",
"content_id": "afca3ed1ca39e81449fa6c5d17d38121e4988575",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 69,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 3,
"path": "/requirements/requirements-testing-base.txt",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "# Here are packages directly imported in test code\nhypothesis\npytest\n"
},
{
"alpha_fraction": 0.7112675905227661,
"alphanum_fraction": 0.76408451795578,
"avg_line_length": 55.79999923706055,
"blob_id": "b1ece39f20f3363c179260d842f61c70a8fd86da",
"content_id": "2bc74cbfd74bfad9218e501a4e66f76b2b367f2b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 284,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 5,
"path": "/functoolsplus/__init__.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from functoolsplus.containers.pipes import P, Pipe, PipeRegistry # noqa: F401\nfrom functoolsplus.containers.pipes import default_registry as default_pipe_registry # noqa: F401, E501\nfrom functoolsplus.hof import filter, flatmap, fold, map, unit # noqa: F401\n\n__version__ = '0.0.1'\n"
},
{
"alpha_fraction": 0.5674703121185303,
"alphanum_fraction": 0.5678301453590393,
"avg_line_length": 23.377193450927734,
"blob_id": "5d40c047b30dc8cce5e884cabf577333cc072240",
"content_id": "1e33d620adb359efb940b40e9ad9593b6a065e13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2779,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 114,
"path": "/functoolsplus/containers/pipes.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from collections.abc import Callable, Mapping\nfrom functools import partial\n\nfrom functoolsplus.hof import filter as generic_filter\nfrom functoolsplus.hof import flatmap, fold\nfrom functoolsplus.hof import map as generic_map\nfrom functoolsplus.utils.singletons import Missing\n\n\nclass PipeRegistry(Mapping):\n\n def __init__(self):\n self._registry = {}\n\n def __getitem__(self, key):\n return self._registry[key]\n\n def __iter__(self):\n return iter(self._registry)\n\n def __len__(self):\n return len(self._registry)\n\n def register(self, name, func):\n obj = self._clone()\n obj._registry[name] = func\n return obj\n\n def _clone(self):\n cls = type(self)\n obj = cls()\n obj._registry = self._registry.copy()\n return obj\n\n\ndefault_registry = (\n PipeRegistry()\n .register('map', generic_map)\n .register('filter', generic_filter)\n .register('flatmap', flatmap)\n .register('fold', fold)\n .register('len', len)\n .register('sum', sum)\n .register('min', min)\n .register('max', max)\n .register('imap', map)\n .register('ifilter', filter)\n)\n\n\nclass Pipe(Callable):\n\n def __init__(self, input_value=Missing, registry=default_registry):\n self._steps = []\n self._registry = registry\n self._input_value = input_value\n\n def __or__(self, other):\n if isinstance(other, Pipe):\n obj = self._clone()\n obj._steps.extend(other._steps)\n assert other._input_value is Missing\n return obj\n elif callable(other):\n return self.step(other)\n return NotImplemented\n\n def __getattr__(self, name):\n\n def func(*args, **kwargs):\n obj = args[-1]\n if not hasattr(obj, name) and name in self._registry:\n f = self._registry[name]\n else:\n f = getattr(obj, name)\n return f(*args, **kwargs)\n\n return PipeCall(self, func)\n\n def step(self, func):\n obj = self._clone()\n obj._steps.append(func)\n return obj\n\n def to_type(self, type_):\n return self.step(type_)\n\n def _clone(self):\n cls = type(self)\n obj = cls()\n obj._steps = self._steps[:]\n obj._registry = self._registry\n obj._input_value = self._input_value\n return obj\n\n def __call__(self, input_value):\n value = input_value\n for f in self._steps:\n value = f(value)\n return value\n\n\nclass PipeCall(Callable):\n\n def __init__(self, pipe, func):\n self._pipe = pipe\n self._func = func\n\n def __call__(self, *args, **kwargs):\n step_func = partial(self._func, *args, **kwargs)\n return self._pipe.step(step_func)\n\n\nP = Pipe()\n"
},
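A sketch (mine; 'double' is an invented pipe name) of the copy-on-register behaviour used to build default_registry above:

from functoolsplus import default_pipe_registry

reg = default_pipe_registry.register('double', lambda xs: [x * 2 for x in xs])
assert 'double' in reg
assert 'double' not in default_pipe_registry   # register() returned a clone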
{
"alpha_fraction": 0.5876205563545227,
"alphanum_fraction": 0.5876205563545227,
"avg_line_length": 22.923076629638672,
"blob_id": "c6d069165b5d8644283264042ab074c6b17656b2",
"content_id": "3c040f82a6a17fa4682853de91ea1a09fd55403a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1244,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 52,
"path": "/functoolsplus/containers/lazy.py",
"repo_name": "apragacz/functoolsplus",
"src_encoding": "UTF-8",
"text": "from functoolsplus.abc import Functor\nfrom functoolsplus.utils.singletons import new_singleton\n\n\nclass UnevaluatedType(object):\n\n def __new__(cls):\n return new_singleton(cls)\n\n def __repr__(self):\n return '<unevaluated>'\n\n def __str__(self):\n return repr(self)\n\n\nUnevaluated = UnevaluatedType()\n\n\nclass LazyValue(Functor):\n\n def __init__(self, value_evaluator=None, *, value=Unevaluated):\n if not (bool(value is not Unevaluated) ^\n bool(value_evaluator is not None)):\n raise ValueError(\n \"You need to provide either value_evaluator or value\"\n \" exclusively\")\n self._value_eval = value_evaluator\n self._value = value\n\n @property\n def value(self):\n if not self.is_evaluated():\n self._value = self._value_eval()\n return self._value\n\n @property\n def raw_value(self):\n return self._value\n\n def __map__(self, func):\n cls = type(self)\n return cls(lambda: func(self.value))\n\n def __repr__(self):\n return f'{type(self).__name__}({self._value!r})'\n\n def __str__(self):\n return repr(self)\n\n def is_evaluated(self):\n return self._value is not Unevaluated\n"
}
] | 24 |
oDeatho/pic_crawler | https://github.com/oDeatho/pic_crawler | 886e946457187f0edb0c3aed5bcd65830c7ab708 | 87f3598611549d9c380dae13be977ac1bd2a3d3f | 56361d4b0a9b2b48ae0294fe1c65e7291f90fe3c | refs/heads/master | 2021-08-08T06:16:16.625677 | 2017-11-09T18:53:20 | 2017-11-09T18:53:20 | 110,146,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5365206003189087,
"alphanum_fraction": 0.5564408898353577,
"avg_line_length": 27.237499237060547,
"blob_id": "f74d3ffbba4c5624de6d66d483a938792a35873d",
"content_id": "09ca222e8e2b58b942d3442b904743d62ea422e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2259,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 80,
"path": "/Downloader.py",
"repo_name": "oDeatho/pic_crawler",
"src_encoding": "UTF-8",
"text": "import os\nimport re\nimport urllib2\nimport urlparse\nimport time\n\n\nclass Downloader(object):\n def __init__(self):\n self.header = {}\n pass\n\n def set_headers(self, headers):\n self.headers = headers\n\n def _mkdir(self, path):\n path = path.strip()\n\n isExists = os.path.exists(path)\n\n if not isExists:\n os.makedirs(path)\n return True\n else:\n return False\n\n def get_image_list(self, html):\n return re.findall('zoomfile=\"(.*?[jpg])\"', html)\n\n def _save(self, name, pic):\n with open(\"./image/\" + str(name) +\".jpg\", \"w+\") as myfile:\n myfile.write(pic)\n\n def __call__(self, url, headers={}):\n html = self.download(url, self.headers or headers)\n return html\n\n\n def download(self, url, headers={}, data=None):\n time.sleep(3)\n print 'url----is [%s]' % url\n request = urllib2.Request(url, data, self.headers or headers)\n try:\n response = urllib2.urlopen(request)\n except Exception as e:\n print 'Download error:', str(e)\n\n return response.read()\n\nclass Work(object):\n def __init__(self, url, path):\n self.down = Downloader()\n self.down._mkdir(path)\n self.html = self.down(url)\n\n def set_headers(self, headers={}):\n self.down.set_headers(headers)\n\n def work(self):\n image_list = self.down.get_image_list(self.html)\n for i, pic_url in enumerate(image_list):\n pic_info = self.down(pic_url)\n self.down._save(i, pic_info) \n\n\ndef main():\n url = \"http://www.tuyimm.com/thread-7290-1-1.html\"\n header = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch, br',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Connection': 'keep-alive',\n 'Host': 'pan.baidu.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}\n \n test = Work(url,\"image\")\n test.set_headers(header)\n test.work()\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
ashisharora24/python_learning | https://github.com/ashisharora24/python_learning | 6d3af0e062cdd28dad52b7013083711f5b4e7d92 | b1e7a91b3fd8295d93f851d78fb6bcbde224c04a | de37df97b5ee13ba4cca0b5f7f02ac6f131a4d91 | refs/heads/master | 2020-09-25T15:22:44.992213 | 2019-12-13T07:56:43 | 2019-12-13T07:56:43 | 226,033,336 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49074074625968933,
"alphanum_fraction": 0.5277777910232544,
"avg_line_length": 11,
"blob_id": "923dbaa11f00294f3e1f93e00d303ed9f7c65ea6",
"content_id": "b0e5ddb6efccdf5c312703fb9f70d640eb7d9959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 18,
"path": "/chapter17_classes_and_objects/class_objects_4_instance_variable.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Sample:\n\n def __init__(self):\n self.x = 10\n\n def modify(self):\n self.x +=1\n\n def display(self):\n print(self.x)\n\n\ns1 = Sample()\ns2 = Sample()\n\ns1.modify()\ns1.display()\ns2.display()\n"
},
{
"alpha_fraction": 0.6193293929100037,
"alphanum_fraction": 0.6291913390159607,
"avg_line_length": 15.354838371276855,
"blob_id": "25bb713aedb1592ee47ed7f465b94ee2f17c9b25",
"content_id": "9297e9c1eee36e5949969675d7aa0213ec25ac63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 507,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 31,
"path": "/chapter18_inheritance/inheritance_1_.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Teacher:\n\n def setID(self, id):\n self.id = id\n\n def getID(self):\n return self.id\n\n def setName(self, name):\n self.name = name\n\n def getName(self):\n return self.name\n\nclass Student(Teacher):\n\n def setRollNumber(self, rollnumber):\n self.rollnumber = rollnumber\n\n def getRollNumber(self):\n return self.rollnumber\n\n\ns = Student()\ns.setID(1)\ns.setName(\"ashish\")\ns.setRollNumber(1234)\n\nprint(s.getID())\nprint(s.getName())\nprint(s.getRollNumber())\n"
},
{
"alpha_fraction": 0.4910988211631775,
"alphanum_fraction": 0.5113566517829895,
"avg_line_length": 26.149999618530273,
"blob_id": "c8c3895d2013489c41f416d362e76a4a93885455",
"content_id": "a084cfc3384f79ed91e57358420422151f07bfb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1629,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 60,
"path": "/chapter17_classes_and_objects/classes_1.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Student:\n\n ''' this class is to handle student details and there related activities'''\n\n def __init__(self,name = \"admin\",age=33,year=2016,batch=2):\n self.name = name\n self.age = age\n self.year = year\n self.batch = batch\n\n def student_details(self):\n print(\"---------PRINTING DETAILS-----------\")\n print(\"Name = \",self.name)\n print(\"Age = \",self.age)\n self.age += 1\n print(\"Age = \",self.age)\n print(\"Year = \",self.year)\n print(\"Batch = \",self.batch)\n print(\"-----------------------------------\")\n# ---------------------------------\n\nstd = Student()\nprint(\"std.name : \", std.name)\nprint(\"std.age : \", std.age)\nprint(\"std.year : \", std.year)\nprint(\"std.batch : \", std.batch)\nstd.student_details()\n\n# update values in this object\nstd.name = \"ashish\"\nstd.age+=1\n\nprint(\"std.name : \", std.name)\nprint(\"std.age : \", std.age)\nprint(\"std.year : \", std.year)\nprint(\"std.batch : \", std.batch)\nstd.student_details()\n# ---------------------------------\nprint(\"**************************************************\")\nprint(\"**************************************************\")\n# ---------------------------------\n\nstd1 = Student(\"astha\",10,10,10)\nprint(\"std1.name : \", std1.name)\nprint(\"std1.age : \", std1.age)\nprint(\"std1.year : \", std1.year)\nprint(\"std1.batch : \", std1.batch)\nstd1.student_details()\n\n# update values in this object\nstd1.name = \"ashish\"\nstd1.age+=1\n\nprint(\"std.name : \", std1.name)\nprint(\"std.age : \", std1.age)\nprint(\"std.year : \", std1.year)\nprint(\"std.batch : \", std1.batch)\nstd1.student_details()\n\nStudent.student_details()\n"
},
{
"alpha_fraction": 0.6131386756896973,
"alphanum_fraction": 0.6277372241020203,
"avg_line_length": 13.102941513061523,
"blob_id": "9bb45f4e6cfc913d6f93fd5205865474b9e1efa7",
"content_id": "3549749890c4d07ccefbed458f522df8d6b2074b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 959,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 68,
"path": "/chapter5_arrays/arrays.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "from array import *\n\n'''\n\n arrays are not default available in the python.\n for using arrays you need to do the following:\n from array import *\n\n datatype :\n i --> integer\n f --> float\n'''\n\n# create array\na = array('i', [1,2,3,4])\nprint(type(a))\nprint(a)\n\n\n''' add element at end of the array '''\na.append(5)\nprint(a)\n\n\n''' count number of times elements occurs on array '''\nprint(a.count(3))\n\n\n\n'''extend array with another array'''\na.extend([6,7,8])\nprint(a)\n\n''' get the index number of the element'''\nprint(a.index(6))\n\n'''remove specific index'''\na.pop(5)\nprint(a)\n\n\n'''remove last element on the array'''\na.pop()\nprint(a)\n\n\n'''reverse the array'''\na.reverse()\nprint(a)\n\n'''convert the array to list'''\nl = a.tolist()\nprint(a)\nprint(l)\n\n\n''' convert array to string in unicode format'''\ns = a.tostring()\nprint(a)\nprint(s)\n\n'''length of array'''\nprint(len(a))\n\n\n'''slicing of array\n arrayname[start:end:stride]'''\nprint(a[1:5:2])\n"
},
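A standard-library round-trip sketch for the bytes conversion touched on above (tostring() was a deprecated alias of tobytes()):

from array import array

a = array('i', [1, 2, 3])
b = a.tobytes()
assert array('i', b) == a   # a bytes initializer is fed to frombytes()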
{
"alpha_fraction": 0.6875,
"alphanum_fraction": 0.6875,
"avg_line_length": 15,
"blob_id": "3b1459d0b2ea7edcbf4102460473fbe65532fb06",
"content_id": "f66ede291b64e91cdf34ed93b3acca5bc863744c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 9,
"path": "/chapter21_json_python/loading_json_file.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n this will import json file as dictionary\n'''\n\n# only for testing\nimport json\n\n# importing json file\ndata = json.load(open(\"data.json\"))\n"
},
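A companion sketch using only the standard library: dumps/loads are the in-memory counterparts of the dump/load file functions used above.

import json

text = json.dumps({"name": "admin", "year": 2016})
assert json.loads(text) == {"name": "admin", "year": 2016}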
{
"alpha_fraction": 0.747474730014801,
"alphanum_fraction": 0.7613636255264282,
"avg_line_length": 36.71428680419922,
"blob_id": "67e71fa28df28c91429fd9b55eaf282f974f2845",
"content_id": "359070624cc09568e2f7694a7d120419c51e93aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 792,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 21,
"path": "/chapter11_logging/logging.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n logging the exceptions\n\n\nCRITICAL 50 represents Very Serious (high attention)\nERROR 40 represents serious error\nWARNING 30 represents warning message, some caution is needed\nINFO 20 represents message with some important information\nDEBUG 10 represents message with debugging information\nNOTSET 0 represents level is not set\n\n'''\n\nimport logging as log\nlog.basicConfig(filename=\"mylog.txt\")\nlog.critical(\"represents Very Serious (high attention)\")\n# logging.error(\"represents serious error\")\n# logging.warning(\"represents warning message, some caution is needed\")\n# logging.info(\"represents message with some important information\")\n# logging.debug(\"represents message with debugging information\")\n# logging.notset(\"represents level is not set\")\n"
},
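A short standard-library sketch of the level filtering noted above: by default only WARNING and higher reach the file, so the threshold has to be lowered explicitly for INFO/DEBUG records.

import logging as log

log.basicConfig(filename="mylog.txt", level=log.DEBUG)
log.debug("now recorded as well")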
{
"alpha_fraction": 0.6794354915618896,
"alphanum_fraction": 0.6864919066429138,
"avg_line_length": 16.714284896850586,
"blob_id": "6fac667aad994167432103130669b17d8e2a951d",
"content_id": "88196773f0dde9f6231ac15d86dce5145a4a6bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 56,
"path": "/chapter13_directories/directories.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n get current directory path\n'''\n\nimport os\ncurrent = os.getcwd()\nprint(current)\n\n\n'''\n create directory\n # gives error if directory already exist\n'''\nimport os\n#_ os.mkdir('mysub')\n# gives error if directory already exist\n\n\n\n# if mysub doesnt exist then mysub2 will not be created\n#_ os.mkdir('mysub/mysub2')\n# gives error if directory already exist\n\n\n'''\n create complete directory path\n'''\n#_ os.makedirs('mysub3/mysub4')\n# gives error if directory already exist\n\n\n'''\n not understood\n change directory\n'''\nimport os\ngoto = os.chdir('mysub/mysub2')\n\n\n'''remove directory'''\nos.rmdir(mysub)\n\n'''remove complete directory path'''\nos.removedirs('mysub3/mysub4')\n\n\n'''rename directory'''\nos.rename('old_name', 'new_name')\n\n'''get directory content'''\nos.walk(path,topdown=True,oneerror=None,followlinks=False)\n\nfor dirpath, dirname,filenames in os.walk('.'):\n print(\"current directory : \", dirpath)\n print(\"directories : \", dirname)\n print(\"files : \", filenames)\n"
},
{
"alpha_fraction": 0.4359733462333679,
"alphanum_fraction": 0.4949420094490051,
"avg_line_length": 23.713415145874023,
"blob_id": "c010ca7cb5b8c57cc54ed7cd0d046f6e208a498f",
"content_id": "400e70aa326c1a170368e43ef5402583e9680c6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8106,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 328,
"path": "/chapter15_date_time/date_time.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\npython provides 3 modules:\n 1. datetime\n 2. time\n 3. calendar\n\ndatetime module has 4 important classes:\n 1. datetime class\n - handles conbination of date and time\n - attributes : year, month, day, hour, minute, seconds, microseconds, and tzinfo\n 2. date class\n - handles dates of Gregorian Calendar\n - attributes: year, month and day\n 3. time class\n - handles time assuming that everyday has exactly 24*60*60 seconds\n - attributes : hours minutes seconds microseconds and tzinfo\n 4. time delta class\n - handles the duration\n - difference between two dates and time or datetime instance\n'''\n\n# ------------------------------------------------------------------------------\n'''the epoch time : means time duration from 1st Jan 1990'''\n\nimport time\nepoch = time.time()\nprint(epoch)\n# output : 1575903408.308934\n\n# ------------------------------------------------------------------------------\n''' converting epoch to date time '''\nt = time.localtime(epoch)\nyear = t.tm_year # 2019\nmonth = t.tm_mon # 12\nday = t.tm_mday # 9\nhour = t.tm_hour # 15\nmin = t.tm_min # 20\nsecond = t.tm_sec # 17\ndayInWeek = t.tm_wday # 0\n\n#print(\"Current Time : {%d}-%d-%d %d:%:%d\"%(day,month,year,hour,min,second))\nprint(\"{}-{}-{} {}:{}:{}\".format(day,month,year,hour,min,second))\n# OUTPUT : 9-12-2019 20:26:48\n# ------------------------------------------------------------------------------\n'''\n Date and time NOW\n\n for current date and time in our computer\n\n 1. ctime() of \"time\" module\n - direct output as Mon Dec 9 20:28:18 2019\n 2. now() of 'datetime' module\n - direct output as 2019-12-09 20:28:18.294130\n 3. today() of 'datetime' module\n - direct output as 2019-12-09 20:30:38.989535\n 4. date.today() if we need only date\n - direct output as 2019-12-09\n'''\n\nimport time\nt = time.ctime()\nprint(t)\n# output Mon Dec 9 20:28:18 2019\n\nfrom datetime import *\ntime = datetime.now()\nprint(time)\n# output as 2019-12-09 20:28:18.294130\n\nprint(\"Current Time : {}-{}-{} {}:{}:{}\".format(\n time.day,\n time.month,\n time.year,\n time.hour,\n time.min,\n time.second))\n\n\nfrom datetime import *\ntdm = datetime.today()\nprint(\"tdm : \",tdm)\ntdm = date.today()\nprint(tdm)\n\n\n# ------------------------------------------------------------------------------\n''' how to create date time object from date time variables'''\n# combining date and time\n# create date time object\nfrom datetime import *\n\n# 1 (No date time feeded)\ndt = datetime.now()\nprint(dt)\n\n# 2 (when u have separate date time object)\nd = date(2016,4,29)\nt = time(15,30)\ndt = datetime.combine(d,t)\nprint(dt)\n\n# 3 (only date is available)\ndt = datetime(year=2016,month=6,day=24)\nprint(dt)\n\n# 4 (and time time both are avaialable)\ndt = datetime(2016,6,24,18,30)\nprint(dt)\n\n# 5 (passing date and time as defined parameters)\ndt = datetime(year=2016,month=6,day=24,hour=15, minute=30,second=10)\nprint(dt)\n\n# ------------------------------------------------------------------------------\n'''replacing parameters'''\ndt = datetime(year=2016,month=6,day=24,hour=15, minute=30,second=10)\nprint(dt)\n# output as 2016-06-24 15:30:10\ndt = dt.replace(year=2018,day=10)\nprint(dt)\n\n\n# ------------------------------------------------------------------------------\n'''\n FORMATING OF DATES AND TIME:\n\n string format\n syntax :\n from datetime import *\n td = date.today()\n str = td.strftime(\"%d,%B,%y\")\n '''\nfrom datetime import *\ntd = date.today()\nstr = 
td.strftime(\"%d,%B,%y\")\nprint(td.strftime(\"a : %a\"))\nprint(td.strftime(\"A : %A\"))\nprint(td.strftime(\"w : %w\"))\nprint(td.strftime(\"d : %d\"))\nprint(td.strftime(\"b : %b\"))\nprint(td.strftime(\"B : %B\"))\nprint(td.strftime(\"m : %m\"))\nprint(td.strftime(\"y : %y\"))\nprint(td.strftime(\"Y : %Y\"))\nprint(td.strftime(\"H : %H\"))\nprint(td.strftime(\"I : %I\"))\nprint(td.strftime(\"p : %p\"))\nprint(td.strftime(\"M : %M\"))\nprint(td.strftime(\"S : %S\"))\nprint(td.strftime(\"f : %f\"))\nprint(td.strftime(\"Z : %Z\"))\nprint(td.strftime(\"j : %j\"))\nprint(td.strftime(\"U : %U\"))\nprint(td.strftime(\"W : %W\"))\nprint(td.strftime(\"c : %c\"))\nprint(td.strftime(\"x : %x\"))\nprint(td.strftime(\"X : %X\"))\n\n'''\na : Mon (day)\nA : Monday (day)\nw : 1 (week)\nd : 09 date of the month\nb : Dec month\nB : December month\nm : 12 month\ny : 19 year\nY : 2019 year\nH : 00 hour\nI : 12 hour\nD : AM am or pm\nM : 00 minutes\nS : 00 seconds\nf : 000000 microseconds\nZ : timezone(empty or utc or est or cst)\nj : 343 day of the year\nU : 49 week of the year (sunday as start day)\nW : 49 week of the year (monday as start day)\nc : Mon Dec 9 00:00:00 2019 (appropriate date time representation)\nx : 12/09/19 appropriate date representation\nX : 00:00:00 appropriate time representation\n'''\n\n\n\n# ------------------------------------------------------------------------------\n\n'''\n Time Difference\n\nd1 = date(y1, m1, d1)\nd2 = date(y2, m2, d2)\n\ndt = d1-d2\ndt --> this is in days\n\nfor weeks : divmod(dt.days, 7)\nfor months : divmod(dt.days, 30)\n'''\n\n\nt1 = datetime(2018,12,12,15,15,15)\nt2 = datetime(2017,6,6,14,14,14)\nprint(t1)\nprint(t2)\ndt = t1-t2\nprint(dt)\nprint(\"dt.days\", dt.days)\nprint(\"dt.seconds\", dt.seconds)\n\n\n\nmonths, days = divmod(dt.days,30)\nprint(\"months, days : \", months, days)\n\nweeks, days = divmod(days,7)\nprint(\"weeks, days : \", weeks, days)\n\nhours, secs = divmod(dt.seconds, 3600)\nprint(\"hours, seconds : \",hours, secs)\n\nminutes = secs//60\nsecs = secs%60\nprint(\"minutes, seconds : \", minutes, secs)\n\n\nprint(\"Difference : {} months {} weeks {} days {} hours {} minutes {} seconds\".format(\n months,\n weeks,\n days,\n hours,\n minutes,\n secs))\n\n\n\n# ------------------------------------------------------------------------------\n'''\n FINDING duration using \"time delta\"\n\n time_old + delta_time = new_time\n'''\nfrom datetime import *\n\nd1 = datetime(2016,4,29,16,45,0)\n\ndelta = timedelta(\n days=10,\n seconds=10,\n minutes=20,\n hours=12,\n weeks=2\n )\nnew_time = d1+delta\nprint(new_time)\n\n\n#-------------------------------------------------------------------------------\n'''\n comparing 2 dates:\n'''\nt1 = datetime(2018,12,12,15,15,15)\nt2 = datetime(2017,6,6,14,14,14)\n\nif t1==t2:\n print(\"both same\")\nelif t1>t2:\n print(t1)\nelse:\n print(t2)\n\n\n# ------------------------------------------------------------------------------\n\n'''\n sorting of dates\n\n sorting is done in list\n add the dates to the list and and then sort them in order\n'''\n\n\n\nt1 = datetime(2016,1,12,15,15,15)\nt2 = datetime(2017,2,6,14,14,14)\nt3 = datetime(2018,3,12,15,15,15)\nt4 = datetime(2019,4,6,14,14,14)\nlist = [t4,t2,t3,t1]\nlist.sort()\nprint(list)\n\n\n# ------------------------------------------------------------------------------\n\n'''\n stop the execution temporarily\n'''\nimport time\nseconds = 10\ntime.sleep(seconds)\n\n# ------------------------------------------------------------------------------\n\n'''\n how to find the 
time taken by a program\n\n Syntax :\n t1 = perf_counter()\n t2 = perf_counter()\n seconds = t2-t1\n'''\nfrom time import *\nt1 = perf_counter()\nt2 = perf_counter()\nprint(\"seconds : {}\".format(t2-t1))\n\n\n# ------------------------------------------------------------------------------\n\n'''\n check if year is leap year or not\n'''\nfrom calendar import *\nyear = 2016\nif(isleap(year)):\n print(\"its leap year\")\nelse:\n print(\"its not leap year\")\n"
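, "editor_example": "# editorial sketch, not part of the original repo: strptime is the inverse of\n# strftime - it parses a string into a datetime using the same format codes\nfrom datetime import datetime\n\ndt = datetime.strptime(\"24-06-2016 15:30\", \"%d-%m-%Y %H:%M\")\nprint(dt)  # 2016-06-24 15:30:00\n"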
},
{
"alpha_fraction": 0.5100864768028259,
"alphanum_fraction": 0.5648415088653564,
"avg_line_length": 12.880000114440918,
"blob_id": "a4e928877a0811f82d2bd8e1f81dce187ffa97a1",
"content_id": "df84963c8260c321582e711f1aeda9fb80a6bd5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 25,
"path": "/chapter18_inheritance/inheritance_2_constructor.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Father:\n\n\n def __init__(self):\n self.property = 5000\n\nclass Son1(Father):\n\n def display(self):\n print(\"Son1 : \", self.property)\n\n\nclass Son2(Father):\n\n def __init__(self):\n self.property = 10000\n\n\n def display(self):\n print(\"Son2 : \", self.property)\n\ns1 = Son1()\ns1.display()\ns2 = Son2()\ns2.display()\n"
},
{
"alpha_fraction": 0.4395886957645416,
"alphanum_fraction": 0.4507283568382263,
"avg_line_length": 27.463415145874023,
"blob_id": "2f5a3626ffe7527e8bec676a7d023ec5c410db93",
"content_id": "1df9c705be4c600c682df49041da0a0b094baf47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2334,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 82,
"path": "/chapter2_output_statements/output_statement.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "\"\"\"there are mupltiple ways of doing output string:\n\n print('ashish\\tarora') --> ashish arora\n print(\"ashish\\tarora\") --> ashish arora\n print(r'ashish\\tarora') --> ashish\\tarora\n\n \"\"\"\nprint('ashish\\tarora')\nprint(\"ashish\\tarora\")\nprint(r'ashish\\tarora')\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\n\"\"\" for catenation we use + file \"\"\"\n\na = \"ashish\"\nb= \"arora\"\nprint(a+b)\n\n\"\"\" during concatenation there is no extra space added around +\nbut if we do the same with comma it will add extra space\"\"\"\n\na = \"ashish\"\nb= \"arora\"\nprint(a,b)\n\n\n\"\"\"\n how to remove this extra space from the comma --> sep = \"\"\n\"\"\"\n\na = \"ashish\"\nb= \"arora\"\nprint(a,b,sep=\"_\")\n\n\"\"\" after every print statement the is a default newline character added to it \"\"\"\n\nprint(a)\nprint(b)\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\n\"\"\" removing or replacing the newline character from end of the string\"\"\"\n\nprint(a, b, sep=\"@\")\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\"\"\"\nformating the string for variables . make this as habit.\n\"\"\"\na = \"ashish\"\nb = \"arora\"\n\n# way 1\nprint(\"Firstname : {}, Last name :{}\".format(a,b))\n# way 2\nprint(\"Firstname : {0}, Last name :{1}\".format(a,b))\n\n# way 3\nprint(\"Firstname : {firstname}, Last name :{lastname}\".format(firstname=a,lastname=b))\n# way 3 advantage\nprint(\"Firstname : {firstname}, Last name :{lastname}\".format(lastname=b, firstname=a))\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\n\"\"\" another way to do formating is :\n\n example :\n print(\"FirstName : %20s, LastName : %20s\"%(a,b))\n\n %s --> string\n %f --> float\n %i --> decimal and integers\n %20s--> 20 spaces inserted before s string\n %-20s--> 20 spaces inserted after s string\n\"\"\"\nprint(\"FirstName : %s, LastName : %s.\"%(a,b))\nprint(\"FirstName : %20s, LastName : %20s.\"%(a,b))\nprint(\"FirstName : %-20s, LastName : %-20s.\"%(a,b))\n"
},
{
"alpha_fraction": 0.5047619342803955,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 16.5,
"blob_id": "9a809b7bca767a4927e71fd5f612f69d7ca86d7d",
"content_id": "9ef9c16fafea8dfe774355ad65ecb8bfb0c280d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/chapter8_list_and_tuple/tuple.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "''' similar to list not non mutiable'''\n\ntup1 = (1,2,3)\n\nlist = [1,2,3,4,56,\"ashish\"]\ntupl = tuple(list)\n"
},
{
"alpha_fraction": 0.6616161465644836,
"alphanum_fraction": 0.6616161465644836,
"avg_line_length": 18.799999237060547,
"blob_id": "e7e8e0d466d94732a27db65f655e44c96576c716",
"content_id": "170d915d12e7a387324c63ba3b455fed91384e90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/chapter19_polymorphism/poly1_ducking.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Duck:\n def talk(self):\n print(\"Quack Quack\")\n\nclass Human:\n def talk(self):\n print(\"bolo bolo\")\n\n# while designing call_talk we are not worried whether its duck or human.\n# all we know it will talk\n# so call_talk can we used for more than one form\n# thats why its called polymorphism\ndef call_talk(obj):\n obj.talk()\n\n\nx = Duck()\ncall_talk(x)\ny = Human()\ncall_talk(y)\n"
},
{
"alpha_fraction": 0.5536332130432129,
"alphanum_fraction": 0.5536332130432129,
"avg_line_length": 15.05555534362793,
"blob_id": "5de5bce62e003bc3122cec30be4827b836a77a22",
"content_id": "b425a90aa4356b84fa0c956b78429de64b934d21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 289,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 18,
"path": "/chapter19_polymorphism/poly2_check_if_Class_has_a_method.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Duck:\n def bark(self):\n print(\"Quack Quack\")\n\nclass Human:\n def talk(self):\n print(\"bolo bolo\")\n\ndef call_talk(obj):\n if hasattr(obj,\"talk\"):\n obj.talk()\n elif hasattr(obj,\"bark\"):\n obj.bark()\n\ne = Duck()\ncall_talk(e)\nh = Human()\ncall_talk(h)\n"
},
{
"alpha_fraction": 0.4503311216831207,
"alphanum_fraction": 0.4503311216831207,
"avg_line_length": 13.15625,
"blob_id": "03710d764b4cab2a591e694215c7204b7121b3b9",
"content_id": "58d110936aaba38d3a0252a137fbb98b53952a44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 906,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 64,
"path": "/chapter18_inheritance/inheritance_4_multi_inheritance.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Father:\n\n def __init__(self):\n print(\"Father\")\n\nclass A(Father):\n\n def __init__(self):\n print(\"A start\")\n super().__init__()\n print(\"A end\")\n\nclass B(Father):\n\n def __init__(self):\n print(\"B start\")\n super().__init__()\n print(\"B end\")\n\nclass C(Father):\n\n def __init__(self):\n print(\"C start\")\n super().__init__()\n print(\"C end\")\n\nclass X(A,B):\n\n def __init__(self):\n print(\"X start\")\n super().__init__()\n print(\"X end\")\n\nclass Y(B, C):\n\n def __init__(self):\n print(\"Y start\")\n super().__init__()\n print(\"Y end\")\n\nclass P(X,Y, C):\n\n def __init__(self):\n print(\"P start\")\n super().__init__()\n print(\"P end\")\n\ne = P()\n\n\n# OUTPUT ::\n# P start\n# X start\n# A start\n# Y start\n# B start\n# C start\n# Father\n# C end\n# B end\n# Y end\n# A end\n# X end\n# P end\n"
},
{
"alpha_fraction": 0.41185078024864197,
"alphanum_fraction": 0.4162399470806122,
"avg_line_length": 25.288461685180664,
"blob_id": "542e6a10776d2dd9dcf6c1be358f8d0aa83d2d06",
"content_id": "43aec656d077fc6e37addf60da6d51b609e65a09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1367,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 52,
"path": "/chapter14_regular_expression/regular_expression.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''METHOD 1 (long way)'''\n\nimport re\nprog = re.compile(r'm\\w\\w')\nstr = 'cat mat bat rat'\nresult = prog.search(str)\nprint(result.group())\n\n# ------------------------------------------------------------------------------\n'''METHOD 2 (shortway)'''\nimport re\nstr = 'man sun map run'\nresult = re.search(r'm\\w\\w',str)\nprint(result)\nif result:\n print(result.group())\n\n# ------------------------------------------------------------------------------\n'''METHOD 3: findall() prints all matching values in list format'''\nimport re\nstr = 'man sun map run'\nresult = re.findall(r'm\\w\\w',str)\nprint(result)\n# OUTPUT\n# ['man', 'map']\n\n# ------------------------------------------------------------------------------\n'''method 4 : match'''\nimport re\nstr = 'man sun map run'\nresult = re.match(r'm\\w\\w',str)\nprint(result.group())\n\n# ------------------------------------------------------------------------------\n'''method 5 : split\n returns list of matached'''\nimport re\nstr = 'man sun map run'\nresult = re.split(r'm\\w+',str)\nprint(result)\n\n# ------------------------------------------------------------------------------\n'''method 6 : replace\n syntax:\n sub(regular_expression, new_string, string)\n'''\n\nimport re\nstr = 'man sun map run'\nresult = re.sub(r'map','women',str)\nprint(result)\n# ------------------------------------------------------------------------------\n"
},
{
"alpha_fraction": 0.4363979995250702,
"alphanum_fraction": 0.45891058444976807,
"avg_line_length": 20.75342559814453,
"blob_id": "69522739b922d6a4534ba719be100ad1f0134708",
"content_id": "59db9b65d53d2a2ac3c574fc08b5992afe200f75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6352,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 292,
"path": "/chapter9_dictionaries/dictionaries.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n dictionary handles key:value pair\n'''\n\ndict1 = {\"a\":\"ashish\", \"b\":\"testing\", 1:32, 2:\"passed\"}\n\n\n\n\n# ------------------------------------------------------------------------------\n'''\n accessing values of the dictionary\n'''\n\nprint(dict1[\"a\"])\nprint(dict1[1])\n\n\n# ------------------------------------------------------------------------------\n'''\n for loop in dictionary\n there are 2 type of forloop in the dictionary\n\n 1. for key in dictionaryName:\n 2. for key, value in dictionaryName.items():\n'''\n\n# type 1\nfor key in dict1:\n print(\"key : \", key)\n print(\"value : \", dict1[key])\n\n# type 2:\n\nfor key, value in dict1.items():\n print(\"key : \", key)\n print(\"value : \", value)\n\n\n# ------------------------------------------------------------------------------\n'''\n length of the dictionary means : number of (key,value) pairs\n'''\n\nprint(\"length of the dictionary :\", len(dict1))\n\n\n\n# ------------------------------------------------------------------------------\n\n'''\n checking membership\n incase of dictionary we can check if the key is a member or not\n we are not able to check the membership of the values\n'''\n\nprint(\"a\" in dict1)\n\nif \"c\" in dict1:\n print(\"found\")\nelse:\n print(\"not found\")\n\n\n# ------------------------------------------------------------------------------\n\n'''\n clearing data from dictionary\n'''\n\n# clear dictionary data\nprint(\"dict1 : \", dict1)\ndict1.clear()\nprint(\"dict1 : \", dict1)\n\n\n# ------------------------------------------------------------------------------\n'''\n copy old dictionary to new dictionary\n'''\ndict2 = {\"a\":\"ashish\", \"b\":\"testing\", 1:32, 2:\"passed\"}\ndict3 = dict2.copy()\n# for testing we will modify dict1 , and dict2 , the changes should not be\n# reflected in th oppposite dictionary\ndict2[1]=33\ndict3[1]=34\nprint(\"dict2 : \", dict2)\nprint(\"dict3 : \", dict3)\n\n\n# ------------------------------------------------------------------------------\n'''\n - create dictionary :\n - list is given\n - and final value : this can be string, digit or list, or anything.\n same will be placed as values for all the keys\n - new dictionary is created with list elements as keys and final value as its\n values\n\n'''\nkeysList = [1,2,3,4]\nfinal_value = [\"a\",\"b\"]\ndict4 = dict.fromkeys(keysList, final_value)\nprint(\"dict4 : \", dict4)\n# output:\n# dict4 : {1: ['a', 'b'], 2: ['a', 'b'], 3: ['a', 'b'], 4: ['a', 'b']}\n# ------------------------------------------------------------------------------\n\n\n'''\n get the value from dictionary\n\n syntax:\n n = dictionaryName.get(key,val1)\n\n note:\n if the key doesnt exist it will return val1\n'''\nn = dict4.get(1,\"key not found\")\nprint(n)\nn = dict4.get(11,\"key not found\")\nprint(n)\nn = dict4.get(1,['a','b'])\nprint(n)\nn = dict4.get(11,['a','b'])\nprint(n)\n\n\n# ------------------------------------------------------------------------------\n\n'''\n dictionary.items()\n returns the object containing the key value pair\n'''\ncheck = dict4.items()\nprint(check)\n\n\n\n# ------------------------------------------------------------------------------\n\n'''\n dictionary.keys()\n\n returns all the keys in a sequence\n'''\ncheck = dict4.keys()\nprint(check)\n\n\n# ------------------------------------------------------------------------------\n'''\n dictionaryName.values()\n\n returns all the values in the sequence format\n'''\ncheck = dict4.values()\nprint(check)\n\n\n# 
------------------------------------------------------------------------------\n'''\n dict_a.update(dict_b)\n\n update one dictionary to another\n'''\ndict_a = {1:'a',2:'b',3:'c',4:'d'}\ndict_b = {5:'e',6:'f',7:'g',8:'h'}\n\ndict_a.update(dict_b)\n\nprint(\"dict_a : \", dict_a)\nprint(\"dict_b : \", dict_b)\n\n# ------------------------------------------------------------------------------\n\n'''\n dictionary.pop(key, return_this_if_not_found)\n\n note:\n 1. if the key is found, then key, value pair will be removed\n 2. it will return the value\n'''\n\ncheck = dict4.pop(4,\"not_found\")\nprint(\"check : \", check)\nprint(\"dict4 : \", dict4)\n\ncheck = dict4.pop(24,\"not_found\")\nprint(\"check : \", check)\nprint(\"dict4 : \", dict4)\n\n\n# ------------------------------------------------------------------------------\n\n'''\n dictionary.setdefault(key,value)\n\n Note :\n - if the key is found then it will return the value of the key from the\n dict and will not perform any taskabs\n - if key is not found then it will add the key value to it\n'''\nprint(\"dict4 : \" , dict4)\ncheck = dict4.setdefault(3,\"testing\")\nprint(\"check : \", check)\nprint(\"dict4 : \" , dict4)\ncheck = dict4.setdefault(5,\"testing\")\nprint(\"check : \", check)\nprint(\"dict4 : \" , dict4)\n\n# ------------------------------------------------------------------------------\n\n'''\n dictionary.update({key:value})\n\n note:\n - if key is found.\n its value will be updated\n - if key not found\n new key value pair will be added to dictionary\n'''\n\nprint(\"dict4 : \", dict4)\ndict4.update({3:\"ashish\"})\nprint(\"dict4 : \", dict4)\ndict4.update({66:\"ashish\"})\nprint(\"dict4 : \", dict4)\n\n# ------------------------------------------------------------------------------\n'''\n convert key list and value list to dictionary\n'''\nkeys_list = [1,2,3,4,5]\nvalue_list = ['a','b','c','d','e']\n\ndict5 = dict(zip(keys_list,value_list))\nprint(dict5)\n\n# output :\n# {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e'}\n\n\n# ------------------------------------------------------------------------------\n'''\n converting a string to dictionary\n\n string = 'a=ashish,b=testing,1=32,2=passed'\n\n import json\n res = json.loads(test_string)\n'''\n\nimport json\ntest_string = '{\"Nikhil\" : 1, \"Akshat\" : 2, \"Akash\" : 3}'\nprint(\"The original string : \" + str(test_string))\nres = json.loads(test_string)\nprint(\"The converted dictionary : \", res)\n\n\n\n# ------------------------------------------------------------------------------\n'''\n passing dictionary to function\n\n'''\n\ndef test(dict):\n for k,v in dict.items():\n print(k,v)\nd = {1:2,3:4,5:6}\ntest(d)\n\n# ------------------------------------------------------------------------------\n\n'''\n ordered dictionary:\n\n from collections import OrderedDict\n d = OrderedDict()\n\n'''\n\n\nfrom collections import OrderedDict\nd = OrderedDict()\nd[1]=2\nd[2]=3\nd[3]=4\n\nfor k,v in d.items():\n print(k,v)\n"
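, "editor_example": "# editorial sketch, not part of the original repo: a dictionary comprehension\n# builds a dict in one expression, like the list comprehension in chapter 8\nsquares = {n: n*n for n in range(1, 6)}\nprint(squares)  # {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}\n"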
},
{
"alpha_fraction": 0.6150081753730774,
"alphanum_fraction": 0.6476345658302307,
"avg_line_length": 16.514286041259766,
"blob_id": "7b2fa688fd6bf349089423176cbe5e16cdd4385b",
"content_id": "bb56161dec466d7b4cae5784de17c67a0167b672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 613,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 35,
"path": "/chapter20_abstract_and_interface/abstract_class.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "from abc import ABC\n\nclass MyClass(ABC):\n\n @abstractmethod\n def calculate(self,x):\n pass\n\nclass sub1(MyClass):\n\n def calculate(self, x):\n print(\"square : \", x*x)\n\n\nclass sub2(MyClass):\n\n def calculate(self, x):\n print(\"squareroot : \", math.sqrt(x))\n\nobj1 = sub1()\nobj1.calculate(10)\nobj2 = sub2()\nobj2.calculate(100)\n\n\n# 1. abstract class can have\n# 1. constructor\n# 2. concrete method\n# 3. abstract method\n#\n# 2. you cannot create object for abstract class\n#\n# 3. you can create objects of subclass (child class)\n#\n# 4. abstract method need to be defined in subclass\n"
},
{
"alpha_fraction": 0.4907975494861603,
"alphanum_fraction": 0.49284252524375916,
"avg_line_length": 17.80769157409668,
"blob_id": "a6a3a72c94b6538c2875b056c3bd1bd472b8ef67",
"content_id": "14154c9d32ab26238dd7fdc440bfec696ef8723e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 489,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/chapter12_files_in_python/zip_and_unzip.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "''' how to zip a file'''\n\nfrom zipfile import *\n\nf = ZipFile('test.zip','w', ZIP_DEFLATED)\nf.write(\"F3.txt\")\nf.write('testing_read_file.txt')\nf.close()\n\n\n# # ------------------------------------------------------------------------------\n\n\n''' how to unzip a file '''\n\nfrom zipfile import *\nz = ZipFile(\"test.zip\",'r')\nnames = z.namelist()\n\nfor file in names:\n f = z.open(file,\"r\")\n content=f.read()\n WR = open(file,'w')\n WR.write(content.decode())\n WR.close()\n f.close()\n"
},
{
"alpha_fraction": 0.6764705777168274,
"alphanum_fraction": 0.6764705777168274,
"avg_line_length": 22.799999237060547,
"blob_id": "9f1420bde0ea7e10e49d3509e520b9826719726a",
"content_id": "83863e27f01f6f88a04faff17cdcee7514b4fba0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 20,
"path": "/chapter3_input_statement/input_statements.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "\"\"\" while taking the input values from the users, we have a single syntax:\na = input(\"How are you\")\n\ninput is received only in the form of string\n\nonce received it can be converted to what we like :\n\na = int(input(\"Enter Your age : \"))\n\"\"\"\n\n\n\n\nfullname = input(\"Enter Your Name : \")\nprint(\"You have entered : {name}\".format(name=fullname))\n\nage = int(input(\"Enter your age : \"))\n\nprint(\"age type is : \", type(age))\nprint(\"you have entered your age as {age}\".format(age=age))\n"
},
{
"alpha_fraction": 0.6331360936164856,
"alphanum_fraction": 0.6525013446807861,
"avg_line_length": 24.81944465637207,
"blob_id": "33b9ddcd59d5b67f068106ff40497ee38c18b664",
"content_id": "5a2ec74a3dee018e918c711193cdf82b969fccd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1859,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 72,
"path": "/chapter17_classes_and_objects/old_class_objects_2.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Automobile:\n\n # this is class variable\n objectCount = 0\n\n # this is constructor and is used to initiate instance variable\n # it can also act upon class variable\n def __init__(self, type=\"land_Vehicle\", tyresCount = 4):\n Automobile.objectCount += 1\n self.type = type\n self.tyresCount = tyresCount\n\n def setCompany(self, company = \"maruti\"):\n self.company = company\n print(\"id(company) : \", id(company))\n print(\"id(self.company) : \", id(self.company))\n def setTyresCount(self, tyresCount = 4):\n self.tyresCount = tyresCount\n print(\"id(tyreCount) : \", id(self.tyresCount))\n def getCompany(self):\n return self.company\n def getTyresCount(self):\n return self.tyresCount\n\n def increaseTyre(self):\n self.tyresCount+=1\n print(\"id(self.tyre..) : \", id(self.tyresCount))\n\n\n # this is instance month\n def price(self):\n if self.tyres==1:\n return 1000\n elif self.tyres==2:\n return 2000\n elif self.tyres==3:\n return 3000\n else:\n return 4000\n\n @classmethod\n def classObjectCheck(cls):\n return cls.objectCount\n\n # static method\n @staticmethod\n def staticObjectCheck():\n return Automobile.objectCount\n\n# calling static method:\n# Classname.StaticMethod()\nAutomobile.staticObjectCheck()\n\n\n# you can call class variable directly as\n# Classname.variable\nprint(Automobile.objectCount)\n\n# you can call class methods directly as\n# Classname.methodname\nprint(Automobile.classObjectCheck())\n\n\n# creating the class object to start instance\ns1 = Automobile()\ntypreCount1 = 5\nprint(\"id(typreCount1) : \", id(typreCount1))\ns1.setTyresCount(typreCount1)\ns1.increaseTyre()\nprint(\"id(typreCount1) : \", id(typreCount1))\nprint(typreCount1)\nprint(s1.getTyresCount())\n"
},
{
"alpha_fraction": 0.4979757070541382,
"alphanum_fraction": 0.5364372730255127,
"avg_line_length": 13.969696998596191,
"blob_id": "a3258715f1c147f2bb281113513a1323c7df0f9d",
"content_id": "663ba45df2c693822f9d7302bdbee5b1c01038e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 33,
"path": "/chapter18_inheritance/inheritance_3_constructor_super_keyword.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Father:\n\n def __init__(self):\n self.property = 5000\n\n def display(self):\n print(\"Father : \", self.property)\n\n\nclass Son1(Father):\n\n def __init__(self):\n super().__init__()\n\n def display(self):\n print(\"Son1 : \", self.property)\n\n\nclass Son2(Father):\n\n def __init__(self):\n self.property = 10000\n\n\n\n def display(self):\n super().display()\n # print(\"Son2 : \", self.property)\n\ns1 = Son1()\ns1.display()\ns2 = Son2()\ns2.display()\n"
},
{
"alpha_fraction": 0.46341463923454285,
"alphanum_fraction": 0.5219511985778809,
"avg_line_length": 23.117647171020508,
"blob_id": "d5db4d451bf148738209ae3d83c16d2a56e5395c",
"content_id": "d6b85e680be4324667a5d563a9ba63475573477a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 17,
"path": "/chapter10_exceptions/custom_exception.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class MyException(Exception):\n def __init__(self,arg):\n self.msg=arg\n\n def check(dict):\n for k,v in dict.items():\n print('Name = {:15s} Balance = {:10.2f}'.format(k,v))\n if (v<2000.00):\n raise MyException(\"Balance is less\")\n\n\n bank = {'Raj':5000,'Vani':55000,'Ajay':1990}\n\n try:\n check(bank)\n except MyException as me:\n print(me)\n"
},
{
"alpha_fraction": 0.6248794794082642,
"alphanum_fraction": 0.6321118474006653,
"avg_line_length": 28.628570556640625,
"blob_id": "a9f12e45947a337c0f4200adb4ac30332c5fef07",
"content_id": "24d8a59d9421d7b8645a56dc705df951a708d212",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2074,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 70,
"path": "/chapter17_classes_and_objects/old_class_object3.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Tyres:\n # class level variables:\n objectCount = 0\n\n def __init__(self):\n pass\n\n def setTyreCount(self, tyreCount=4):\n self.TyreCount = tyreCount\n print(\"method : setTyreCount\")\n print(\"id(self.TyreCount) : \", id(self.TyreCount))\n print(\"value(self.TyreCount) : \", self.TyreCount)\n\n def getTyreCount(self):\n print(\"method : getTyreCount\")\n print(\"id(self.TyreCount) : \", id(self.TyreCount))\n print(\"value(self.TyreCount) : \",self.TyreCount)\n return self.TyreCount\n\n def increaseTyreCount(self):\n self.TyreCount += 1\n print(\"method : increaseTyreCount\")\n print(\"id(self.TyreCount) : \", id(self.TyreCount))\n print(\"value(self.TyreCount) : \",self.TyreCount)\n\n def setBrandNames(self, brandNames=[]):\n self.brandNames = brandNames\n print(\"method : setBrandNames\")\n print(\"id(self.brandNames) : \", id(self.brandNames))\n print(\"value(self.brandNames) : \",self.brandNames)\n\n def getBrandNames(self):\n print(\"method : getBrandNames\")\n print(\"id(self.brandNames) : \", id(self.brandNames))\n print(\"value(self.brandNames) : \",self.brandNames)\n return self.brandNames\n\n def increaseBrandNames(self, newBrandName):\n self.brandNames.append(newBrandName)\n print(\"method : increaseBrandNames\")\n print(\"id(self.brandNames) : \", id(self.brandNames))\n print(\"value(self.brandNames) : \",self.brandNames)\n\n\nbrandNames = [\"ford\", \"chev\"]\nprint(\"brandNames : \", brandNames)\nprint(\"id(brandNames) : \", id(brandNames))\n\ns2 = Tyres()\ns2.setBrandNames(brandNames)\n\ns2.increaseBrandNames(\"maruti\")\n\nprint(\"brandNames : \", brandNames)\nprint(\"id(brandNames) : \", id(brandNames))\n\n# tyreCount = 3\n# print(\"Value : \", tyreCount)\n# print(\"id(tyreCount) : \", id(tyreCount))\n#\n# s1 = Tyres()\n# s1.setTyreCount(tyreCount)\n# s1.increaseTyreCount()\n#\n# print(\"Value : \", tyreCount)\n# print(\"id(tyreCount) : \", id(tyreCount))\n#\n# t1 = s1.getTyreCount()\n# print(\"Value : \", t1)\n# print(\"id(t1) : \", id(t1))\n"
},
{
"alpha_fraction": 0.6441515684127808,
"alphanum_fraction": 0.6441515684127808,
"avg_line_length": 17.96875,
"blob_id": "9cb2677d194d19ef6878598d39337fc63cb8918f",
"content_id": "ddb22ba05013f49d2c9aa249eea58337614c1c16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 607,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 32,
"path": "/chapter20_abstract_and_interface/interface.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "from abc import ABC\n\nclass Database(ABC):\n\n def connect(self):\n pass\n\n def disconnect(self):\n pass\n\nclass Oracle(Database):\n\n def connect(self,input):\n print(\"Oracle Connect\")\n\n def disconnect(self,input):\n print(\"Oracle disconnect\")\n\nclass MSSQL(Database):\n def connect(self,input):\n print(\"MSSQL connect\")\n\n def disconnect(self,input):\n print(\"MSSQL disconnect\")\n\nprint(\"Connect to database : \")\nprint(\"MSSQL or Oracle\")\nstr = input(\"Enter your selection : \")\nclassname = globals()[str]\nx = classname()\nx.connect(\"ashish\")\nx.disconnect(\"arora\")\n"
},
{
"alpha_fraction": 0.4743025004863739,
"alphanum_fraction": 0.4747919738292694,
"avg_line_length": 19.43000030517578,
"blob_id": "8155c35e9403833f0a8e038e8c6076e0ab3f660b",
"content_id": "add3f1986a2e13a159b89d7444229b4ae4bea7b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2043,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 100,
"path": "/chapter16_database/database.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n INSTALL FOLLOWING\n syntax :\n pip install mysql-connector-python\n'''\n\n# ------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------\n''' fetch ONE ROW AT A TIME from database '''\n\nimport mysql.connector\n\nconn = mysql.connector.connect(\n host = 'localhost',\n database = 'urbanclap',\n user = 'root',\n password = 'admin'\n )\n\nif conn.is_connected():\n print(\"connected to mysql\")\n\ncursor = conn.cursor()\n\ncursor.execute(\"select * from uc\")\n\n### getting one row at a time\nrow = cursor.fetchone()\n\nwhile row is not None:\n print(row)\n row = cursor.fetchone()\n\ncursor.close()\nconn.close()\n\n\n# ------------------------------------------------------------------------------\n\n'''\n fetch ALL ROWS at one time\n'''\n\n\nimport mysql.connector\n\nconn = mysql.connector.connect(\n host = 'localhost',\n database = 'urbanclap',\n user = 'root',\n password = 'admin'\n )\n\nif conn.is_connected():\n print(\"connected to mysql\")\n\ncursor = conn.cursor()\n\ncursor.execute(\"select * from uc\")\n\n### getting one row at a time\nrows = cursor.fetchall()\n\nprint(\"row count : \", cursor.rowcount)\n\nfor row in rows:\n print(row)\n\ncursor.close()\nconn.close()\n\n# # ------------------------------------------------------------------------------\n#\n# ''' INSERT STATEMENT '''\n#\nimport mysql.connector\n\nconn = mysql.connector.connect(\n host = 'localhost',\n database = 'urbanclap',\n user = 'root',\n password = 'admin'\n )\n\nif conn.is_connected():\n print(\"connected to mysql\")\n\ncursor = conn.cursor()\n\ntry:\n str = 'INSERT INTO test (test) VALUES (3)'\n cursor.execute(str)\n conn.commit()\n print(cursor.rowcount, \"record inserted.\")\nexcept:\n conn.rollback()\n print(\"failed\")\n\ncursor.close()\nconn.close()\n"
},
{
"alpha_fraction": 0.5828343033790588,
"alphanum_fraction": 0.628742516040802,
"avg_line_length": 11.219511985778809,
"blob_id": "eb6fac97270743d901a5e6a5eb34a338ffbe98bc",
"content_id": "a0557ee275a3a3e3119b34999ea93669927d979f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 41,
"path": "/chapter17_classes_and_objects/class_objects_5_class_variable.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Sample:\n\n # class variables\n x = 10\n\n @classmethod\n def modify(cls):\n cls.x +=1\n\n def display(cls):\n print(cls.x)\n\n\ns1 = Sample()\ns2 = Sample()\n\n\ns1.display()\ns2.display()\n\ns1.modify()\n\ns1.display()\ns2.display()\n\ns2.modify()\n\ns1.display()\ns2.display()\n\n\n# this action is happeing for object s1 level only\n# the memory holded by s1.x is been operated with +=\n# so this will not change s2.x value\ns1.x+=1\nprint(s1.x)\nprint(s2.x)\n\ns2.modify()\ns1.display()\ns2.display()\n"
},
{
"alpha_fraction": 0.569198727607727,
"alphanum_fraction": 0.602497398853302,
"avg_line_length": 16.796297073364258,
"blob_id": "afbc6265efa4fbc701d914cd34ac2ed52b1437a6",
"content_id": "38204400d22e87352d58adfb6140168f2cce5fff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1922,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 108,
"path": "/chapter8_list_and_tuple/list.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''list vs array\n\n list: can have elements on any datatype (mixer)\n array : all elements in arrays are of same datatype\n\n'''\n\na = []\nprint(a)\n\n\na = [10,20,30,'as','b']\nprint(a)\n\n\n'''\n creating list from range\n'''\na = list(range(1,10,2))\nprint(a)\n\n\n\n''' how to add elements to list'''\na.append(\"ashish\")\n\n\n'''slicing'''\nprint(a[1:10:2])\n\n\n'''concaatenation of list'''\na = [1,2,3,4,5]\nb = ['a','b','c','d','e']\nc = a+b\nprint(c)\n\n\n'''repetition'''\nprint(c*2)\n\n\n'''membership in list'''\nprint(\"a\" in a)\n# output : True or False\n\n\nlist = [1,2,3,4,5,'a','b','c','d','e']\nprint(list.index('a'))\n# returns the index number of the element of found and if not found it returns error\n\nlist.insert(5,6)\nprint(list)\n# inserts element \"6\" at the index 5\n\nnew_list = list.copy()\n# copys the list to new list\n# no relation in both after creation\n\nnew_list.extend(list)\n# adds list elements to new_list\n\nlist.count(\"a\")\n# returns the number of occurance of element \"a\" on the list\n# if element is not found then value 0 is returned\n\nlist.remove('a')\n# removes the element a from list\n# removes only the first occurence element of the list\n# looks from left to right\n\nlast_element = list.pop()\n# removes the last element of the list\n\n#list.sort()\n# list is sorted in the ascending order\n\nlist.reverse()\n# reverse the order of the elements\n\nlist.clear()\n# clears all the elements of the list\n\n\n# n1 = max(list)\n# n2 = min(list)\n\n\n# ------------------------------------------------------------------------------\n'''finding common elements in between 2 list'''\nl1 = [1,2,7,6,5,1,2]\nl2 = [5,6,7,8,9,5,5]\n\n# change element to set() to get unique elements\ns1 = set(l1)\ns2 = set(l2)\n\n# get intersection elements\ns3 = s1.intersection(s2)\nprint(s3)\n# common_elements = list(s3)\n# print(common_elements)\n\n\n\n# ------------------------------------------------------------------------------\nlist = [x for x in range(1,10,1) if x%2==0]\nprint(list)\n"
},
{
"alpha_fraction": 0.5829787254333496,
"alphanum_fraction": 0.6281914710998535,
"avg_line_length": 16.24770736694336,
"blob_id": "ea277a4ec091b6abab73e41fa29f0599967410bc",
"content_id": "86dffec93b6efed3aa43afe3678f80fad9e74d41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5640,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 327,
"path": "/chapter5_arrays/np.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''numpy arrays are called n dimensional arrays where n=dimension'''\n\nimport numpy as np\n\n# 1 demensional\narr = np.array([1,2,3])\nprint(arr)\n\n# 2 demensional\narr = np.array([[1,2,3],[4,5,6]])\nprint(arr)\n\n\n\n'''creating numpy array while declaring the datatype'''\narr = np.array([1,2,3], int)\nprint(arr)\n\n\n\n'''\n numpy array as linspace\n creates array with evenly spaced points\n syntax : array = numpy.lispace(start, end, n)\n'''\narr = np.linspace(1,10,2)\nprint(arr)\narr = np.linspace(0,10,5)\nprint(arr)\n\n\n\n''' numpy with logspace\n evenly spaces points on a logaithmically spaced scale\n syntax : numpy.logspace(start, end, n)\n '''\n\narr = np.logspace(1,10,2)\nprint(arr)\n\n\n'''numpy with arange,\n works same as range\n syntax : numpy.arange(start,end,stepsize)\n '''\narr = np.arange(1,10,2)\nprint(arr)\n\n''' create numpy with zeros\n syntax : numpy.zeros(n,datatype)\n datatype : int or float\n '''\narr = np.zeros(10, int)\nprint(arr)\narr = np.zeros(10, float)\nprint(arr)\n\n''' create array with ones\n syntax : numpy.ones(n, datatype)\n datatype : int or float\n'''\narr = np.ones(10, int)\nprint(arr)\narr = np.ones(10, float)\nprint(arr)\n\n\n''' OPERATORS\n mathematical operators work on numpy'''\na = np.array([1,2,3,4,5,6,7,8,9,10])\nb = np.array([11,12,13,14,15,16,17,18,19,20])\nc = a+b\nprint(c)\nc = a-b\nprint(c)\nc = a/b\nprint(c)\nc = a*b\nprint(c)\n\n''' more operators'''\nprint(np.sin(c))\nprint(np.cos(c))\nprint(np.tan(c))\nprint(np.arcsin(c))\nprint(np.arccos(c))\nprint(np.arctan(c))\nprint(np.log(c))\n\n# absolut value\nprint(np.abs(c))\nprint(np.sqrt(c))\n#print(np.pow(c,2))\nprint(np.exp(c))\n\n#sum of all the elements of array\nprint(np.sum(c))\n# prod of all elements of the array\nprint(np.prod(c))\n# returns smallest element\nprint(np.min(c))\n# returns largest element\nprint(np.max(c))\n# returns avaerage of all the elements\nprint(np.mean(c))\n# returns the median values (center value of the array when its in sorted format)\nprint(np.median(c))\n# returns the variance\nprint(np.var(c))\n# returns the convariance\nprint(np.cov(c))\n# returns standard deviation\nprint(np.std(c))\n# returns the index of min values of the array\nprint(np.argmin(c))\n# returns the index of max values of the array\nprint(np.argmax(c))\n# returns unique elements on the array in list format\nprint(np.unique(c))\n# returns the array in sorted format\nprint(np.sort(c))\n\n\n\n\n'''COMPARING THE ARRAYS:\n when the arrays are compared the output is in True False format\n'''\na = np.array([1,2,3,4,5,6,7,8,9,10])\na = np.array([11,2,13,4,15,6,17,8,19,10])\nc = a==b\nprint(c)\n# output : [ True False True False True False True False True False]\n\nc = a>b\nprint(c)\n\nc = a>=b\nprint(c)\n\nc = a<b\nprint(c)\n\nc = a<=b\nprint(c)\n\nc = a!=b\nprint(c)\n\n\n\n\n\n''' where clause\n if we have a condition which we need to check on every element of the array.\n if condition is meant true. 
then expression of true is printed\n else false expression is printed\n\n syntax : a = where(condition, true_expression, false_expression)\n'''\n\na = np.array([1,2,3,4,5,-6,-7,-8,-9,-10])\nfrom numpy import where\nc = where( a>=1,\n \"found\",\n \"not_found\")\nprint(c)\n# output:\n# ['found' 'found' 'found' 'found' 'found' 'not_found' 'not_found'\n# 'not_found' 'not_found' 'not_found']\n\n\n\n\n'''\n shallow copy:\n syntax : b = a.view()\n which means that when b is modified, a will also be updated\n'''\nfrom numpy import *\na = np.arange(1,10,2)\nb = a.view()\nprint(a)\nprint(b)\nb[2]=10\nprint(a)\nprint(b)\n\n\n\n'''\n deep copy:\n syntax : b = a.copy()\n means when b is modified , a is not effect\n'''\nfrom numpy import *\na = np.arange(1,10,2)\nb = a.copy()\nb[2]=10\nprint(a)\nprint(b)\n\n\n'''FIND DIMENSIONS\n syntax : arr.ndim\n'''\narr = array([1,2,3,4])\nprint(arr.ndim)\narr = array([[1,2,3,4],[5,6,7,8]])\nprint(arr.ndim)\n\n\n'''\n shape\n syntax : arr.shape\n will give the exact shape (row, column)\n'''\na = array([[1,2,3],[4,5,6]])\nprint(a.shape)\n# output : (2,3)\n# 2 rows and 3 columns\n\n\n'''\n total number of elements in array\n syntax : arr.size\n'''\na = array([[1,2,3],[4,5,6]])\nprint(a.size)\n\n\n'''\n total itemsize of the array in bytes\n syntax : arr.itemsize\n'''\narr = array([[1,2,3],[4,5,6]])\nprint(arr.itemsize)\n\n\n'''\n datatype\n synatx : arr.dtype\n'''\na = array([[1,2,3],[4,5,6]])\nprint(a.dtype)\n\n\n''' reshape the dimensions of the array\n syntax :\n arr.reshape(rows, columns)\n\n in reshape new array is created and reshaped structure and old array remains same\n'''\narr = array([1,2,3,4,5,6,7,8,9,10])\narr1 = arr.reshape(2,5)\nprint(arr1)\nprint(arr)\n\n\n\n''' flatten the multi-dimensional array\n syntax :\n arr.flatten()\n'''\narr = array([[1,2,3,4,5],[6,7,8,9,10]])\narr1 = arr.flatten()\nprint(arr1)\n\n\n'''\n ones,zeros, eye for multi dimensions\n np.ones((r,c),int))\n np.zeros((r,c),int))\n np.eye(r))\n\n int or float\n'''\n\nprint(np.ones((2,3),int))\nprint(np.zeros((2,3),int))\nprint(np.eye(3))\n\n\n''' slicing\n syntax :\n array[start:end:stepwise , start:end:stepwise]\n array[row_start:row_end:row_stepwise , col_start:col_end:col_stepwise]\n'''\narr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])\nprint(arr[1:3,2:4])\n\n\n''' get diagonal value of the matrix\n syntax : diagonal(arr)\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n 13 14 15 16\n\n answer is [1,6,11,16]\n'''\nprint(diagonal(arr))\n\n\n\n\n\n\n''' TRANSPOSE:\n syntax :\n arr.transpose()\n'''\nt = arr.transpose()\nprint(t)\n\n\n\n'''\n random number generator'''\n\n# random number generated as single element\nprint(random.rand())\n\n# random.rand(row, column)\n\nprint(random.rand(5))\nprint(random.rand(5,2))\n"
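, "editor_example": "# editorial sketch, not part of the original repo: elementwise * versus real\n# matrix multiplication with @ (or np.dot)\nimport numpy as np\n\na = np.array([[1, 2], [3, 4]])\nb = np.array([[5, 6], [7, 8]])\nprint(a * b)  # elementwise: [[ 5 12] [21 32]]\nprint(a @ b)  # matrix product: [[19 22] [43 50]]\n"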
},
{
"alpha_fraction": 0.5117924809455872,
"alphanum_fraction": 0.5377358198165894,
"avg_line_length": 16.66666603088379,
"blob_id": "ec8697214bc004959846a78673d8c38f7daf77ca",
"content_id": "8f6afd9d67ec405f79245a6d08579ca0cca3e40a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 424,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 24,
"path": "/chapter17_classes_and_objects/class_objects_8_pass_class_object_as_parameter.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Emp:\n\n def __init__(self,id,name,salary):\n self.id = id\n self.name = name\n self.salary = salary\n\n def display(self):\n print(\"ID : \", self.id)\n print(\"Name : \", self.name)\n print(\"Salary : \", self.salary)\n\n\nclass MyClass:\n\n @staticmethod\n def mymethod(e):\n e.salary += 1000\n e.display()\n\n\ne = Emp(10, \"Ashish\", 10000)\ne.display()\nMyClass.mymethod(e)\n"
},
{
"alpha_fraction": 0.5991902947425842,
"alphanum_fraction": 0.5991902947425842,
"avg_line_length": 29.875,
"blob_id": "39916516532abf17a6e9c33d6fd522db3c929f59",
"content_id": "decc863e21697ccc5d00cbf7d298073caeaff913",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 16,
"path": "/chapter1_basics/docstring.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Person:\n ''' in this file we will learn how to create a docstring\n docstring means that we need to get the html format of this triple quoted lines\n\n command for executing:\n python -m pydoc -w filename\n\n we will only enter filename and not the extension.\n example\n python -m pydoc -w abc --> correct\n python -m pydoc -w abc.py --> incorrect\n\n '''\n def __init__(self, name, age):\n self.name = name\n self.age = age\n"
},
{
"alpha_fraction": 0.592121958732605,
"alphanum_fraction": 0.6073697805404663,
"avg_line_length": 14.134614944458008,
"blob_id": "54e25b6b77e22583fa092f91207f061c451430a4",
"content_id": "97127e12465fafd437e4b8f68b21089cb2220257",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 787,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 52,
"path": "/chapter4_control_statements/control_statement.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "name = \"ashish\"\n\nif name == \"arora\":\n print(True)\nelif name == \"ashish\":\n print(False)\nelse:\n print(\"None\")\n\n\n\"\"\"\nelse block in while loop and forloop will only execute when the complete blocks without break\n\"\"\"\n\ni = 10\nwhile i>0:\n print(\"hello\")\n if i==5:\n break\n i-=1\nelse:\n print(\"else block\")\n\n\n\"\"\"\nelse block in while loop and forloop will only execute when the complete blocks without break\n\"\"\"\n\ni = 10\nfor j in range(i):\n print(j)\n if j==5:\n break\nelse:\n print(\"else block\")\n\n\n\"\"\"continue will continue the loop , except that loop. else block will be executed \"\"\"\n\nfor j in range(10):\n if j == 5:\n continue\n print(j)\nelse:\n print(\"this is else block\")\n\n\n\na = \"ashish1\"\n\nassert a==\"ashish\", \" wrong name\"\nprint(\"done\")\n"
},
{
"alpha_fraction": 0.5717171430587769,
"alphanum_fraction": 0.5757575631141663,
"avg_line_length": 18.799999237060547,
"blob_id": "aba849d607ed13ae26ac02bc728b92cdca182386",
"content_id": "a7626e0735058f91f4f616ef0cb7266f350e8df6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 25,
"path": "/chapter17_classes_and_objects/class_objects_6_getter_setter.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Student:\n\n def setName(self,name):\n self.name = name\n\n def getName(self):\n return self.name\n\n def setMarks(self,marks):\n self.marks = marks\n\n def getMarks(self):\n return self.marks\n\nn = int(input(\"Enter the number of students : \"))\ni = 0\nwhile (i<n):\n s = Student()\n s.setName(input(\"enter name : \"))\n s.setMarks(int(input(\"enter marks : \")))\n\n print(\"Name Entered : \", s.getName())\n print(\"marks Entered : \", s.getMarks())\n\n i+=1\n"
},
{
"alpha_fraction": 0.697604775428772,
"alphanum_fraction": 0.7065868377685547,
"avg_line_length": 19.875,
"blob_id": "817950096138418ac82ea96b621fc22c01be3f29",
"content_id": "26a88f35846d97729dd6db49be9bf80eab219cc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 16,
"path": "/chapter1_basics/is_operator.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "\"\"\"is operator is used for checking the memory address of 2 variables are same or not.\nif the address of the memory is same then it returning True else False\n\nin order to check the addess of the memory for any variable. we do\nid(variable)\n\"\"\"\n\na = 10\nb = a\nif b is a:\n print(True)\nelse:\n print(False)\n\nprint(id(a))\nprint(id(b))\n"
},
{
"alpha_fraction": 0.6812499761581421,
"alphanum_fraction": 0.706250011920929,
"avg_line_length": 19,
"blob_id": "a21e1f1c7b1ef253e25e9a32c8f2c961ae665f99",
"content_id": "8472f5867557d4b52cac8225b9f760713f160a00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/chapter1_basics/how_to_create_pyc_file.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "\"\"\"in this section we will understand how to\ncreate pyc file.\ncommand creating the pyc file is :\npython -m py_compile file_name.py\"\"\"\n\na = 10\nb = 20\nprint(a+b)\n"
},
{
"alpha_fraction": 0.45791372656822205,
"alphanum_fraction": 0.4852745532989502,
"avg_line_length": 19.320463180541992,
"blob_id": "2a36da54af24e0681107cc30426cbad7c0614650",
"content_id": "aabb11efb7d082a5a87ac7be699a00c3df9ae812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5263,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 259,
"path": "/chapter7_functions/functions.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n difference between function and method\n\n a function can be writting independently as a python program\n called by its name\n\n method:\n when a function is written inside the class, then it becomes method\n\n called as :\n 1. classname.methodname()\n 2. objectname.methodname()\n'''\n\n#-------------------------------------------------------------------------------\n'''\n defining function\n\n syntax:\n def functionname(attributes_passing):\n logic\n return\n'''\ndef function_testing(name):\n string=\"my name is \"+name\n return string\n\nprint(function_testing(\"ashish\"))\n\n\n#-------------------------------------------------------------------------------\n'''function inside another function'''\n\ndef display(str):\n def msg():\n return \"How are you\"\n return str+msg()\nprint(display(\"ashish\"))\n\n\n#-------------------------------------------------------------------------------\n'''function having function as parameter'''\n\ndef display1(fun):\n return fun()+\" how are you\"\ndef msg():\n return \"ashish arora\"\nprint(display1(msg))\n\n\n# ------------------------------------------------------------------------------\n''' function returning another function'''\ndef display2():\n def msg():\n return \"how are you \"\n return msg()\n\nprint(display2())\n\n#-------------------------------------------------------------------------------\n'''pass by object reference'''\n\ndef testing_reference(a,b=10):\n print(\"------------------\")\n print(\"id(a) : \", id(a))\n print(\"id(b) : \", id(b))\n\na = 10\nb = 20\nprint(\"id(a) : \", id(a))\nprint(\"id(b) : \", id(b))\ntesting_reference(a,b)\n\n# output:\n# id(a) : 1627811120\n# id(b) : 1627811440\n# ------------------\n# id(a) : 1627811120\n# id(b) : 1627811440\n\n\n# but in case of list, refence doesnt work\ndef testing_list_reference(a):\n print(\"------------------\")\n print(\"id(a) : \", id(a))\n\na = ['10','20']\nprint(\"id(a) : \", id(a))\ntesting_reference(a)\n\n\n#-------------------------------------------------------------------------------\n'''\nArguments are of 4 types:\n 1. positional arguments\n 2. keywords argument\n 3. default arguments\n 4. 
variable arguments\n'''\n\n# positional argument:\ndef msg1(a, b):\n print(a, b)\n\nmsg1(\"a\", \"b\")\n\n# keyword argument\ndef msg3(a, b):\n print(a,b)\n\nmsg3(a=\"a\",b=\"b\")\n\n# default argument\ndef msg4(a=\"a\",b=\"b\"):\n print(a,b)\n\nmsg4()\n\n\n# variable length argument\ndef msg5(farg, *args):\n print(farg)\n for i in args:\n print(i)\nmsg5(1,2,3,4)\n\n# variable argument passed as normal but received as dictionary\ndef msg6(farg, **kwargs):\n print(farg)\n\n for x,y in kwargs.items():\n print(x,y)\n\ndict = {'a':'a','b':'b','c':'c'}\nmsg6(\"ashish\",a='a',b='b',c='c')\n\n\n#-------------------------------------------------------------------------------\n'''local variable'''\n\na = 1 # global variable\nprint(a)\nprint(\"outside : \", id(a))\ndef msg7():\n b=1 # local variable\n a=2 # this is local variable not global\n # you cannot modify global variable inside function directly\n\n print(\"inside : \",a)\n print(\"inside id : \",id(a))\n\nmsg7()\nprint(a)\nprint(\"outside : \", id(a))\n\n\n\n#-------------------------------------------------------------------------------\n\n''' global variable which can be modified inside function'''\n\na = 1\ndef msg8():\n global a\n a = 2\nmsg8()\n# this will modify the variable a at gloabl level\n\n\n\n#-------------------------------------------------------------------------------\n'''recursion'''\ndef msg9(num):\n print(num)\n if num>0:\n msg9(num-1)\n else:\n return\nmsg9(5)\n\n\n\n#-------------------------------------------------------------------------------\n''' lambda Function'''\n\n# normal function\ndef msg10(x):\n return x**2\nprint(msg10(2))\n\n\n# lambda function\nf = lambda x:x**2\nprint( f(5) )\n\n\n'''\nhow does this work:\nfunction name is replaced by lambda : msg10 --> lambda\nparameters are placed as it is after lambda : msg10(x)--> lambda x\nreturn expression is placed after ':' --> lambda x:x**2\n'''\ndef msg11(x, y):\n return x**y\nprint(msg11(2,3))\n\n\n# lambda function\nf = lambda x, y:x**y\nprint( f(2,3) )\n\n\n#-------------------------------------------------------------------------------\n''' how lambda function reduces code'''\n\ndef is_even(x):\n if x%2==0:\n return True\n return False\n\nlst = [1,2,3,4,5,6,7,8,9]\n\nlst = list(filter(is_even,lst))\nprint(lst)\n\n\n'''this complete code can be simplfied in one line'''\nlst = [1,2,3,4,5,6,7,8,9]\nlst = list(filter(lambda x:x%2==0,lst))\nprint(lst)\n\n\n#-------------------------------------------------------------------------------\n'''\n function decorator\n accepts function as argument and returns a function\n'''\ndef msg12(fun):\n def inner():\n value = fun()\n return value+2\n return inner()\n\ndef msg13():\n return 10\n\nresult = msg12(msg13)\nprint(result)\n\n\n\n# ------------------------------------------------------------------------------\n'''special variable : __name__\n this special variabl is created by python automaticallyait tells where the python program is executed\n\n if the program is executed directly then it will contain the value '__main__'\n or\n if the program is executed through module then it will contains module name\n '''\n"
},
{
"alpha_fraction": 0.6133121848106384,
"alphanum_fraction": 0.6180665493011475,
"avg_line_length": 22.370370864868164,
"blob_id": "0448638cb231cac2ff7fa30401e4fe532c02166c",
"content_id": "6c9d384f32fb77185ec599aaabcda272f8387191",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 631,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 27,
"path": "/chapter10_exceptions/exceptions.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n try:\n - Code under scan\n except:\n - if there is error then this will be executed.\n - we can have multiple except block\n - madatory block\n else:\n - this block will only be executed if no error occurs\n - not madatory block\n finally:\n - this block is executed no matter error comes or not\n - not madatory block\n\n'''\n\ntry:\n print(\"this is try block\")\n print(10/0)\nexcept ArithmeticError:\n print(\"this is except block\")\nexcept Exception:\n print(\"this is except block\")\nelse:\n print(\"this is else block\")\nfinally:\n print(\"this is finally block\")\n"
},
{
"alpha_fraction": 0.5064694881439209,
"alphanum_fraction": 0.5656192302703857,
"avg_line_length": 17.65517234802246,
"blob_id": "98311e84d8faf8fb36a530b7ec3415f839a5947c",
"content_id": "af0ab62ed9e5c99cf069be4fed90010bfb2113be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 541,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 29,
"path": "/chapter17_classes_and_objects/class_objects_3.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Students:\n def __init__(self,n=\"A\",m=0):\n self.name = \"Vishnu\"\n self.age = n\n self.marks = m\n\n def talk(self):\n print(\"Hii i am \",self.name)\n print(\"my age is \", self.age)\n print(\"My marks \",self.marks)\n\ns1 = Students(\"ashsih\",30)\ns2 = Students(\"abhishek\",34)\ns2.name = \"Varun\"\ns2.age = 17\ns2.marks = 950\ns2.talk()\ns1.talk()\n\n# OUTPUT\n# Hii i am Vishnu\n# my age is 20\n# My marks 900\n# Hii i am Varun\n# my age is 17\n# My marks 950\n# Hii i am Vishnu\n# my age is 20\n# My marks 900\n"
},
{
"alpha_fraction": 0.4473067820072174,
"alphanum_fraction": 0.46370023488998413,
"avg_line_length": 16.79166603088379,
"blob_id": "7158e1e0bbde5a1dc99d6024c7f5ac404c7d46e2",
"content_id": "d2bd98f9d6ede6441f838c4ab0f348347c227f20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 24,
"path": "/chapter17_classes_and_objects/class_objects_9_inner_class.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Person:\n\n def __init__(self):\n self.name = \"Ashish\"\n self.db = self.DOB()\n\n def display(self):\n print(\"Name : \", self.name)\n\n class DOB:\n\n def __init__(self):\n self.dd = 10\n self.mm = 5\n self.yy = 1988\n\n def display(self):\n print('DOB = {}/{}/{}'.format(self.dd, self.mm, self.yy))\n\n\np = Person()\np.display()\nu = p.DOB()\nu.display()\n"
},
{
"alpha_fraction": 0.5606468915939331,
"alphanum_fraction": 0.5687331557273865,
"avg_line_length": 34.33333206176758,
"blob_id": "beac4e81d839a215b6b6228a6159497f552f1ef3",
"content_id": "fa77150758e211bb4fbc74851ad1970e93aee303",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1484,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 42,
"path": "/chapter14_regular_expression/difflib.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n this is for getting the close matches\n\n example:\n we have list :\n possibilities = [\"ape\", \"apple\", \"peach\", \"puppy\"]\n\n and we have a word \"appel\"\n\n output = get_close_matches(word, possibilities, n=3, cutoff=0.6)\n\n output = ['apple', 'ape']\n\n get_close_matches(word, possibilities, n=3, cutoff=0.6)\n Use SequenceMatcher to return list of the best \"good enough\" matches.\n\n word is a sequence for which close matches are desired (typically a\n string).\n\n possibilities is a list of sequences against which to match word\n (typically a list of strings).\n\n Optional arg n (default 3) is the maximum number of close matches to\n return. n must be > 0.\n\n Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities\n that don't score at least that similar to word are ignored.\n\n The best (no more than n) matches among the possibilities are returned\n in a list, sorted by similarity score, most similar first.\n\n >>> get_close_matches(\"appel\", [\"ape\", \"apple\", \"peach\", \"puppy\"])\n ['apple', 'ape']\n >>> import keyword as _keyword\n >>> get_close_matches(\"wheel\", _keyword.kwlist)\n ['while']\n >>> get_close_matches(\"Apple\", _keyword.kwlist)\n []\n >>> get_close_matches(\"accept\", _keyword.kwlist)\n ['except']\n\n'''\n"
},
{
"alpha_fraction": 0.5381355881690979,
"alphanum_fraction": 0.5593220591545105,
"avg_line_length": 12.882352828979492,
"blob_id": "18cb337e897abd0e14d7de4a47c7274d0576b701",
"content_id": "23c574fcc9a92fa9adb0a4f8684bbfef5f0ff104",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 236,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 17,
"path": "/chapter17_classes_and_objects/class_objects_7_static_method.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class MyClass:\n\n n=0\n\n def __init__(self):\n MyClass.n += 1\n\n @staticmethod\n def no_of_object():\n print(\"No of object : \", MyClass.n)\n\n\nobj1 = MyClass()\nobj2 = MyClass()\nobj3 = MyClass()\n\nMyClass.no_of_object()\n"
},
{
"alpha_fraction": 0.4800589084625244,
"alphanum_fraction": 0.48913976550102234,
"avg_line_length": 24.545454025268555,
"blob_id": "fa0dda77e6b10c25a1070191968fbeff6526bba9",
"content_id": "23837cc59172b84d4d7f4eeae8e20566b5ebfdca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8149,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 319,
"path": "/chapter6_strings/strings.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''there are 4 ways to handle string,\n and when we say handle we mean by single quote double and all abs\n'''\na1 = 'ashish arora'\na2 = \"ashish arora\"\na3 = '''ashish\n arora'''\na4 = \"\"\"ashish\n arora\"\"\"\na5 = r'ashish \\ arora'\n\nprint(\"a1 : \", a1)\nprint(\"a2 : \", a2)\nprint(\"a3 : \", a3)\nprint(\"a4 : \", a4)\nprint(\"a5 : \",a5)\n\n# OUTPUT\n# a1 : ashish arora\n# a2 : ashish arora\n# a3 : ashish\n# arora\n# a4 : ashish\n# arora\n# a5 : ashish \\ arora\n\n\n# ------------------------------------------------------------------------------\n\n''' ESCPAE CHARACTERS'''\n # ESCAPE CHARACTERS\n # \\a : bell or alert\n # \\b : backspace\n # \\n : newline\n # \\t : horizontal tab space\n # \\v : vertial tab space\n # \\r : Enter Button\n # \\x : character x\n # \\\\ : display single \\\nprint(\"bell or alert: \\a\")\nprint(\"backspace: \\b\")\nprint(\"newline: \\n\")\nprint(\"horizontal tab space: \\t\")\nprint(\"vertial tab space: \\v\")\nprint(\"Enter Button: \\r\")\nprint(\"display single slash: \\\\\")\n\n\n# ------------------------------------------------------------------------------\n''' create a string by unicode'''\na = u'\\u0915\\u094b\\u0930'\nprint(a)\n\n\n# ------------------------------------------------------------------------------\n'''length of string'''\na = \"ashish arora\"\nprint(len(a))\n\n\n# ------------------------------------------------------------------------------\n'''\n slicing in strings\n stringName[start:stop:stepsize]\n'''\na = \"ashish arora\"\nprint(a[1:6:2])\n\n# ------------------------------------------------------------------------------\n''' repeating the strings'''\nprint(\"abc\"*3)\n\n\n# ------------------------------------------------------------------------------\n''' concatenation in string\n concatenation doesnot add any extra spaces\n'''\n\na,b = \"ashish\", \"arora\"\nc = a+b\nprint(c)\n\n\n# ------------------------------------------------------------------------------\n'''checking membership\n returns True or False'''\na = \"ashish arora\"\nb = 'sh'\nif b in a:\n print(\"found\")\nelse:\n print(\"not found\")\n\nc = b in a\nprint(c)\n\n# ------------------------------------------------------------------------------\n'''\n comparing 2 or more strings\n comparsion happens on the basis of the english dictionary order\n comparsion operators:\n >,>=,<,=<,==,!=\n'''\na = \"ashish\"\nb = \"ASHISH\"\nif a==b:\n print(\"same\")\nelse:\n print(\"different\")\n\n# ------------------------------------------------------------------------------\n'''\nremoving the extra spaces from the strings\nthis can be removed from\nstring.lstrip() : left side of string,\nstring.rstrip() : right side of the string\nstring.strip() : both sides of the string\n'''\na = \" ashish arora \"\nprint(a.strip())\na = \" ashish arora \"\nprint(a.rstrip())\nprint(a.lstrip())\n\n\n# ------------------------------------------------------------------------------\n''' find in strings\n front search :\n find() : gives index number or -1 if not found\n index() : gives index number or error if not found\n backward search:\n rfind() : gives index number or -1 if not found\n rindex() : gives index number or error if not found\n\n syntax :\n result = mainString.find(substring, startIndex, endIndex)\n result = mainString.index(substring, startIndex, endIndex)\n result = mainString.rfind(substring, startIndex, endIndex)\n result = mainString.rindex(substring, startIndex, endIndex)\n'''\nmain_string = \"ashish arora how are you\"\nsub_string = \"aro1ra\"\nresult = 
main_string.find(sub_string)\nprint(result)\n\n\n\n# ------------------------------------------------------------------------------\n''' count the number of occurence of substring in string\n syntax:\n result = mainString.count(substring)\n\n result gives zero if not found\n'''\nmainString = \"ashish\"\nsubString = \"is\"\nprint(mainString.count(subString))\n\n\n# ------------------------------------------------------------------------------\n''' string are immutiable\n means :\n string = \"ashish\"\n string[0]=\"U\"\n\n this is not possible\n'''\n\n# ------------------------------------------------------------------------------\n'''replacing text in the string\n syntax :\n newString = mainString.replace(oldtext, newtext)\n'''\nmainString = \"ashsih\"\nold_text='si'\nnew_text='is'\nnewString = mainString.replace(old_text, new_text)\nprint(newString)\n\n\n#-------------------------------------------------------------------------------\n''' spliting of the string\n syntax :\n list = string.split(split_text)\n\n default the spliting_text is \" \" (one space)\n'''\nmainString = \"ashish arora\"\nprint(mainString.split())\n\n\n#-------------------------------------------------------------------------------\n''' join list to make a string\n list = ['a','b','c','d']\n\n syntax :\n string = \"joining_sring\".join(list)\n'''\nlist1 = ['a','b','c','d']\nstring = \"-\".join(list1)\nprint(string)\n\n#-------------------------------------------------------------------------------\n'''\n changing the case senetivity of the string\n mainString.upper() : make all upper\n mainString.lower() : make all lower\n mainString.title() : make all Camel Case\n'''\nmainString = \"ashish arora\"\nnewString = mainString.upper()\nprint(newString)\n\nmainString = \"ASHISH ARORA\"\nnewString = mainString.lower()\nprint(newString)\n\nmainString = \"ASHISH ARORA\"\nnewString = mainString.title()\nprint(newString)\n\n\n#-------------------------------------------------------------------------------\n'''\n checking starting and ending of the string\n\n syntax:\n result = mainString.startswith(substring)\n result = mainString.endswith(substring)\n\n returns : True and False\n'''\nmainString = 'ashish arora'\nsubstring = 'a'\nresult = mainString.startswith(substring)\nprint(result)\nresult = mainString.endswith(substring)\nprint(result)\n\n\n#-------------------------------------------------------------------------------\n''' string testing methods:\n syntax :\n result = mainString.isalnum() : checks alphabets and characters\n result = mainString.isalpha() : checks for alphabets\n result = mainString.isdigit() : checks for digits\n result = mainString.islower() : checks for lower case\n result = mainString.isupper() : checks for upper case\n result = mainString.istitle() : checks for title or camelcase\n result = mainString.isspace() : checks for spaces\n\n result is in True or False\n'''\nmainString = \"Ashish Arora 123\"\nresult = mainString.isalnum()\nprint(result)\nresult = mainString.isalpha()\nprint(result)\nresult = mainString.isdigit()\nprint(result)\nresult = mainString.islower()\nprint(result)\nresult = mainString.isupper()\nprint(result)\nresult = mainString.istitle()\nprint(result)\nresult = mainString.isspace()\nprint(result)\n\n#-------------------------------------------------------------------------------\n''' formating of strings\n 3 ways :\n string = \"My name is {}. My age is {}\".format(name,age)\n string = \"My name is {0}. My age is {1}\".format(name,age)\n string = \"My name is {fullname}. 
My age is {agenow}\".format( fullname=age, agenow=age)\n'''\nname = \"ashish arora\"\nage = 33\nstring = \"My name is {}. My age is {}\".format(name,age)\nstring = \"My name is {0}. My age is {1}\".format(name,age)\nstring = \"My name is {fullname}. My age is {agenow}\".format( fullname=age, agenow=age)\n\n\n#-------------------------------------------------------------------------------\n'''formatting contains 3 datatypes\n d : decimals\n i : integers\n c : character\n s : string\n f : floating\n\n also:\n {:*>15d} : means : *************32\n {:*^15d} : means : ******32*******\n {:*<15d} : means : 32*************\n'''\nage=32\nstring = \"{:*>15d}\".format(age)\nprint(string)\nstring = \"{:*^15d}\".format(age)\nprint(string)\nstring = \"{:*<15d}\".format(age)\nprint(string)\n\n\n\n#-------------------------------------------------------------------------------\n'''\n sorting of the strings:\n\n doesnt modify the mainString : sorted()\n\n syntax :\n list = Sorted(mainString)\n'''\n\nresult = sorted(mainString)\nprint(mainString)\nprint(result)\n"
},
{
"alpha_fraction": 0.49819493293762207,
"alphanum_fraction": 0.5234656929969788,
"avg_line_length": 20.30769157409668,
"blob_id": "5b50f2998e6d351c7c4b4c818e4677bfa65e0ac9",
"content_id": "bd721b39a32c1d795afaad7d3665480e2ac49632",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 13,
"path": "/chapter17_classes_and_objects/class_objects_1.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "class Students:\n def __init__(self):\n self.name = \"Vishnu\"\n self.age = 20\n self.marks = 900\n\n def talk(self):\n print(\"Hii i am \",self.name)\n print(\"my age is \", self.age)\n print(\"My marks \",self.marks)\n\ns1 = Students()\ns1.talk()\n"
},
{
"alpha_fraction": 0.7317073345184326,
"alphanum_fraction": 0.7439024448394775,
"avg_line_length": 20.866666793823242,
"blob_id": "8aa6f2aba141a6cc6a37c8dc55438afb5f28ce86",
"content_id": "7f2c27a2225000ebb84e4f507cde892616925782",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 328,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/chapter3_input_statement/take_input_at_system_level.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "\"\"\" taking input at the system level means that when we are executing the file\nthen only we are passing the variable to the file\n\nthe file will receive the input by sys.argv\n\nalso note that you need to import sys first\"\"\"\n\n\nimport sys\n\nprint(sys.argv)\nprint(sys.argv[0])\nprint(sys.argv[1])\nprint(sys.argv[2])\nprint(sys.argv[3])\n"
},
{
"alpha_fraction": 0.4521581530570984,
"alphanum_fraction": 0.4675520658493042,
"avg_line_length": 24.484615325927734,
"blob_id": "9e0b9a2539102c2260f94b405df69f912b4d9f3f",
"content_id": "f6bb0d53d3459e7ce6f8c09b48ad71e39713e489",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3313,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 130,
"path": "/chapter12_files_in_python/files_in_python.py",
"repo_name": "ashisharora24/python_learning",
"src_encoding": "UTF-8",
"text": "'''\n file_hanlder = open(\"filename\",\"open_mode\",\"buffering\")\n\nw : 1. write data to file\n : 2. if data already present, then it will be deleted\n : 3. new data will be written\n\nr : 1. read data from file\n : 2. pointer is positioned at the start of the file\n\na : 1. append data to file\n : 2. file pointer placed at end of the file\n\nw+ : 1. to write and read file\n : 2. previous data not deleted\n\nr+ : 1. read and write data to file\n : 2. pointer at begining\n : 3. new data will be written\n\na+ : 1. append and read\n : 2. pointer will be at end\n : 3. file created if not exist\n\nx : 1. open file in exculusive creation mode\n 2. file creation fails if file exist already\n'''\n\n# ------------------------------------------------------------------------------\n'''create file object'''\n# read file\nF = open(\"testing_read_file.txt\", \"r\")\nF.close()\n\n# ------------------------------------------------------------------------------\n'''\n read compete file\n'''\n# now you can read complete file at once\nF1 = open(\"testing_read_file.txt\", \"r\")\nstr = F1.read()\nprint(str)\nF1.close()\n\n# ------------------------------------------------------------------------------\n'''\n read file line by line\n'''\n# we can read file line by line\nF2 = open(\"testing_read_file.txt\", \"r\")\nlist = F2.readlines()\nprint(list)\nF2.close()\n\n# ------------------------------------------------------------------------------\n'''\n write data to file\n'''\n# write data to file\nF3 = open(\"F3.txt\", \"w\")\nF3.write(\"How are you\\n\")\nF3.write(\"this is ashish\")\nF3.close()\n\n# ------------------------------------------------------------------------------\n'''\n read and write data to file\n'''\n# append data to existing file\nF4 = open(\"F3.txt\", \"a+\")\n# write function will only be writing files at the end of the file\nF4.write(\"ASHISH AROA\")\n\n# with seek function we can change the location of the pointer only for the\n# reading of the file. not for the writing\nF4.seek(0,0)\nprint(\"fourth try : \", F4.read())\n# Object.seek(offset, fromwhere)\n# offset means bytes from the start position\n# fromwhere have 3 values: 0,1,2\n# 0 --> start of file\n# 1 --> current position in file\n# 2 --> end of file\nF4.close()\n# ------------------------------------------------------------------------------\n\n\n''' know if file exist or not'''\n\nimport os\nfilename = \"F3.txt\"\nif os.path.isfile(filename):\n print(\"found\")\nelse:\n print(\"notfile\")\n\n# ------------------------------------------------------------------------------\n'''\n with statement\n advantage of by \"with\" that we dont need to close the file object\n'''\nwith open(\"F3.txt\",\"a+\") as F5:\n F5.write(\"\\nthis is from with statement\")\n F5.seek(0,0)\n print(\"from with statement block : \", F5.read())\n\n\n# ------------------------------------------------------------------------------\n''' working with binary\nw -> wb\nr -> rb\na -> ab\nw+ -> wb+\nr+ -> rb+\na+ -> ab+\nx -> xb\n'''\n\n\n# ------------------------------------------------------------------------------\n'''\n converting string to binary\n and\n converting binary to string\n'''\nstr = \"ashish arora\"\nbny = str.encode()\nprint(bny)\nstr = bny.decode()\nprint(str)\n"
}
] | 44 |
thfellner/ScreenBrightnessControl | https://github.com/thfellner/ScreenBrightnessControl | b8e96ac23ff7d1004629b43de575fb329ebb3945 | 4c360c4107b936122c7f8dd56c252c9d11f34f04 | e784b91a4dc78e56a7a34b6c34ddcaf307bbdfe2 | refs/heads/main | 2023-05-13T17:41:47.903408 | 2021-06-07T05:08:49 | 2021-06-07T05:08:49 | 373,508,480 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7463768124580383,
"alphanum_fraction": 0.7542819380760193,
"avg_line_length": 22.015151977539062,
"blob_id": "50499dc7dd53f8b1af7b60dc923cd8eb8f297e27",
"content_id": "6a43cac5fc5e50f70177173bb12df815be45e074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1518,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 66,
"path": "/brightnesscontrol.py",
"repo_name": "thfellner/ScreenBrightnessControl",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport threading\n\nimport screen_brightness_control as sbc\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QSlider, QWidgetAction, QAction\n\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\napp = QApplication([])\napp.setQuitOnLastWindowClosed(False)\n\n# Adding an icon\nicon = QIcon(resource_path(\"icon.ico\"))\n\n# Adding item on the menu bar\ntray = QSystemTrayIcon()\ntray.setIcon(icon)\ntray.setVisible(True)\n\n# Creating the options\nmenu = QMenu()\n\nbrightnessControl = QSlider(Qt.Vertical)\nbrightnessControl.setValue(100)\nbrightnessControl.setMaximum(20)\n\n# menu.addActions(mySlider)\n\n\ndef valuechange():\n value = brightnessControl.value()*5\n x = threading.Thread(target=sbc.set_brightness, args=(value,), daemon=True)\n x.start()\n\n\n\nwidgetSlider = QWidgetAction(brightnessControl)\nwidgetSlider.setDefaultWidget(brightnessControl)\nmenu.addAction(widgetSlider)\nbrightnessControl.sliderMoved.connect(valuechange)\n\n\n# To quit the app\nmenu2 = QMenu()\nquit = QAction(\"X\")\nquit.triggered.connect(app.quit)\nmenu2.addAction(quit)\n\nmenu.addMenu(menu2)\n\n# Adding options to the System Tray\ntray.setContextMenu(menu)\n\nsys.exit(app.exec())"
},
{
"alpha_fraction": 0.7897793054580688,
"alphanum_fraction": 0.7932636737823486,
"avg_line_length": 34.91666793823242,
"blob_id": "f55d269ec929827769cbdedfe25b596ec92860fe",
"content_id": "929a389ce1a641ca772815a636e758b2aa0d3a93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 861,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 24,
"path": "/README.md",
"repo_name": "thfellner/ScreenBrightnessControl",
"src_encoding": "UTF-8",
"text": "# ScreenBrightnessControl\nBrightness Control App based on QT and screen_brightness_control. Creates a tray icon with a slider where you can change the brightness\n\nI made this because some OS's don't want to natively support this feature unless it is an integrated display (laptop for example)\n\nBy right clicking the tray icon you can pull up the slider. By hovering over the arrow a button will show to close the Application\n\n\n\n## Requirements\n\n* Python 3.8\n* PyQT 5\n* screen_brightness_control\n* pyinstaller (used for creating an executable)\n\n## Installation\n\n\nA build for Windows can be found in the releases section, though it can be built using pyinstaller using the install script.\n\n```shell script\npyinstaller brightnesscontol.spec\n```"
},
{
"alpha_fraction": 0.4883720874786377,
"alphanum_fraction": 0.7151162624359131,
"avg_line_length": 17.105262756347656,
"blob_id": "c61ada80d1f8dcc64f52e23ef393a1ddbd10a4fe",
"content_id": "d33eb7d4a1c53623d6d81eb43b0174f259522b4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 344,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 19,
"path": "/requirements.txt",
"repo_name": "thfellner/ScreenBrightnessControl",
"src_encoding": "UTF-8",
"text": "altgraph==0.17\ncx-Logging==3.0\nfuture==0.18.2\nimportlib-metadata==4.4.0\ninstall==1.3.4\nol==0.0.1\npefile==2021.5.24\npyinstaller==4.3\npyinstaller-hooks-contrib==2021.1\npypiwin32==223\nPyQt5==5.15.4\nPyQt5-Qt5==5.15.2\nPyQt5-sip==12.9.0\nPyQt5-stubs==5.15.2.0\npywin32==300\npywin32-ctypes==0.2.0\nscreen-brightness-control==0.8.5\nWMI==1.5.1\nzipp==3.4.1\n"
}
] | 3 |
ethantkoenig/Substantiv | https://github.com/ethantkoenig/Substantiv | b474523865535b45c9357c8b7aa30c26ffa3bec1 | 232ba4e57b661d1cce0b68294be3280ca57593c4 | b0eb2a63032356bcf5f618592032e55a2a463f99 | refs/heads/master | 2016-08-11T06:56:04.255545 | 2016-01-10T16:52:52 | 2016-01-10T16:52:52 | 48,244,734 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7698091268539429,
"alphanum_fraction": 0.7727009654045105,
"avg_line_length": 43.33333206176758,
"blob_id": "dc495b0130462e2b8cca71cb24a91f39b6b71651",
"content_id": "a134695139cdfff4b9c0abbe2e431ed011fd7c0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1729,
"license_type": "no_license",
"max_line_length": 310,
"num_lines": 39,
"path": "/README.md",
"repo_name": "ethantkoenig/Substantiv",
"src_encoding": "UTF-8",
"text": "# Substantiv\n\nSubstantiv is a utility that creates and plays utterances of German nouns learned on Duolingo. Its name comes from the German word for noun.\n\n## Overview\n\nOne of the hardest facets of German for English speakers to learn is the genders of nouns; unlike the nouns of other gendered languages, such as the Romance languages, one cannot determine the gender of a German noun from its orthography. Instead, the genders of nouns, for the most part, have to be memorized.\n\nSubstantiv takes the German nouns that a Duolingo user has learned, generates phrases of the form `[definite article] [noun]`, and continually plays these phrases. \n\nSince the gender of a noun can (usually) be inferred from its definite article, these phrases can help a listener remember the genders of nouns. This tool can also serve as a resource for improving listening and pronunciation skills.\n\n## Dependencies\n\nTo install and run Substantiv, make sure that you have `pip` and `sox` (with an mp3 handler) installed on your machine.\n\n## Installation\n\nTo install Substantiv, first run \n\n $ python setup.py\n\nThen, create a `username.txt` file in the base directory which contains your Duolingo username. This can be done by running \n\n $ echo \"[Duolingo username]\" > username.txt\n\nFinally, create a `password.txt` file in the base directory which contains your Duolingo password. This can be done by running\n\n $ echo \"[Duolingo password]\" > password.txt\n\nAll of the above commands only need to be run once.\n\n## Usage\n\nTo run Substantiv, invoke the following command\n\n $ python substantiv.py [number of utterances]\n\nThe optional `[number of utterances]` argument is number of utterances that the system will play. It defaults to 1000.\n"
},
{
"alpha_fraction": 0.660788357257843,
"alphanum_fraction": 0.6690871119499207,
"avg_line_length": 27.690475463867188,
"blob_id": "d7ac74ad9988caaea5912b571feb05cb1f880eab",
"content_id": "d16ab656818d275a19f6ab0d26dbdf6cd6bca6e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4827,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 168,
"path": "/substaniv.py",
"repo_name": "ethantkoenig/Substantiv",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*- \n\nfrom duolingo import Duolingo\nfrom os.path import isfile\nimport random\nfrom subprocess import Popen\nfrom sys import argv\nfrom time import sleep\nfrom urllib import URLopener\n\n\n# File contains Duolingo username\nUSERNAME_FILE = \"username.txt\"\n\n# File containing Duolingo password\nPASSWORD_FILE = \"password.txt\"\n\n# Delay (in seconds) between utterances\nINTER_UTTERANCE_DELAY = 1.0\n\n# Default number of utterances played\nDEFAULT_NUM_UTTERANCES = 1000\n\n\n# Returns: string - contents of file. Ideally suited for files with one line.\n# leading and trailing whitespace is removed\n#\n# filename : string - filepath of file\ndef read_file(filename):\n fl = open(filename)\n contents = fl.read().strip()\n fl.close()\n return contents\n\n\n# Returns : Duolingo object\n#\n# username : string - Duolingo username\ndef get_duolingo():\n username = read_file(USERNAME_FILE)\n password = read_file(PASSWORD_FILE)\n return Duolingo(username, password)\n\n\n# Returns - dict from id to Duolingo lexeme\n#\n# lingo : Duolingo\n# language_abbr : string\ndef get_vocab(lingo, language_abbr):\n vocab = lingo.get_vocabulary(language_abbr)\n return dict((lexeme[\"lexeme_id\"], lexeme) for lexeme in\n lingo.get_vocabulary(language_abbr)[\"vocab_overview\"])\n\n\n# Returns : string - sanitized word with non-ASCII characters replaced with\n# sentinel ASCII sequences\n#\n# word : string\ndef sanitize(word):\n word = word.strip().lower()\n word = word.replace(u\"ä\", \"%C3%A4\")\n word = word.replace(u\"Ä\", \"%C3%A4\")\n word = word.replace(u\"ö\", \"%C3%B6\")\n word = word.replace(u\"Ö\", \"%C3%B6\")\n word = word.replace(u\"ü\", \"%C3%BC\")\n word = word.replace(u\"Ü\", \"%C3%BC\")\n word = word.replace(u\"ß\", \"%C3%9F\")\n return word\n\n\n# Returns : int - number of non-ASCII characters in word\n#\n# word : string\ndef count_non_ascii(word):\n return sum(1 for x in word if ord(x) >= 128)\n\n\n# Returns : bool - whether noun is singular\n# \n# vocab : dict from id to Duolingo lexeme\n# noun : Duolingo lexeme - must be a noun\ndef singular(vocab, noun):\n related_lexemes = [vocab[lexeme_id] for lexeme_id in noun[\"related_lexemes\"]]\n related_nouns = [lexeme for lexeme in related_lexemes \n if lexeme[\"pos\"] == \"Noun\"]\n noun_string = noun[\"word_string\"]\n\n # heuristically check if any relatives are the singular version of noun\n for relative in related_nouns:\n relative_string = relative[\"word_string\"]\n if len(noun_string) > len(relative_string):\n return False\n if (len(noun_string) == len(relative_string)\n and count_non_ascii(noun_string) > count_non_ascii(relative_string)):\n return False\n\n return True\n\n\n# Returns : string list list - outer list is list of utterances. 
each inner list\n# is a list of words\n#\n# lingo : Duolingo\n# language_abbr : string\ndef get_utterances(lingo, language_abbr):\n vocab = get_vocab(lingo, language_abbr)\n nouns = [word for word in vocab.values() if word[\"pos\"] == \"Noun\"]\n result = []\n for noun in nouns:\n gender = noun[\"gender\"]\n article = \"die\" if not singular(vocab, noun) else \\\n \"die\" if gender == \"Feminine\" else \\\n \"der\" if gender == \"Masculine\" else \\\n \"das\"\n result.append([article, noun[\"word_string\"]])\n return result\n \n\n\n# Returns: string - filepath of audio file containing word\n#\n# lingo : string\n# word : string\n# language_abbr : string\ndef get_audio(lingo, word, language_abbr):\n word = sanitize(word)\n filepath = \"{0}.mp3\".format(word)\n if not(isfile(filepath)):\n mp3url = lingo.get_audio_url(word, language_abbr)\n url_opener = URLopener()\n url_opener.retrieve(mp3url, \"temp.mp3\")\n url_opener.close()\n\n # remove trailing silence\n Popen(\"sox temp.mp3 {0} reverse trim 0.500 reverse > /dev/null\".format(filepath),\n shell = True).wait()\n Popen(\"rm temp.mp3\", shell = True).wait()\n\n return filepath\n\n\n# Plays the given utterance\n#\n# lingo : Duolingo\n# language_abbr : string\n# utterances : string list list\ndef play_utterance(lingo, language_abbr, utterance):\n audio_files = [get_audio(lingo, word, language_abbr) for word in utterance]\n print u\"Playing {0}\".format(\" \".join(utterance))\n Popen(\"play {0} 2> /dev/null\".format(\" \".join(audio_files)), shell = True).wait()\n \n\n# Plays n randomly selected utterances \n#\n# language_abbr : string\n# num_utterances : int - number of utterances to play\ndef main(language_abbr, num_utterances):\n lingo = get_duolingo()\n utterances = get_utterances(lingo, language_abbr)\n for _ in xrange(num_utterances):\n utterance = random.choice(utterances) \n play_utterance(lingo, language_abbr, utterance)\n sleep(INTER_UTTERANCE_DELAY)\n \n\nif (__name__ == \"__main__\"):\n n = int(argv[1]) if len(argv) > 1 else DEFAULT_NUM_UTTERANCES\n main(\"de\", n)\n"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.523809552192688,
"avg_line_length": 13.714285850524902,
"blob_id": "c7b00f7c03b168a472e2b8eba9b64f0b15b23e6d",
"content_id": "082f1b40a62fe1244f1feea45df7a236dca27fb2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/setup.py",
"repo_name": "ethantkoenig/Substantiv",
"src_encoding": "UTF-8",
"text": "\n\nimport pip\n\ndef main():\n pip.main([\"install\", \"duolingo-api\"])\n\nif (__name__ == \"__main__\"):\n main()\n"
}
] | 3 |
nitikaa11/MyCaptain | https://github.com/nitikaa11/MyCaptain | 67ee1a763b8abc05287055b966561d7e72b8776f | 8755a1d412df67a3404903360c4d5664079b14c9 | 7b48ca38268649417d7a1ff2333673fa1195a21d | refs/heads/master | 2022-10-05T18:20:07.848867 | 2020-06-12T07:04:53 | 2020-06-12T07:04:53 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6305732727050781,
"alphanum_fraction": 0.7070063948631287,
"avg_line_length": 25.16666603088379,
"blob_id": "1740f850140d81da887cc54a141ae096998b99ce",
"content_id": "4105cc0d670230cde6fbb2076dd39f152daab18c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 157,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 6,
"path": "/positive.py",
"repo_name": "nitikaa11/MyCaptain",
"src_encoding": "UTF-8",
"text": "list1=[12,-7,5,64,-14]\nprint(\"All values of list:\")\nprint(list1)\nnew_list=list(filter(lambda x:x>0,list1))\nprint(\"positive values of list:\")\nprint(new_list)\n"
},
{
"alpha_fraction": 0.6114649772644043,
"alphanum_fraction": 0.6815286874771118,
"avg_line_length": 25.16666603088379,
"blob_id": "afb9e5bc50d80d6d6111d8547611987777c16291",
"content_id": "06fc3767b06577823ad6d30f26df15d4062f9d4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 314,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 12,
"path": "/assignment3.py",
"repo_name": "nitikaa11/MyCaptain",
"src_encoding": "UTF-8",
"text": "num1=[5,10,15,20]\nprint(num1,\"is the list\")\nnum1.append(25)\nprint(num1,\"is the new list\")\nmytuple=(2,4,6,8)\nprint(mytuple,\"is a tuple\")\nprint(len(mytuple),\"is the tuple length\")\nprint(mytuple[2],\"is the third element of tuple\")\nmydict = {1: 'My', 2: 'Captian', 3: 'App'} \nprint(mydict)\ndel mydict[3]\nprint(mydict)\n"
},
{
"alpha_fraction": 0.5729729533195496,
"alphanum_fraction": 0.6054053902626038,
"avg_line_length": 14.416666984558105,
"blob_id": "a7d47a0ffac319b0d9c989318bb67598cb6b7b7c",
"content_id": "b7403f850da92dcc46a4fa746451fcfae5993ea5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 12,
"path": "/fibo.py",
"repo_name": "nitikaa11/MyCaptain",
"src_encoding": "UTF-8",
"text": "a=0\nb=1\nc=0\nn=int(input(\"Enter a number:\"))\nprint(n,\"fibonacci numbers are given below:\")\nfor i in range (1,n):\n\tprint(a,end=' ')\n\tc=a+b\n\ta=b\n\tb=c\nprint()\nprint('{:^25}'.format('end'))\n"
}
] | 3 |
meangreen02/home_automation.py | https://github.com/meangreen02/home_automation.py | 3accfeaea9a0ce66acda9480bddeeea69e23daa2 | 7a48febfce4d44029cf98ca1d49720da58aa38df | 433cf182cc00c9d43dd0bbf2e050a7a4c6edd0cf | refs/heads/master | 2021-05-27T12:25:48.310827 | 2014-01-21T20:45:41 | 2014-01-21T20:45:41 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5332223176956177,
"alphanum_fraction": 0.5371575355529785,
"avg_line_length": 39.042423248291016,
"blob_id": "bf8bad65080edfe5ea7e04790ba4c1b90db42a94",
"content_id": "083fb53ca2531413d5106e0994ac5b992a392abd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6607,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 165,
"path": "/home_automation.py",
"repo_name": "meangreen02/home_automation.py",
"src_encoding": "UTF-8",
"text": "# This module's future home should be inside userdata/addon_data/script.cinema.experience/ha_scripts\n# to make sure it does not get over written when updating the script\n\nimport xbmc, xbmcaddon\nimport sys, urllib2, os\nfrom threading import Thread\nfrom urllib import urlencode\n\n__script__ = sys.modules[ \"__main__\" ].__script__\n__scriptID__ = sys.modules[ \"__main__\" ].__scriptID__\ntriggers = sys.modules[ \"__main__\" ].triggers\nha_settings = sys.modules[ \"__main__\" ].ha_settings\nBASE_RESOURCE_PATH = sys.modules[\"__main__\"].BASE_RESOURCE_PATH\nsys.path.append( os.path.join( BASE_RESOURCE_PATH, \"lib\" ) )\nimport utils\n\nclass Automate:\n def __init__( self ):\n pass\n \n def sab_pause(self, mode):\n \"\"\"\n This function provides a method to pause and resume SabNZBd downloading, very useful on a limited network or low powered system\n \n Usage:\n \n apikey - Your SabNZBd API key goes here\n ip - The IP of your SabNZBd Machine, if local, leave as is, if it does not work, put the actual address in\n port - Normally 5000 but change it to match you SabNZBd program\n \n Pause:\n \n self.sab_pause( \"pause\" )\n \n Resume:\n \n self.sab_pause( \"resume\" )\n \"\"\"\n apikey = \"\"\n ip = \"127.0.0.1\" # address \n port = \"5000\"\n url = \"http://%s:%s/sabnzbd/\" % ( ip, port )\n query = {}\n query[ \"mode\" ] = mode\n query[\"apikey\"] = apikey\n response = urllib2.urlopen( urllib2.Request( url + \"api?\", urlencode( query ) ) )\n response_data = response.read()\n \n def activate_ha( self, trigger = None, prev_trigger = None, mode=\"normal\" ):\n if ha_settings[ \"ha_enable\" ]:\n if ha_settings[ \"ha_multi_trigger\" ] and prev_trigger == trigger:\n pass\n elif mode != \"thread\":\n self.activate_on( trigger )\n else:\n thread = Thread( name='ha_trigger', target=self.activate_on, args=( trigger, ) )\n thread.start()\n prev_trigger = trigger\n return prev_trigger\n\n def activate_on( self, trigger = None ):\n \"\"\"\n Scripting to trigger almost anything(HA, other scripts, etc...) when videos start. 
\n \n Usage:\n activate_on( \"Movie\" )\n will trigger code that is set under the Movie heading.\n \n \"\"\"\n if not trigger:\n utils.log( \" - [ home_automation.py ] - No Trigger Sent, Returning\", xbmc.LOGNOTICE )\n return\n utils.log( \" - [ home_automation.py ] - activate_on( %s ) Triggered\" % trigger, xbmc.LOGNOTICE )\n if trigger in triggers:\n utils.log( \" - [ home_automation.py ] - Trigger %s\" % trigger, xbmc.LOGNOTICE )\n # Script Start\n if trigger == \"Script Start\" and ha_settings[ \"ha_script_start\" ]: \n # place code below this line\n pass\n # Trivia Intro\n elif trigger == \"Trivia Intro\" and ha_settings[ \"ha_trivia_intro\" ]: \n # place code below this line\n pass\n # Trivia\n elif trigger == \"Trivia\" and ha_settings[ \"ha_trivia_start\" ]: \n # place code below this line\n pass\n # Trivia Outro\n elif trigger == \"Trivia Outro\" and ha_settings[ \"ha_trivia_outro\" ]:\n # place code below this line\n pass\n # Movie Theatre Intro\n elif trigger == \"Movie Theater Intro\" and ha_settings[ \"ha_mte_intro\" ]:\n # place code below this line\n pass\n # Coming Attractions Intro\n elif trigger == \"Coming Attractions Intro\" and ha_settings[ \"ha_cav_intro\" ]:\n # place code below this line\n pass\n # Trailer\n elif trigger == \"Movie Trailer\" and ha_settings[ \"ha_trailer_start\" ]:\n # place code below this line\n pass\n # Coming Attractions Outro\n elif trigger == \"Coming Attractions Outro\" and ha_settings[ \"ha_cav_outro\" ]: \n # place code below this line\n pass\n # Feature Presentation Intro\n elif trigger == \"Feature Presentation Intro\" and ha_settings[ \"ha_fpv_intro\" ]: \n # place code below this line\n pass\n #3D Intro\n elif trigger == \"3D Intro\" and ha_settings[ \"ha_3d_intro\" ]:\n # place code below this line\n pass\n #3D Trailers\n elif trigger == \"3D Movie Trailer\" and ha_settings[ \"ha_3d_trailer\" ]:\n # place code below this line\n pass\n #3D Outro\n elif trigger == \"3D Outro\" and ha_settings[ \"ha_3d_outro\" ]:\n # place code below this line\n pass\n # MPAA Rating\n elif trigger == \"MPAA Rating\" and ha_settings[ \"ha_mpaa_rating\" ]: \n # place code below this line\n pass\n # Countdown\n elif trigger == \"Countdown\" and ha_settings[ \"ha_countdown_video\" ]:\n # place code below this line\n pass\n # Audio Format\n elif trigger == \"Audio Format\" and ha_settings[ \"ha_audio_format\" ]:\n # place code below this line\n pass\n # Movie\n elif trigger == \"Movie\" and ha_settings[ \"ha_movie\" ]: \n # place code below this line\n pass\n # Feature Presentation Outro\n elif trigger == \"Feature Presentation Outro\" and ha_settings[ \"ha_fpv_outro\" ]:\n # place code below this line\n pass\n # Movie Theatre Intro\n elif trigger == \"Movie Theatre Outro\" and ha_settings[ \"ha_mte_outro\" ]: \n # place code below this line\n pass\n # Intermission\n elif trigger == \"Intermission\" and ha_settings[ \"ha_intermission\" ]: \n # place code below this line\n pass\n # Script End\n elif trigger == \"Script End\" and ha_settings[ \"ha_script_end\" ]: \n # place code below this line\n pass\n # Paused\n elif trigger == \"Pause\" and ha_settings[ \"ha_paused\" ]: \n # place code below this line\n pass\n # Resumed\n elif trigger == \"Resume\" and ha_settings[ \"ha_resumed\" ]: \n # place code below this line\n pass\n else:\n utils.log( \" - [ home_automation.py ] - Opps. Something happened\", xbmc.LOGNOTICE )\n"
}
] | 1 |
alab1001101/telepathy-gabble-console-client-python | https://github.com/alab1001101/telepathy-gabble-console-client-python | 1208852ae95c1188ecf735d3f0bbeafce4c68fec | 7921eddb8836e57ecadeafb880ad2772f8b34982 | 04641aeaee24ee226a273841b587eacd9d28bc52 | refs/heads/master | 2016-09-06T04:56:28.631566 | 2012-04-14T21:33:51 | 2012-04-14T21:33:51 | 3,559,787 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6461980938911438,
"alphanum_fraction": 0.6535145044326782,
"avg_line_length": 35.4476203918457,
"blob_id": "616254d1eabfc24feaef71ee337e48926f57dd69",
"content_id": "f096f22226797cdb2fabe27b0cb4fc4db0a1b55a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3827,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 105,
"path": "/roomlist.py",
"repo_name": "alab1001101/telepathy-gabble-console-client-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys\nimport string\n\nfrom gi.repository import GObject\nGObject.threads_init()\n\nfrom gi.repository import TelepathyGLib\n\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\n\nimport pprint\nimport inspect\n\ndef dump(what):\n pprint.pprint(inspect.getmembers(what), indent=2)\n\ndef dumpclass(which):\n print which\n pprint.pprint(dir(which), indent=2)\n\ndef listing_rooms_cb(is_listing):\n if False == is_listing:\n channel.close_async(None, None)\n main_loop.quit()\n\ndef got_rooms_cb(therooms):\n# dbus.Struct(\n# (dbus.UInt32(29L),\n# dbus.String(u'org.freedesktop.Telepathy.Channel.Type.Text'),\n# dbus.Dictionary({\n# dbus.String(u'name'): dbus.String(u'', variant_level=1),\n# dbus.String(u'members-only'): dbus.Boolean(False, variant_level=1),\n# dbus.String(u'persistent'): dbus.Boolean(True, variant_level=1),\n# dbus.String(u'moderated'): dbus.Boolean(False, variant_level=1),\n# dbus.String(u'handle-name'): dbus.String(u'', variant_level=1),\n# dbus.String(u'members'): dbus.UInt32(2L, variant_level=1),\n# dbus.String(u'invite-only'): dbus.Boolean(False, variant_level=1),\n# dbus.String(u'hidden'): dbus.Boolean(False, variant_level=1),\n# dbus.String(u'password'): dbus.Boolean(True, variant_level=1),\n# dbus.String(u'anonymous'): dbus.Boolean(False, variant_level=1)\n# },\n# signature=dbus.Signature('sv'))\n# ),\n# signature=None)\n \n for (_, channel_type, room_info) in therooms:\n print \"%s %s %s\" % (_, room_info['name'], room_info['handle-name'])\n\ndef channel_prepared_cb(channel, result, data):\n success = channel.prepare_finish(result)\n\n roomlist = session_bus.get_object(channel.get_bus_name(), channel.get_object_path())\n iface = dbus.Interface(roomlist, dbus_interface=TelepathyGLib.IFACE_CHANNEL_TYPE_ROOM_LIST)\n \n iface.connect_to_signal('GotRooms', got_rooms_cb)\n iface.connect_to_signal('ListingRooms', listing_rooms_cb)\n iface.ListRooms()\n\ndef create_channel_cb(request, result, data):\n global channel\n channel, context = request.create_and_handle_channel_finish(result)\n channel.prepare_async(None, channel_prepared_cb, None)\n\ndef account_prepared_cb(account, result, data):\n success = account.prepare_finish(result)\n\n request_dict = {TelepathyGLib.PROP_CHANNEL_CHANNEL_TYPE: TelepathyGLib.IFACE_CHANNEL_TYPE_ROOM_LIST,\n TelepathyGLib.PROP_CHANNEL_TARGET_HANDLE_TYPE: int(TelepathyGLib.HandleType.NONE),\n TelepathyGLib.PROP_CHANNEL_TYPE_ROOM_LIST_SERVER: ''}\n \n request = TelepathyGLib.AccountChannelRequest.new(account, request_dict, 0)\n request.create_and_handle_channel_async(None, create_channel_cb, None)\n\ndef manager_prepared_cb(account_manager, result, data):\n account_manager.prepare_finish(result)\n account = account_manager.ensure_account(account_path)\n account.prepare_async(None, account_prepared_cb, None);\n \ndef manager_prepare():\n account_manager = TelepathyGLib.AccountManager.dup()\n account_manager.prepare_async(None, manager_prepared_cb, None)\n\ndef path_encode_jid(jid):\n path = jid\n path = string.replace(path, '@', '_40')\n path = string.replace(path, '.', '_2e')\n path = \"%sgabble/jabber/%s0\" % (TelepathyGLib.ACCOUNT_OBJECT_PATH_BASE, path)\n return path\n\nif __name__ == '__main__':\n DBusGMainLoop(set_as_default=True)\n main_loop = GObject.MainLoop()\n\n if len(sys.argv) < 2:\n print \"Usage: %s <jid>\" % sys.argv[0]\n sys.exit(1)\n\n account_path = path_encode_jid(sys.argv[1])\n \n session_bus = dbus.SessionBus()\n\n manager_prepare() \n main_loop.run()\n"
},
{
"alpha_fraction": 0.6291866302490234,
"alphanum_fraction": 0.6318780183792114,
"avg_line_length": 25.33070945739746,
"blob_id": "441c52a59646fe32b1bb3b29baf7a9fd47647da9",
"content_id": "bffe0adf583a98e17abf505a801a25d526a6aa37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3344,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 127,
"path": "/account.py",
"repo_name": "alab1001101/telepathy-gabble-console-client-python",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport pprint\n\nfrom gi.repository import GObject\nGObject.threads_init()\n\nfrom gi.repository import TelepathyGLib as Tp\n\nimport cmd\nimport inspect\n\ndef dump(what):\n pprint.pprint(inspect.getmembers(what), indent=2)\n\ndef dumpclass(which):\n pprint.pprint(dir(which), indent=2)\n\nclass AccountCmd(cmd.Cmd):\n \"\"\"Simple command processor example.\"\"\"\n \n prompt = 'account> '\n\n restart = False\n\n # 'AVAILABLE', 'AWAY', 'BUSY', 'ERROR', 'EXTENDED_AWAY', 'HIDDEN', 'OFFLINE', 'UNKNOWN', 'UNSET'\n\n presenceTypes = { 'available': Tp.ConnectionPresenceType.AVAILABLE\n , 'away': Tp.ConnectionPresenceType.AWAY\n , 'busy': Tp.ConnectionPresenceType.BUSY\n , 'hidden': Tp.ConnectionPresenceType.HIDDEN\n , 'offline': Tp.ConnectionPresenceType.OFFLINE\n }\n\n def do_list(self, line):\n \"\"\"list accounts\"\"\"\n for account in getAccounts():\n printAccount(account)\n\n def do_presence(self, line):\n args = (line.split(\" \") + [\"\",\"\",\"\"])\n account = getAccountByName(args[0])\n\n if account is not None:\n self.restart = True\n account.request_presence_async(self.presenceTypes[args[1]], args[1], args[2], account_presence_cb, None)\n return True\n\n def complete_presence(self, text, line, begidx, endidx):\n count = len(line.split(\" \"))\n\n if count == 2:\n completions = getAccountNames()\n elif count == 3:\n completions = self.presenceTypes.keys()\n\n if text:\n completions = [ f for f in completions if f.startswith(text) ]\n\n return completions\n\n def do_quit(self, line):\n self.restart = False\n return True\n \n def do_EOF(self, line):\n print\n self.restart = False\n return True\n \n #def postmainLoop(self):\n # print\n\ndef printAccount(account):\n print \"%s %s %s %s\" % ((account.get_service(), account.get_normalized_name()) + account.get_current_presence()[1:])\n\ndef getAccountByName(name):\n for account in getAccounts():\n if name == account.get_normalized_name():\n return account\n return None\n\ndef getAccountNames():\n names = []\n for account in getAccounts():\n names.append(account.get_normalized_name())\n return names\n\ndef getAccounts():\n return manager.get_valid_accounts()\n\ndef account_presence_cb(account, result, data):\n account.request_presence_finish(result)\n account.connect(\"presence-changed\", current_presence_cb)\n return True\n\ndef current_presence_cb(account, statusType, status, message):\n printAccount(account)\n reloop()\n\ndef reloop():\n accountLoop.cmdloop()\n if False == accountLoop.restart:\n mainLoop.quit()\n\ndef manager_prepared_cb(manager, result, data):\n manager.prepare_finish(result)\n accountLoop.cmdloop()\n\ndef manager_prepare():\n global manager\n\n manager = Tp.AccountManager.dup()\n\n factory = manager.get_factory()\n factory.add_account_features([Tp.Account.get_feature_quark_connection()])\n\n manager.prepare_async(None, manager_prepared_cb, None)\n\nif __name__ == '__main__':\n Tp.debug_set_flags(os.getenv('EXAMPLE_DEBUG', ''))\n\n mainLoop = GObject.MainLoop()\n accountLoop = AccountCmd()\n manager_prepare()\n mainLoop.run()\n"
}
] | 2 |
ademaro/teahouse | https://github.com/ademaro/teahouse | 0ff671cb8ae74921621d4ef8d4ae5e16aa2f6031 | b5fd233694be8d8f5dc0b44526f63f7ede76524f | 2ab4c751baa39aeb9fba4bdefcb8ccaf47f0b35b | refs/heads/master | 2016-09-05T16:32:10.437112 | 2011-12-26T16:47:45 | 2011-12-26T16:47:45 | 2,713,265 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6570218801498413,
"alphanum_fraction": 0.6626676321029663,
"avg_line_length": 39.485713958740234,
"blob_id": "70ad8ffc1d0a6e920201e09b219a32fb17441eff",
"content_id": "847e8cd96c9f4ad7e4e1d00177a78eb28ff417c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1584,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 35,
"path": "/blog/models.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.db import models\n\nfrom pyadmin import verbose_name_cases, verbose_name_field_cases\n\nclass Category(models.Model):\n name = models.CharField(verbose_name = verbose_name_field_cases(u'категория', sort = u'категории', add = u'категорию'), max_length=250, unique=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n ordering = ['name']\n verbose_name = verbose_name_cases(u'категория', (u'категория', u'категории', u'категории'),\n gender = 0, change = u'категорию', delete = u'категорию', add = u'категорию')\n verbose_name_plural = verbose_name.plural\n\nclass Entry(models.Model):\n title = models.CharField(verbose_name='Заголовок', max_length=255)\n alias = models.SlugField(verbose_name='Алиас для url', null=True, blank=True, unique=True)\n text = models.TextField(verbose_name='Текст', blank=True, help_text=u'Используйте синтаксис Markdown.')\n category = models.ManyToManyField(Category, verbose_name=u'Категория')\n date_publication = models.DateTimeField(verbose_name='Дата публикации', blank=True, null=True)\n\n def __unicode__(self):\n return self.title\n #return '%s %s' % (self.title, self.category)\n\n class Meta:\n ordering = ['-date_publication']\n verbose_name = 'Запись'\n verbose_name_plural = 'Записи'\n \n# class Admin:\n# js = ('/static/js/jquery.js',)\n"
},
{
"alpha_fraction": 0.6780693531036377,
"alphanum_fraction": 0.6832239627838135,
"avg_line_length": 28.21917724609375,
"blob_id": "2abd077e13f70fdf516d45ab65f8090452e7398b",
"content_id": "d1b0f4f5840260da496f911b61242699c0f34871",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2134,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 73,
"path": "/pytils/test/templatetags/helpers.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# pytils - russian-specific string utils\n# Copyright (C) 2006-2008 Yury Yurevich\n#\n# http://www.pyobject.ru/projects/pytils/\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation, version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\"\"\"\nHelpers for templatetags' unit tests in Django webframework\n\"\"\"\n\nfrom django.conf import settings\n\nencoding = 'utf-8'\n\nsettings.configure(\n TEMPLATE_DIRS=(),\n TEMPLATE_CONTEXT_PROCESSORS=(),\n TEMPLATE_LOADERS=(),\n INSTALLED_APPS=('pytils',),\n DEFAULT_CHARSET=encoding,\n)\n\nfrom django import template\nfrom django.template import loader\nfrom pytils.templatetags import pseudo_str\n\nimport unittest\n\n\ndef pstr(ustr):\n \"\"\"\n Provide/Pseudo unicode\n \"\"\"\n return pseudo_str(ustr, encoding, None)\n\n\nclass TemplateTagTestCase(unittest.TestCase):\n \"\"\"\n TestCase for testing template tags and filters\n \"\"\"\n def check_template_tag(self, template_name, template_string, context, result_string):\n \"\"\"\n Method validates output of template tag or filter\n \n @param template_name: name of template\n @type template_name: C{str}\n \n @param template_string: contents of template\n @type template_string: C{str} or C{unicode}\n\n @param context: rendering context\n @type context: C{dict}\n\n @param result_string: reference output\n @type result_string: C{str} or C{unicode}\n \"\"\"\n \n def test_template_loader(template_name, template_dirs=None):\n return pstr(template_string), template_name\n \n loader.template_source_loaders = [test_template_loader,]\n \n output = loader.get_template(template_name).render(template.Context(context))\n self.assertEquals(output, pstr(result_string))\n\n"
},
{
"alpha_fraction": 0.5997552275657654,
"alphanum_fraction": 0.6205630302429199,
"avg_line_length": 37.904762268066406,
"blob_id": "73139d095f3ffcfef767a230ba8f6c76ae514423",
"content_id": "931d40c21d3053aed6da444f2337d5e3f140579d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 819,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 21,
"path": "/urls.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nfrom django.conf import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', '_tea.views.home', name='home'),\n # url(r'^_tea/', include('_tea.foo.urls')),\n url(r'^$', 'blog.views.index'),\n url(r'^entry/(\\d+)/$', '_tea.blog.views.entry'),\n url(r'^tags/(\\d+)/$', '_tea.blog.views.tags'),\n #url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/var/www/z-gu.ru/_tea/static'}),\n #url(r'mu-a0e9c771-fb62bfcb-31460fd2-f2daa98a.txt', 'django.views.static.serve', {'document_root': '/var/www/z-gu.ru/_tea/static'}),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n)\n"
},
{
"alpha_fraction": 0.6772152185440063,
"alphanum_fraction": 0.6772152185440063,
"avg_line_length": 51.66666793823242,
"blob_id": "7e340dbc1a65f1d6d2f2bde39d720ae911a74b22",
"content_id": "55b0e927c79ddd6d3f813ba4b008dc9be1a8908b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 316,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 6,
"path": "/static/js/editor.js",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "jQuery(document).ready(function() {\n jQuery(\"#id_text\").wymeditor({ // assuming content is field name with TextField.\n updateSelector: \"input:submit\", //without this line and next line, you will be able to see editor but content will not be passed through POST.\n updateEvent: \"click\"\n });\n});\n"
},
{
"alpha_fraction": 0.5394360423088074,
"alphanum_fraction": 0.5457703471183777,
"avg_line_length": 36.914730072021484,
"blob_id": "f9780a94cc543a88f8a21d0f22884e94b969d779",
"content_id": "b678d96ccbe68c87377817b79e3cda0e4ca67ef5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5175,
"license_type": "no_license",
"max_line_length": 191,
"num_lines": 129,
"path": "/pyadmin.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n\nNEVER EVER WRITE CODE LIKE THIS!\n\n\"\"\"\n\nimport traceback\nimport sys\nimport re\n\nfrom django.db import models\nfrom django.contrib import messages\nfrom django.contrib.admin import ModelAdmin\n\nfrom pytils.numeral import get_plural\n\n#import django.contrib.admin.util\n\nclass verbose_name_field_cases:\n \"\"\"\n Extended verbose_name for field, capable of using different case for sorting in admin\n \"\"\"\n def __init__(self, verbose_name, **kwargs):\n self._verbose_name = verbose_name\n\tself._sort_name = kwargs.get(\"sort\", verbose_name)\n\n @property\n def sort(self):\n return self._sort_name\n \n def __str__(self):\n return self._verbose_name\n\n def __unicode__(self):\n return self._verbose_name\n \n \nclass verbose_name_cases:\n def __init__(self, verbose_name, plural_forms, **kwargs):\n self._verbose_name = verbose_name\n self._change_name = kwargs.get(\"change\", verbose_name)\n self._add_name = kwargs.get(\"add\", verbose_name)\n self._delete_name = kwargs.get(\"delete\", verbose_name)\n self._plural = plural_forms\n self._gender = kwargs.get(\"gender\", 1)\n\n if self._gender == 0:\n \"\"\"\n Monkey-patch Django's ModelAdmin function with our custom message handler\n \"\"\"\n def msg(self, request, message):\n \n try:\n \n msg_re = re.compile(u\"(.*?) \\\"(.*?)\\\" был успешно добавлен\")\n if msg_re.match(message):\n grp = msg_re.search(message).groups(1)\n \n message = message.replace(u\"один \"+grp[0],u\"одну \"+self.VerboseNameCaseReplace[grp[0]]._change_name)\n message = message.replace(u\"был\",u\"была\").replace(u\"добавлен\",u\"добавлена\") \n message = message.replace(u\"Ниже вы можете снова его отредактировать\", u\"Ниже вы можете снова её отредактировать\")\n \t\t\n msg_addmore_re = re.compile(u\"(.*?)Ниже вы можете добавить еще один (.*?)\\.\") \n if msg_addmore_re.match(message):\n grp = msg_addmore_re.search(message).groups(1)\n message = message.replace(u\"Ниже вы можете добавить еще один %s.\" % grp[1], u\"Ниже вы можете добавить еще одну %s.\" % self.VerboseNameCaseReplace[grp[1]]._change_name)\n \t\t\t\t\n msg_save_re = re.compile(u\"(.*?) \\\"(.*?)\\\" был успешно изменён\")\n if msg_save_re.match(message):\n \t\t message = message.replace(u\"был\",u\"была\").replace(u\"изменён\",u\"изменена\")\n \n message = message.replace(u\"удалено\", u\"удалена\") \n msg_delete_re = re.compile(u\"Успешно удалены (.*?) 
(.*)\\.\")\n if msg_delete_re.match(message):\n grp = msg_delete_re.search(message).groups(1)\n \n if grp[0] > 1:\n message = message.replace(u\"удалены %s %s\" % (grp[0], grp[1]), u\"удалено %s\" % (get_plural(int(grp[0]), \",\".join(self.VerboseNameCaseReplace[grp[1]]._plural))))\n except:\n pass\n \n messages.info(request, message)\n \n ModelAdmin.message_user = msg\n\n \n if not hasattr(ModelAdmin, \"VerboseNameCaseReplace\"):\n ModelAdmin.VerboseNameCaseReplace = {}\n \n ModelAdmin.VerboseNameCaseReplace[self._verbose_name] = self\n ModelAdmin.VerboseNameCaseReplace[self._plural[1]] = self\n #ModelAdmin.VerboseNameCaseReplace[self._verbose_name] = self\n\n @property\n def plural(self):\n return self._plural[1]\n\n @property\n def plural_forms_amount(self):\n return [self._plural[1],self._plural[2]]\n\n @property\n def plural_forms(self):\n return unicode(\",\".join(self._plural))\n\n @property\n def add(self):\n return self._add_name \n\n def __str__(self):\n return self._verbose_name\n\n def __unicode__(self):\n \"\"\"\n Inspect stack 3 levels up, this is potentialy very bad thing as any change in i18n calls will break this, so\n TODO: inspect whole stack\n \"\"\" \n if \"Select %s to change\" in traceback.extract_stack()[-3][3]: # Edit entries\n return self._change_name\n elif \"Add %s\" in traceback.extract_stack()[-3][3]: # Add new entry\n return self._add_name\n elif \"Change %s\" in traceback.extract_stack()[-3][3]: # Edit entry\n return self._change_name\n elif \"delete_view\" == traceback.extract_stack()[-3][2]: # Confirm deletion\n return self._delete_name\n else: \n return self._verbose_name \n"
},
{
"alpha_fraction": 0.6336397528648376,
"alphanum_fraction": 0.6397684812545776,
"avg_line_length": 42.835819244384766,
"blob_id": "f9a510727fb450496e9ed2d1094ea3567b284a4f",
"content_id": "78a4304a80eab24c1cc3cbe7e6381cbc9b15fb61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3050,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 67,
"path": "/pytils/test/templatetags/test_common.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# pytils - russian-specific string utils\n# Copyright (C) 2006-2008 Yury Yurevich\n#\n# http://www.pyobject.ru/projects/pytils/\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation, version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\"\"\"\nUnit tests for pytils' templatetags common things\n\"\"\"\n\nimport unittest\n\nfrom pytils import templatetags as tt\n\nclass TemplateTagsCommonsTestCase(unittest.TestCase):\n \n def testInitDefaults(self):\n \"\"\"\n Unit-tests for pytils.templatetags.init_defaults\n \"\"\"\n self.assertEquals(tt.init_defaults(debug=False, show_value=False), ('', u''))\n self.assertEquals(tt.init_defaults(debug=False, show_value=True), ('%(value)s', u'%(value)s'))\n self.assertEquals(tt.init_defaults(debug=True, show_value=False), ('unknown: %(error)s', u'unknown: %(error)s'))\n self.assertEquals(tt.init_defaults(debug=True, show_value=True), ('unknown: %(error)s', u'unknown: %(error)s'))\n \n def testPseudoUnicode(self):\n \"\"\"\n Unit-tests for pytils.templatetags.pseudo_unicode\n \"\"\"\n self.assertEquals(tt.pseudo_unicode(u'тест', 'utf-8'), u'тест')\n self.assertEquals(tt.pseudo_unicode('тест', 'utf-8'), u'тест')\n self.assertEquals(tt.pseudo_unicode('тест', 'ascii'), u'')\n self.assertEquals(tt.pseudo_unicode('тест', 'ascii', u'опа'), u'опа')\n self.assertRaises(UnicodeDecodeError, tt.pseudo_unicode, 'тест', 'ascii', None)\n\n def testPseudoStr(self):\n \"\"\"\n Unit-tests for pytils.templatetags.pseudo_str\n \"\"\"\n # in django unicode-branch either str() must return unicode\n # this test depends on Django unicode awareness\n if tt.unicode_aware:\n self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), u'тест')\n self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), u'тест')\n self.assertEquals(tt.pseudo_str('тест', 'utf-8'), '')\n self.assertEquals(tt.pseudo_str('тест', 'utf-8', u'опа'), u'опа')\n self.assertEquals(tt.pseudo_str(u'тест', 'ascii'), u'тест')\n self.assertEquals(tt.pseudo_str(u'тест', 'ascii', 'опа'), u'тест')\n else:\n self.assertEquals(tt.pseudo_str(u'тест', 'utf-8'), 'тест')\n self.assertEquals(tt.pseudo_str('тест', 'utf-8'), '')\n self.assertEquals(tt.pseudo_str(u'тест', 'ascii'), '')\n self.assertEquals(tt.pseudo_str(u'тест', 'ascii', 'опа'), 'опа')\n self.assertRaises(UnicodeEncodeError, tt.pseudo_str, u'тест', 'ascii', None)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
{
"alpha_fraction": 0.6561679840087891,
"alphanum_fraction": 0.6645669341087341,
"avg_line_length": 35.63461685180664,
"blob_id": "9ff86e96f0b8492cd9cc8999f023666f0f47c2f1",
"content_id": "32c5f6be2bcc2114d26e5b92f07864a74065d0b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 52,
"path": "/blog/views.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response\nfrom _tea.blog.models import Entry, Category\n\nfrom datetime import datetime\n\ndef index(request):\n if not request.session.get('bred'):\n request.session['bred'] = 'disabled'\n if request.method == 'GET' and request.GET.get('bred') == 'enable':\n request.session['bred'] = 'enabled'\n elif request.method == 'GET' and request.GET.get('bred') == 'disable':\n request.session['bred'] = 'disabled'\n\n bred = request.session.get('bred')\n entry_list = Entry.objects.filter(date_publication__lte=datetime.now()).exclude(id__exact=13)\n\n paginator = Paginator(entry_list, 5, 2)\n page = request.GET.get('page')\n try:\n entries = paginator.page(page)\n except PageNotAnInteger:\n entries = paginator.page(1)\n except EmptyPage:\n entries = paginator.page(paginator.num_pages)\n\n categories = Category.objects.all()\n #tags = 0 #(entry_list, Category.objects.get(id=entry_list[]))\n return render_to_response('index.html', {'entry_list': entries, 'categories': categories, 'bred': bred })\n\ndef entry(request, entry_id):\n try:\n entry = Entry.objects.get(id=int(entry_id))\n except Entry.DoesNotExist:\n raise Http404()\n else:\n categories = Category.objects.all()\n tags = entry.category.all()\n return render_to_response('entry.html', {'entry': entry, 'tags': tags, 'categories': categories})\n\ndef tags(request, tag_id):\n try:\n tag = Category.objects.get(id=int(tag_id))\n except Category.DoesNotExist:\n raise Http404()\n else:\n entry_list = tag.entry_set.all()\n categories = Category.objects.all()\n return render_to_response('index.html', \n\t{'entry_list': entry_list, 'categories': categories, 'category': tag})\n"
},
{
"alpha_fraction": 0.6274875402450562,
"alphanum_fraction": 0.6349502205848694,
"avg_line_length": 32.5,
"blob_id": "755effc44929d5ed37a07f20282dbf731dd05cbc",
"content_id": "3b5bae8c30796fad37c0e0e86ac03e1a4bfc259d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1608,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 48,
"path": "/pytils/test/templatetags/__init__.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# pytils - russian-specific string utils\n# Copyright (C) 2006-2008 Yury Yurevich\n#\n# http://www.pyobject.ru/projects/pytils/\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation, version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\"\"\"\nUnit tests for pytils' templatetags for Django web framework\n\"\"\"\n\n__all__ = [\"test_common\", \"test_numeral\", \"test_dt\", \"test_translit\"]\n\nimport unittest\n\ndef get_suite():\n \"\"\"Return TestSuite for all unit-test of pytils' templatetags\"\"\"\n suite = unittest.TestSuite()\n for module_name in __all__:\n imported_module = __import__(\"pytils.test.templatetags.\"+module_name,\n globals(),\n locals(),\n [\"pytils.test.templatetags\"])\n \n getter = getattr(imported_module, 'get_suite', False)\n if getter:\n suite.addTest(getter())\n \n loader = unittest.defaultTestLoader\n suite.addTest(loader.loadTestsFromModule(imported_module))\n\n return suite\n\ndef run(verbosity=1):\n \"\"\"Run all unit-test of pytils' templatetags\"\"\"\n suite = get_suite()\n unittest.TextTestRunner(verbosity=verbosity).run(suite)\n\nif __name__ == '__main__':\n run(2)\n"
},
{
"alpha_fraction": 0.4984227120876312,
"alphanum_fraction": 0.5146161913871765,
"avg_line_length": 40.34782791137695,
"blob_id": "e4878fc6331e33172e913d956024956fbedf5bdd",
"content_id": "5b9a6289d9bee4b06aef3a4c9a3c68e83881b3f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5857,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 115,
"path": "/static/js/sun.js",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "$(function(){\n // HTML объект Солнца\n var sun = document.getElementById('sun');\n // jQuery объект Солнца\n var $sun = $(sun);\n // Предзакатное Солнце\n var $sunsetSun = $('#sunsetSun');\n // Предзакатное небо\n var $sunsetSky = $('#sunsetSky');\n // Ночное небо\n var $nightSky = $('#nightSky');\n // Дневной луг\n var $grass = $('#grass');\n // Ночной луг\n var $nightGrass = $('#nightGrass');\n\n // Делаем наше Солнце перемещаемым объектом\n Drag.init(sun);\n\n // \"Рисуем\" 300 звезд в случайных позициях\n makeStars(300);\n\n // Начальная позиция Солнца (сбивается при вызове Drag.init() выше)\n $sun.css({\n 'top': 20,\n 'left': 20\n });\n\n // Данное событие вызывается при перемещении объекта\n sun.onDrag = function(x, y){\n // Отступ Солнца от верхней границы экрана\n var sunTop = $sun.css('top');\n // Высота расположения Солнца относительно высоты неба, в процентах\n var sunPosition = parseInt(sunTop) / (parseInt($sunsetSky.css('height')) / 100);\n // Высота расположения Солнца относительно высоты экрана, в процентах\n var sunAbsolutePosition = parseInt(sunTop) / ($(window).height() / 100);\n\n // Изменяем прозрачность предзакатного неба\n $sunsetSky.css('opacity', (Math.floor(sunPosition) / 100));\n // Изменяем прозрачность предзакатного Солнца\n $sunsetSun.css('opacity', (Math.floor(sunPosition) / 100));\n // Изменяем прозрачность ночного луга\n $nightGrass.css('opacity', (Math.floor(sunPosition) / 100));\n\n // Проверяем, что Солнце находится на высоте ниже 60% относительно нижней части экрана\n if (sunAbsolutePosition >= 40){\n // Высота, на которой начинают проявляться звезды и ночное небо\n var start = $(window).height() / 100 * 40;\n // Высота, на которой звезды имеют максимальную яркость, а ночное небо перекрывает собой все остальные\n var end = $(window).height() / 100 * 65;\n // Позиция в процентах от start до end\n var pos = (parseInt(sunTop) - parseInt(start)) / ((parseInt(end) - parseInt(start)) / 100);\n // Изменяем прозрачность ночного неба\n $nightSky.css('opacity', pos / 100);\n // Изменяем прозрачность звезд\n $('.star').css('opacity', pos / 100);\n }\n // Если Солнце находится выше 60% относительно нижней части экрана, то скрываем все звезды\n else {\n $('.star').css('opacity', 0);\n }\n }\n\n // Возвращает случайное число в диапазоне от start до end\n function range(start, end){\n if ((start >= 0) && (end >= 0)){\n return Math.round(Math.abs(start) + (Math.random() * (Math.abs(end) - Math.abs(start))));\n }\n else if ((start <= 0) && (end <= 0)){\n return 0 - (Math.round(Math.abs(start) + (Math.random() * (Math.abs(end) - Math.abs(start)))));\n }\n else{\n return Math.round(((start) + Math.random() * (end - start)));\n }\n }\n\n // Генерирует count звезд в случайных позициях\n function makeStars(count){\n for (var i=0; i<=count; i++){\n // Создаем элемент, который будет нашей звездой\n var star = $(document.createElement('div'));\n // Присваиваем ему класс star\n star.addClass('star');\n // Вносим в DOM и делам дочерним к body\n star.appendTo('#sun-block');\n // Объявляем стили\n star.css({\n // Высота - случайное значение от 0 до 60% от высоты экрана\n 'top': range(0, parseInt($(window).height()) / 100 * 60),\n // Отступ слева - случайное значение от 0 до текущей ширины экрана\n 'left': range(0, $(window).width())\n });\n }\n }\n\n var sunMove = function(sunId, offset, timeout) {\n $('#' + sunId).css({\n left: '+=' + offset.left,\n top: '+=' + offset.top\n }).get(0).onDrag();\n setTimeout(function() {\n sunMove(sunId, offset, timeout);\n }, timeout);\n 
}\n\n $(document).on('click', '#interface a', function(e){\n sunMove('sun', {left:1, top:1}, 50);\n e.preventDefault();\n });\n\t\n\t$(document).ready(function() {\n\t sunMove('sun', {left:1, top:1}, 50);\n e.preventDefault();\n\t});\n});\n"
},
{
"alpha_fraction": 0.7240398526191711,
"alphanum_fraction": 0.7425320148468018,
"avg_line_length": 30.954545974731445,
"blob_id": "f62db95cb77c2062bccb0c47af68e698cd14c59a",
"content_id": "d23e0424dbea8a1200d5bd7d18d563ece64c43e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/pytils/__init__.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# pytils - russian-specific string utils\n# Copyright (C) 2006-2008 Yury Yurevich\n#\n# http://www.pyobject.ru/projects/pytils/\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation, version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n\"\"\"\nSimple processing for russian strings\n\"\"\"\nVERSION = '0.2.3'\n\nfrom pytils import numeral, dt, translit, err\n"
},
{
"alpha_fraction": 0.7107142806053162,
"alphanum_fraction": 0.7571428418159485,
"avg_line_length": 27,
"blob_id": "f24b1244160897d5588aeb422109fef75f6e9f20",
"content_id": "2722733d54718af6074dfea69e58203cfe58cdd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 10,
"path": "/run",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nUSER=\"tea\"\nPROJDIR=\"/var/www/z-gu.ru/_tea\"\nPIDFILE=\"$PROJDIR/server.pid\"\nMETHOD=\"threaded\" #prefork for bigmem\n\ncd $PROJDIR\n\nexec envuidgid $USER python manage.py runfcgi method=$METHOD host=127.0.0.1 port=8881 pidfile=$PIDFILE minspare=4 maxspare=30 daemonize=false\n"
},
{
"alpha_fraction": 0.5687121748924255,
"alphanum_fraction": 0.5713050961494446,
"avg_line_length": 27.924999237060547,
"blob_id": "f6b97edb8406714bc21b27d026359c4d7162a8dc",
"content_id": "ec645063a70682dbab3a211f47534badb900663e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1185,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 40,
"path": "/static/js/script.js",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "$(document).ready(function() {\n $(\"a[rel=zoom]\").fancybox({\n 'titlePosition' \t: 'outside',\n 'titleFormat'\t: function(title, currentArray, currentIndex, currentOpts) {\n\treturn '<span id=\"fancybox-title-over\">Image ' + (currentIndex + 1) + ' / ' + currentArray.length + (title.length ? ' ' + title : '') + '</span>';}\n });\n});\n\n/*\n$('button#b-all').bind('click', function(event) {\n $.cookie(\"wbred\", 0);\n $('button#b-all').attr(\"disabled\", true);\n $('button#b-nobred').removeAttr(\"disabled\");\n //return false;\n});\n$('button#b-nobred').bind('click', function(event) {\n $.cookie(\"wbred\", 1);\n $('button#b-nobred').attr(\"disabled\", true);\n $('button#b-all').removeAttr(\"disabled\");\n //return false;\n});\n*/\n\n$('a.expand').bind('click', function(event) {\n var ttt = $(this).text();\n if (ttt == 'Развернуть'){\n $(this).text('Свернуть');\n \n } else {\n $(this).text('Развернуть');\n }\n $(this).parent().next().stop(true, true).fadeToggle();\n return false;\n});\n\n$(\"a.expand\").bind(\"dblclick\", function(){\n //$(this).parent.next().stop();\n //$(this).text('qwe');\n window.location.href = $(this).attr('href');\n});\n"
},
{
"alpha_fraction": 0.58152174949646,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 22.74193572998047,
"blob_id": "d12fd79fc5b67d609e7b5f62f66aabd420166d8a",
"content_id": "6b94847d96d0d16f52f85795714c9461bfa497e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 736,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 31,
"path": "/server.sh",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nPROJDIR=\"/var/www/z-gu.ru/_tea\"\nPIDFILE=\"$PROJDIR/server.pid\"\nMETHOD=\"threaded\" #prefork for bigmem\n\ncd $PROJDIR\n\ncase \"$1\" in\n \"start\")\n<<<<<<< HEAD\n exec python ./manage.py runfcgi method=$METHOD host=127.0.0.1 port=8881 pidfile=$PIDFILE\n ;;\n \"stop\")\n kill -9 `cat -- $PIDFILE`\n rm -f -- $PIDFILE\n=======\n #./manage.py runfcgi method=prefork host=127.0.0.1 port=8881 pidfile=H:/teahouse/ninjapirate.pid\n ./manage.py runfcgi method=threaded host=127.0.0.1 port=8881 pidfile=H:/teahouse/ninjapirate.pid\n ;;\n \"stop\")\n kill -9 `cat H:/teahouse/server.pid`\n>>>>>>> eebcb97632489b66eaf90ccd983178408fb17fd3\n ;;\n \"restart\")\n $0 stop\n sleep 1\n $0 start\n ;;\n *) echo \"Usage: ./server.sh {start|stop|restart}\";;\nesac\n"
},
{
"alpha_fraction": 0.6241379380226135,
"alphanum_fraction": 0.6290640234947205,
"avg_line_length": 34,
"blob_id": "b92647c5ab9f1242572da2fa57718005576e336b",
"content_id": "40c14c814c2a96844afdc93e285240d8e2f8ff81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2139,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 58,
"path": "/pytils/test/templatetags/test_translit.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# pytils - russian-specific string utils\n# Copyright (C) 2006-2008 Yury Yurevich\n#\n# http://www.pyobject.ru/projects/pytils/\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation, version 2\n# of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\"\"\"\nUnit tests for pytils' translit templatetags for Django web framework\n\"\"\"\n\nfrom pytils.test.templatetags import helpers\n\nclass TranslitDefaultTestCase(helpers.TemplateTagTestCase):\n \n def testLoad(self):\n self.check_template_tag('load_tag', u'{% load pytils_translit %}', {}, u'')\n \n def testTranslifyFilter(self):\n self.check_template_tag('translify_filter', \n u'{% load pytils_translit %}{{ val|translify }}', \n {'val': 'проверка'}, \n u'proverka')\n \n def testDetranslifyFilter(self):\n self.check_template_tag('detranslify_filter', \n u'{% load pytils_translit %}{{ val|detranslify }}', \n {'val': 'proverka'}, \n u'проверка')\n\n def testSlugifyFilter(self):\n self.check_template_tag('slugify_filter', \n u'{% load pytils_translit %}{{ val|slugify }}', \n {'val': 'Проверка связи'}, \n u'proverka-svyazi')\n \n # без отладки, если ошибка -- по умолчанию пустая строка\n def testDetranslifyError(self):\n # в юникод-режиме это не ошибка\n from pytils.templatetags import unicode_aware\n if not unicode_aware:\n self.check_template_tag('detranslify_error', \n u'{% load pytils_translit %}{{ val|detranslify }}', \n {'val': 'Проверка связи'}, \n u'')\n\n\nif __name__ == '__main__':\n import unittest\n unittest.main()\n"
},
{
"alpha_fraction": 0.6864951848983765,
"alphanum_fraction": 0.6881029009819031,
"avg_line_length": 28.619047164916992,
"blob_id": "ed2556a6c6069c7218603f9644c17bfe4a498fbe",
"content_id": "ba5472b5279179fbca8d8b85cdf6e06b072ef78f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 622,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 21,
"path": "/blog/admin.py",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom blog.models import Category, Entry\n\nimport settings\nmedia = settings.MEDIA_URL\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = ('name',)\n search_fields = ('name',)\n\nclass EntryAdmin(admin.ModelAdmin):\n list_display = ('title', 'date_publication')\n fields = ('title', 'alias', 'text', 'category', 'date_publication')\n filter_horisontal = ('category')\n\n# class Media:\n# js = ('/js/jquery.js', '/js/wymeditor/jquery.wymeditor.js','/js/editor.js')\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Entry, EntryAdmin)\n"
},
{
"alpha_fraction": 0.6991150379180908,
"alphanum_fraction": 0.6991150379180908,
"avg_line_length": 15.142857551574707,
"blob_id": "2621f8e9015aa745c7c3ada5cfeb22787dd926c4",
"content_id": "0d8f606fc5b65d8263581ba8b7c564a282d996be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 113,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 7,
"path": "/README.md",
"repo_name": "ademaro/teahouse",
"src_encoding": "UTF-8",
"text": "for stop server\n\n sudo svc -d /etc/service/ninjapirate/\n\nfor rerun\n\n sudo svc -u /etc/service/ninjapirate/\n"
}
] | 16 |
bubbleqi/xlm_roberta_NMT | https://github.com/bubbleqi/xlm_roberta_NMT | 1685d516b40ed7c895d8e97b4f217d142c0ab1ad | dae6fe12483bd842fa3fba3a2be2f65e5a2ebe9f | b2a9ef05d58d653c8833296a730f62eb0739f208 | refs/heads/master | 2022-05-25T02:05:20.764609 | 2020-05-01T23:15:47 | 2020-05-01T23:15:47 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5953488349914551,
"alphanum_fraction": 0.6032558083534241,
"avg_line_length": 28.86111068725586,
"blob_id": "b95a28caa8d6c096a83c42b6f684bfa00bbbb8c0",
"content_id": "029e0cd961495642d4b6a6d2ee510d4f9de96dff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2150,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 72,
"path": "/utils/data_utils.py",
"repo_name": "bubbleqi/xlm_roberta_NMT",
"src_encoding": "UTF-8",
"text": "import os\nimport logging\n\nimport torch \nfrom torch.utils.data import TensorDataset\nfrom torch.nn.utils.rnn import pad_sequence\n\nclass InputFeatures(object):\n\n def __init__(self, input_ids):\n self.src_tensor = input_ids[0]\n self.target_tensor = input_ids[1]\n\n\nclass en_fr_processor:\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._read_file(data_dir)\n\n\n def _read_file(self, data_dir):\n '''\n read file\n '''\n data = []\n\n with open(os.path.join(data_dir, \"news-commentary-v9.fr-en.en\")) as en_file, open(os.path.join(data_dir, \"news-commentary-v9.fr-en.fr\")) as fr_file:\n for fr_sentence, en_sentence in zip(fr_file, en_file):\n if fr_sentence and en_sentence:\n data.append([en_sentence.strip(), fr_sentence.strip()])\n \n #print(data[:1000])\n return data[:1000]\n\ndef convert_examples_to_features(examples, max_seq_length, encode_method):\n features = []\n for (ex_index, example) in enumerate(examples):\n\n if not example:\n continue\n\n token_ids = []\n \n for i, word in enumerate(example): \n tokens = encode_method(word.strip())\n tokens.insert(0, 1)\n tokens.append(0)\n token_ids.append(tokens)# word token ids \n #token_ids.extend(tokens) # all sentence token ids\n\n if ex_index == 0:\n logging.info(\"token ids = \")\n logging.info(token_ids)\n logging.debug(\"token ids = \")\n logging.debug(token_ids)\n\n if token_ids:\n features.append(\n InputFeatures(input_ids=token_ids))\n\n return features\n\n\ndef create_dataset(features):\n #print(f'src tensor : {features[1].src_tensor}')\n all_src_tensor = [torch.tensor(f.src_tensor) for f in features]\n all_target_tensor = [torch.tensor(f.target_tensor) for f in features]\n\n all_src_tensor = pad_sequence(all_src_tensor, batch_first=True)\n all_target_tensor = pad_sequence(all_target_tensor, batch_first=True)\n return TensorDataset(\n all_src_tensor, all_target_tensor)\n"
},
{
"alpha_fraction": 0.6419392228126526,
"alphanum_fraction": 0.6454439163208008,
"avg_line_length": 30.127273559570312,
"blob_id": "b48db9d7098f87f45a16d5bdc788d158f466a8d8",
"content_id": "382964de1d6c082f846c9c408fd36257aca15653",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3424,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 110,
"path": "/train.py",
"repo_name": "bubbleqi/xlm_roberta_NMT",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport logging\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_transformers import AdamW, WarmupLinearSchedule\nfrom torch import nn\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\n\nfrom model.xlmr_mt import XLMR_Encoder, XLM_Decoder\nfrom utils.train_utils import add_xlmr_args\nfrom utils.data_utils import en_fr_processor, create_dataset, convert_examples_to_features\n\nfrom tqdm.notebook import tqdm\nfrom tqdm import trange\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser = add_xlmr_args(parser)\n\n args = parser.parse_args()\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n processor = en_fr_processor()\n\n train_examples = processor.get_train_examples(args.data_dir)\n \n # preparing model configs\n hidden_size = 768 if 'base' in args.pretrained_path else 1024 # TODO: move this inside model.__init__\n\n device = 'cuda' if (torch.cuda.is_available() and not args.no_cuda) else 'cpu'\n\n # creating model\n model = XLMR_Encoder_Decoder(pretrained_path=args.pretrained_path,\n hidden_size=hidden_size,\n dropout_p=args.dropout, device=device)\n\n model.encoder.to(device)\n model.decoder.to(device)\n \n params = model.encoder.named_parameters() + model.decoder.named_parameters()\n\n optimizer_grouped_parameters = [\n {'params': [p for n, p in params]}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters,\n lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(\n optimizer, warmup_steps=1, t_total=1)\n\n train_features = convert_examples_to_features(\n train_examples, args.max_seq_length, model.encoder.encode_word)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n #logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n train_data = create_dataset(train_features)\n\n train_sampler = RandomSampler(train_data)\n \n train_dataloader = DataLoader(\n train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n for _ in tqdm(range(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n\n tbar = tqdm(train_dataloader, desc=\"Iteration\")\n \n model.encoder.train()\n for step, batch in enumerate(tbar):\n batch = tuple(t.to(device) for t in batch)\n src_tensor, target_tensor = batch\n enc_out = model.encoder(src_tensor)\n torch.nn.utils.clip_grad_norm_(\n model.encoder.parameters(), args.max_grad_norm)\n\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.encoder.zero_grad()\n \n\n model.encoder.to(device)\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6029696464538574,
"alphanum_fraction": 0.6096406579017639,
"avg_line_length": 35.3046875,
"blob_id": "d7142f5327a09aff04cbc3a70a2359c7f3f1754c",
"content_id": "785085aa821d9a82c10ca458ff17673f3bcac881",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4647,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 128,
"path": "/model/xlmr_mt.py",
"repo_name": "bubbleqi/xlm_roberta_NMT",
"src_encoding": "UTF-8",
"text": "from fairseq.models.roberta import XLMRModel\nfrom fairseq.models import FairseqDecoder\nfrom fairseq import utils\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass XLMR_Encoder(nn.Module):\n\n def __init__(self, pretrained_path, hidden_size, dropout_p, device='cuda'):\n super().__init__()\n\n \n self.xlmr = XLMRModel.from_pretrained(pretrained_path)\n self.model = self.xlmr.model\n self.dropout = nn.Dropout(dropout_p)\n \n self.device=device\n\n def forward(self, src_tensor):\n #print(src_tensor)\n transformer_out, _ = self.model(src_tensor)#, features_only=True)\n\n return transformer_out\n\n def encode_word(self, s):\n \"\"\"\n takes a string and returns a list of token ids\n \"\"\"\n tensor_ids = self.xlmr.encode(s)\n # remove <s> and </s> ids\n return tensor_ids.cpu().numpy().tolist()[1:-1]\n\n\n\nclass XLMR_Decoder(FairseqDecoder):\n\n def __init__(\n self, dictionary, encoder_hidden_dim=768, embed_dim=768, hidden_dim=768,\n dropout=0.1\n ):\n super().__init__(dictionary)\n\n # Our decoder will embed the inputs before feeding them to the LSTM.\n self.embed_tokens = nn.Embedding(\n num_embeddings=len(dictionary),\n embedding_dim=embed_dim,\n padding_idx=dictionary.pad(),\n )\n self.dropout = nn.Dropout(p=dropout)\n\n # We'll use a single-layer, unidirectional LSTM for simplicity.\n self.lstm = nn.LSTM(\n # For the first layer we'll concatenate the Encoder's final hidden\n # state with the embedded target tokens.\n input_size=encoder_hidden_dim + embed_dim,\n hidden_size=hidden_dim,\n num_layers=1,\n bidirectional=False,\n )\n\n # Define the output projection.\n self.output_projection = nn.Linear(hidden_dim, len(dictionary))\n\n # During training Decoders are expected to take the entire target sequence\n # (shifted right by one position) and produce logits over the vocabulary.\n # The *prev_output_tokens* tensor begins with the end-of-sentence symbol,\n # ``dictionary.eos()``, followed by the target sequence.\n def forward(self, prev_output_tokens, encoder_out):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (Tensor, optional): output from the encoder, used for\n encoder-side attention\n\n Returns:\n tuple:\n - the last decoder layer's output of shape\n `(batch, tgt_len, vocab)`\n - the last decoder layer's attention weights of shape\n `(batch, tgt_len, src_len)`\n \"\"\"\n bsz, tgt_len = prev_output_tokens.size()\n\n # Extract the final hidden state from the Encoder.\n final_encoder_hidden = encoder_out\n\n # Embed the target sequence, which has been shifted right by one\n # position and now starts with the end-of-sentence symbol.\n x = self.embed_tokens(prev_output_tokens)\n\n # Apply dropout.\n x = self.dropout(x)\n\n # Concatenate the Encoder's final hidden state to *every* embedded\n # target token.\n x = torch.cat(\n [x, final_encoder_hidden.unsqueeze(1).expand(bsz, tgt_len, -1)],\n dim=2,\n )\n\n # Using PackedSequence objects in the Decoder is harder than in the\n # Encoder, since the targets are not sorted in descending length order,\n # which is a requirement of ``pack_padded_sequence()``. 
Instead we'll\n # feed nn.LSTM directly.\n initial_state = (\n final_encoder_hidden.unsqueeze(0), # hidden\n torch.zeros_like(final_encoder_hidden).unsqueeze(0), # cell\n )\n output, _ = self.lstm(\n x.transpose(0, 1), # convert to shape `(tgt_len, bsz, dim)`\n initial_state,\n )\n x = output.transpose(0, 1) # convert to shape `(bsz, tgt_len, hidden)`\n\n # Project the outputs to the size of the vocabulary.\n x = self.output_projection(x)\n\n # Return the logits and ``None`` for the attention weights\n return x, None\n\n\nclass XLMR_Encoder_Decoder():\n def __init__(self, enc_pretrained_path=None, hidden_size=768, dec_embed_dim = 768, dropout = 0.1, task = None, device = None):\n\n self.encoder = XLMR_Encoder(pretrained_path=enc_pretrained_path, hidden_size=hidden_size, dropout_p = dropout)\n self.decoder = XLMR_Decoder(self.task.target_dictionary)\n"
},
{
"alpha_fraction": 0.5012245178222656,
"alphanum_fraction": 0.5091156363487244,
"avg_line_length": 48.66216278076172,
"blob_id": "bfc0dc730041b634bdcce7fe010a11e69701e8e6",
"content_id": "52eb2440100c284440bd8a7f3d33b3aa9e215ee7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3675,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 74,
"path": "/utils/train_utils.py",
"repo_name": "bubbleqi/xlm_roberta_NMT",
"src_encoding": "UTF-8",
"text": "from torch.utils.data import SequentialSampler, DataLoader\nfrom tqdm import tqdm\nfrom seqeval.metrics import f1_score, classification_report\nimport torch\nimport torch.nn.functional as F\n\n\ndef add_xlmr_args(parser):\n \"\"\"\n Adds training and validation arguments to the passed parser\n \"\"\"\n\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--pretrained_path\", default=None, type=str, required=True,\n help=\"pretrained XLM-Roberta model path\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n # Other parameters\n parser.add_argument(\"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. \\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3,\n type=int,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--weight_decay\", default=0.01, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--dropout', \n type=float, default=0.3,\n help = \"training dropout probability\")\n \n return parser\n"
}
] | 4 |
pip-install-HSE/GalleryHackathon | https://github.com/pip-install-HSE/GalleryHackathon | 607aafd19606e3be41c05d62b03457a17cda4a46 | 5e062c3649c9b8bafc237779f5786084067bac2e | b0e24741a6fa75c44926a150d76db5301d500812 | refs/heads/master | 2023-07-14T17:31:51.311359 | 2021-08-30T10:38:18 | 2021-08-30T10:38:18 | 398,371,648 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.552257239818573,
"alphanum_fraction": 0.5936920046806335,
"avg_line_length": 22.764705657958984,
"blob_id": "e40e16344aa34fe2c6f27ff3de37a9ad1aab7e8a",
"content_id": "25ae68e2846ef0013ef22611983f097bb0bba5be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1617,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 68,
"path": "/models/preprocessing.py",
"repo_name": "pip-install-HSE/GalleryHackathon",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[66]:\n\n\nimport pandas as pd\nimport pyarrow.parquet as pq\nimport datetime\n\n\n# In[67]:\n\n\ndf = pq.read_table(source=\"/home/toliman/Desktop/257-2021-4-player-log.parquet\").to_pandas()\n\n\n# In[68]:\n\n\nprint(df.shape)\nnew_df = pd.DataFrame(columns=('month', 'week_day', 'half_hour', 'efficiency', 'unix', 'views'), )\n\n\n# In[69]:\n\n\nfor i in range(df.shape[0]):\n if (i % 1000 == 0):\n print(i // 1000)\n obj = df.loc[i]\n in_t = datetime.datetime.fromtimestamp(obj['DateTimeInTick'] / 1000)\n out_t = datetime.datetime.fromtimestamp(obj['DateTimeOutTick'] / 1000)\n month_1 = in_t.month\n month_2 = out_t.month\n weekday_1 = in_t.weekday()\n weekday_2 = out_t.weekday()\n halfhour_1 = 2 * in_t.hour + 1 + int(in_t.minute >= 30)\n halfhour_2 = 2 * in_t.hour + 1 + int(in_t.minute >= 30)\n efficiency = obj['MacCountAll'] / obj['Duration']\n\n new_df = new_df.append({'month': month_1, 'week_day': weekday_1, 'half_hour': halfhour_1, 'efficiency': efficiency,\n 'unix': obj['DateTimeOutTick'], 'views': int(obj['MacCountAll'])},\n ignore_index=True)\n\n if month_1 != month_2 or weekday_1 != weekday_2 or halfhour_1 != halfhour_2:\n new_df = new_df.append(\n {'month': month_2, 'week_day': weekday_2, 'half_hour': halfhour_2, 'efficiency': efficiency,\n 'unix': obj['DateTimeOutTick'], 'views': int(obj['MacCountAll'])},\n ignore_index=True)\n\n\n# In[70]:\n\n\nnew_df.shape\n\n\n# In[71]:\n\n\nnew_df\n\n\n# In[72]:\n\n\nnew_df.to_csv('/home/toliman/Desktop/preprocessing_data.csv')\n\n"
},
{
"alpha_fraction": 0.5522674918174744,
"alphanum_fraction": 0.5937740206718445,
"avg_line_length": 30.731706619262695,
"blob_id": "b2dd9940add5cb8085e465ceb19530a5c28650f1",
"content_id": "a66f1e0856e1f0ea3f8ce7fa2b89da61284024b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2602,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 82,
"path": "/models/usage.py",
"repo_name": "pip-install-HSE/GalleryHackathon",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom tensorflow.keras import models\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport time\n\ninput_data = 1\nnetwork = models.load_model('billyboard.v3.h5')\ndata = pd.read_csv('/home/toliman/Desktop/holidays_new_mean_sum_dist_month.csv')\n\nK: int = 3 # is hyper parameter how much samples generate to make predict\n\nnetwork.predict(input_data)\n\ndef unix_to_month(int_unix):\n return datetime.datetime.fromtimestamp(int_unix).month - 1\n\n\ndef unix_to_day(int_unix):\n return datetime.datetime.fromtimestamp(int_unix).day - 1\n\n\ndef unix_to_half_hour_index(int_unix):\n obj = datetime.datetime.fromtimestamp(int_unix)\n return 1 + obj.hour * 2 + obj.minute // 30 - 1\n\n\ndef get_parameters():\n return {'month': -1,\n 'week_day': -1,\n 'half_hour': -1,\n 'efficiency': .75,\n 'views': .5,\n 'distances': .99,\n 'mean_half_hour': .35,\n 'mean_weekday': .35,\n 'holidays': .1, }\n\n\ndef transform_predict_to_real_value(val):\n pass\n\n\ndef prepare_stats_dataset(dates: List):\n stats_data = np.zeros(shape=(len(dates), 48, K))\n for i, date in enumerate(dates):\n week_day = date[0].weekday()\n int_unix_start = int(date[0].timestamp())\n int_unix_finish = int(date[1].timestamp())\n h_half_start = unix_to_half_hour_index(int_unix_start)\n h_half_finish = unix_to_half_hour_index(int_unix_finish)\n\n for h_half_param in range(h_half_start, h_half_finish + 1):\n for j in range(K): # get k last samples\n params = get_parameters()\n params['month'] = (date[0] - datetime.timedelta(days=i + 1)).month / 11\n params['week_day'] = (date[0] - datetime.timedelta(days=i + 1)).weekday() / 6\n params['half_hour'] = h_half_param / 47\n sample = np.array([x for x in params.values()])\n predicted_val = network.predict(sample.reshape((1, 9)))[0]\n print(predicted_val)\n\n for j in range(K): # get k last the same week day\n pass\n\n kwargs = get_parameters()\n kwargs['month'] = date\n # k_last_samples = np.zeros(shape=)\n pass\n\n\ndef make_predict(sample, stats_data):\n pass\n\n\ndate_arr = [(datetime.datetime(2021, 3, 21, 11, 30), datetime.datetime(2021, 3, 21, 16, 30)),\n (datetime.datetime(2021, 4, 21, 11, 30), datetime.datetime(2021, 4, 21, 16, 30)),\n (datetime.datetime(2021, 5, 21, 11, 30), datetime.datetime(2021, 5, 21, 16, 30))]\n\nprepare_stats_dataset(date_arr)\n"
},
{
"alpha_fraction": 0.6756756901741028,
"alphanum_fraction": 0.7297297120094299,
"avg_line_length": 20.14285659790039,
"blob_id": "1b98a1432956fffc33466c438dcfae6744630bc4",
"content_id": "d799ebecbda6e8220b6e8de931643f6c221443c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 148,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 7,
"path": "/models/main.py",
"repo_name": "pip-install-HSE/GalleryHackathon",
"src_encoding": "UTF-8",
"text": "import pandas as pd\n\npath = '/home/toliman/Desktop/month=2021-6 (1).parquet'\n\ndata = pd.read_parquet(path)\nprint(data.columns)\nprint(data.head(20))\n"
},
{
"alpha_fraction": 0.6086776852607727,
"alphanum_fraction": 0.6429752111434937,
"avg_line_length": 17.05223846435547,
"blob_id": "1e475238931dfa1263c8db7510e687323d59d097",
"content_id": "2e12872ad5ee5059d12df8a3d5fb563e2004f962",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2420,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 134,
"path": "/models/billyboard_preprocessing_array.py",
"repo_name": "pip-install-HSE/GalleryHackathon",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[200]:\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.losses import MSE, MAE\nfrom tensorflow.keras.activations import elu\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport tqdm\n\n\n# In[201]:\n\n\ndef unix_to_month(int_unix):\n return datetime.datetime.fromtimestamp(int_unix).month - 1\n\n\ndef unix_to_day(int_unix):\n return datetime.datetime.fromtimestamp(int_unix).day - 1\n\n\ndef unix_to_half_hour_index(int_unix):\n obj = datetime.datetime.fromtimestamp(int_unix)\n return 1 + obj.hour * 2 + obj.minute // 30 - 1\n\n\ndef fill_targets(data: pd.DataFrame):\n targets = np.zeros(shape=(12, 31, 49))\n for i in tqdm.tqdm(range(max(data.shape[0], 100))):\n obj = data.loc[i]\n day = unix_to_day(int(obj['unix']) // 1000)\n half_hour = unix_to_half_hour_index(int(obj['unix']) // 1000)\n month = unix_to_month(int(obj['unix']) // 1000)\n targets[month][day][half_hour] += int(obj['views'])\n return targets\n\n\n# In[7]:\n\n\n\n\nimport pandas as pd\n\ndata = pd.read_csv('/home/toliman/Desktop/holidays_new_mean_sum_dist_month.csv')\n\n\n# In[8]:\n\n\ndata\n\n\n# In[ ]:\n\n\n# print(data.columns)\n# print(data.values)\n# data = data.drop(columns=['unix', ], axis=1)\n\ntargets = fill_targets(data)\n\n\n# In[203]:\n\n\n\n# for x in targets:\n# print(x.max())\n\n\n# In[204]:\n\n\ntrain_y = np.zeros(shape=(data.shape[0]))\n\n\n# In[205]:\n\n\nfor i in tqdm.tqdm(range(max(data.shape[0], 100))):\n obj = data.loc[i]\n day = unix_to_day(int(obj['unix']) // 1000)\n half_hour = unix_to_half_hour_index(int(obj['unix']) // 1000)\n month = unix_to_month(int(obj['unix']) // 1000)\n train_y[i] = targets[month][day][half_hour]\n\n\n# In[206]:\n\n\ntrain_y -= train_y.min()\ntrain_y /= train_y.max()\n\nnp.save('/home/toliman/Desktop/targets', targets)\nnp.save('/home/toliman/Desktop/train_y', train_y)\n\ndata = data.drop('unix', axis=1)\n\n\n# In[208]:\n\n\ntrain_x = data.values\n\nfor i in tqdm.tqdm(range(max(train_x.shape[0], 100))):\n train_x[i] -= train_x[i].min()\n train_x[i] /= train_x[i].max()\n\nnp.save('/home/toliman/Desktop/train_x', train_x)\n\nexit(0)\n\n\n# In[ ]:\n\n\nstats = np.zeros(shape=(49,))\nfor x in targets:\n stats += x\n\nstats /= targets.shape[0]\n\nfrom matplotlib import pyplot as plt\n\nplt.plot(list(range(0, 49)), stats, color='red')\nplt.show()\n\n"
},
{
"alpha_fraction": 0.6307692527770996,
"alphanum_fraction": 0.6639053225517273,
"avg_line_length": 15.859999656677246,
"blob_id": "17f758b4d3fa1ab2f0f0f796cea27368bb898a44",
"content_id": "b6954b7599848a3230a9218f8c5a47f3e1729208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1690,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 100,
"path": "/models/billyboard_net.py",
"repo_name": "pip-install-HSE/GalleryHackathon",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[260]:\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.losses import MSE, MAE\nfrom tensorflow.keras.activations import elu\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nimport numpy as np\nimport pandas as pd\nimport datetime\n\n\n# In[261]:\n\n\ntrain_x = np.load('/home/toliman/Desktop/train_x.npy')\ntrain_y = np.load('/home/toliman/Desktop/train_y.npy')\ntargets = np.load('/home/toliman/Desktop/targets.npy')\n\n\n# In[265]:\n\n\ntrain_x\n\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_val, y_train, y_val = train_test_split(train_x, train_y, shuffle=False)\n\n\n# In[ ]:\n\n\nnetwork = Sequential(\n [\n layers.Dense(64, input_shape=(train_x.shape[1],), activation='relu'),\n layers.Dense(96, activation='relu'),\n layers.Dense(128, activation='relu'),\n layers.Dense(128, activation='relu'),\n layers.Dense(64, activation='relu'),\n layers.Dense(32, activation='relu'),\n layers.Dense(16, activation='relu'),\n layers.Dense(1, activation='sigmoid'),\n ]\n)\n\nnetwork.summary()\n\n\n# In[211]:\n\n\nfrom tensorflow.keras.metrics import RootMeanSquaredError\n\nnetwork.compile(\n optimizer=RMSprop(learning_rate=.00025),\n loss=MAE,\n metrics=[RootMeanSquaredError(), ],\n)\n\n\n# In[212]:\n\n\nnetwork.fit(\n x_train,\n y_train,\n batch_size=128,\n epochs=30,\n validation_data=(x_val, y_val,),\n)\n\nnetwork.save('billyboard.v4.h5')\n\n\n# In[229]:\n\n\nx = np.random.choice(train_x.shape[0], 100, replace=False)\n\n\n# In[ ]:\n\n\npred = network.predict(train_x[x])\n\nar = train_y[x]\n\nprint(list(zip(pred, ar)))\n\n\n# In[214]:\n\n\n\n\n"
}
] | 5 |
IvannLovich/todoList-API | https://github.com/IvannLovich/todoList-API | 0067228144b57737ae99cce0e2327dc38d3828ff | d76b8a774b6a86e5b84ad4901dcae0f3f873b594 | 3b638dc76edd70a4682f72eda5b96f53425d6bc0 | refs/heads/master | 2022-12-19T19:22:54.090487 | 2020-10-04T23:20:01 | 2020-10-04T23:20:01 | 292,096,034 | 0 | 0 | null | 2020-09-01T20:03:41 | 2020-10-04T23:02:40 | 2020-10-04T23:20:02 | Python | [
{
"alpha_fraction": 0.6924428939819336,
"alphanum_fraction": 0.7029876708984375,
"avg_line_length": 32.52941131591797,
"blob_id": "60eb092237fcfe32059eaac958c9083da9557425",
"content_id": "39fdcce8d480bc2cacd8e3868c6843e6c0192206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 569,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 17,
"path": "/todo/tests.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.test import Client\nfrom .models import Folder\n\nclient = Client()\n\nclass MainEndpointsTestCase(TestCase):\n\n def test_folder_endpoint(self):\n response = client.get('/api/todo/folders/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['content-type'], 'application/json')\n\n def test_task_endpoint(self):\n response = client.get('/api/todo/tasks/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['content-type'], 'application/json')"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 17.66666603088379,
"blob_id": "f0d72643f05eb975cb47e94303a38aca96d59a37",
"content_id": "b897de97dbfcd1a032aa30106fec2b2977f7adbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 280,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 15,
"path": "/users/urls.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import UserViewSet\n\n\nrouter = DefaultRouter()\n\nrouter.register('', UserViewSet, basename='users')\nurlpatterns = router.urls\n\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n"
},
{
"alpha_fraction": 0.7114285826683044,
"alphanum_fraction": 0.7200000286102295,
"avg_line_length": 28.16666603088379,
"blob_id": "ab2b23788764032ee59094a2e5d520c303a6b4d7",
"content_id": "4fea5ea3dcaacbc968936509cc8da28640a9d8ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 12,
"path": "/users/tests.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.test import TestCase\nfrom django.test import Client\nfrom .models import User\n\nclient = Client()\n\nclass UserEndpointTestCase(TestCase):\n\n def test_user_endpoint(self):\n response = client.get('/api/users/')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['content-type'], 'application/json')\n"
},
{
"alpha_fraction": 0.7864963412284851,
"alphanum_fraction": 0.7864963412284851,
"avg_line_length": 31.235294342041016,
"blob_id": "d5278839c3c7cac3a2cf2ec3acc75d0131241cd8",
"content_id": "f41c3be1655bd2c90e52763672e3295042446e8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 17,
"path": "/todo/views.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import viewsets\nfrom .serializers import TaskSerializer, FolderSerializer\nfrom .models import Task, Folder\n\n\nclass FolderViewSet(viewsets.ModelViewSet):\n serializer_class = FolderSerializer\n queryset = Folder.objects.all()\n\n\nclass TaskViewSet(viewsets.ModelViewSet):\n serializer_class = TaskSerializer\n queryset = Task.objects.all()\n filter_backends = (DjangoFilterBackend, )\n filterset_fields = ('folder', )\n"
},
{
"alpha_fraction": 0.7078947424888611,
"alphanum_fraction": 0.7078947424888611,
"avg_line_length": 32.043479919433594,
"blob_id": "ed3a6b207e3328b7319a3c6aaa45dc0434cb0f98",
"content_id": "f720f6dbf69080b7e3f51c78884bff4704319d65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 760,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 23,
"path": "/todo/serializers.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from rest_framework import serializers\nfrom .models import Task, Folder\nfrom users.models import User\nfrom users.serializers import UserSerializer\n\n\nclass FolderSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n user_id = serializers.PrimaryKeyRelatedField(\n write_only=True, queryset=User.objects.all(), source='user')\n class Meta:\n model = Folder\n fields = '__all__'\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n folder = FolderSerializer(read_only=True)\n folder_id = serializers.PrimaryKeyRelatedField(\n write_only=True, queryset=Folder.objects.all(), source='folder')\n\n class Meta:\n model = Task\n fields = ('id', 'title', 'completed', 'folder', 'folder_id')\n"
},
{
"alpha_fraction": 0.7644927501678467,
"alphanum_fraction": 0.7644927501678467,
"avg_line_length": 41.46154022216797,
"blob_id": "09ec7fff588c140157d9ac19ff1af6cac769df2a",
"content_id": "87c12d24df4043614f217f89088ccaeaac32bb21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 13,
"path": "/README.md",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "[](https://travis-ci.com/IvannLovich/todoList-API)\n\n### TODO LIST DJANGO API REST\n\n#### Description\n\nRESTful services for a simple toDo list make with Django and Django-Rest-Framework and deployed by Heroku. You can find the client for this API Rest in this other [git repo](https://github.com/IvannLovich/todoList-Client)\n\n---\n\n#### Documentation\n\nThe API documentation is through open API standard with swagger in this [url](https://dj-todolist.herokuapp.com/swagger/)\n"
},
{
"alpha_fraction": 0.7651933431625366,
"alphanum_fraction": 0.7651933431625366,
"avg_line_length": 21.625,
"blob_id": "03b6f8270306dd5e98960531eb8e75b494bde9db",
"content_id": "19d977ed2ebe71545a556490d85b57acbe93f22b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 16,
"path": "/todo/urls.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import TaskViewSet, FolderViewSet\n\n\nrouter = DefaultRouter()\n\nrouter.register('folders', FolderViewSet, basename='folders')\nrouter.register('tasks', TaskViewSet, basename='tasks')\nurlpatterns = router.urls\n\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n"
},
{
"alpha_fraction": 0.6252390146255493,
"alphanum_fraction": 0.6252390146255493,
"avg_line_length": 23.952381134033203,
"blob_id": "9a2fa156d7652ac92285fd1b7c1ef8a8d85f222d",
"content_id": "fefcc5bf3bfb735d51db2a795ecce4b7d08b670c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 523,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/users/models.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n# Create your models here.\nclass User(AbstractUser):\n\n email = models.EmailField(\n 'email address',\n unique=True,\n error_messages={\n 'unique': 'A user with that email is already exists.'\n }\n )\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['username', 'first_name', 'last_name']\n\n def __str__(self):\n return self.username\n\n def get_short_name(self):\n return self.username"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6782273650169373,
"avg_line_length": 26.3157901763916,
"blob_id": "d18e2461692ae7daf8af8e61e4c34bc370d15fe6",
"content_id": "c3ee4151d9854af8e3f0b5cd881158deeb8a6a59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 519,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 19,
"path": "/todo/models.py",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n\nclass Folder(models.Model):\n name = models.TextField(max_length=100)\n user = models.ForeignKey('users.User', related_name='folders', on_delete=models.CASCADE)\n\n def __repr__(self):\n return self.name\n\n\nclass Task(models.Model):\n title = models.CharField(max_length=100)\n completed = models.BooleanField(default=False)\n folder = models.ForeignKey(\n 'Folder', related_name='tasks', on_delete=models.CASCADE)\n\n def __repr__(self):\n return self.title\n"
},
{
"alpha_fraction": 0.5085065960884094,
"alphanum_fraction": 0.7051039934158325,
"avg_line_length": 16.66666603088379,
"blob_id": "f71474a4feadd2580f4e343833de89d4e03955ba",
"content_id": "43921d63f0dca5da864fac8d577f62de8ad4eb55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 30,
"path": "/requirements.txt",
"repo_name": "IvannLovich/todoList-API",
"src_encoding": "UTF-8",
"text": "boto3==1.10.6\nbotocore==1.13.6\ncertifi==2019.9.11\nchardet==3.0.4\ndj-database-url==0.5.0\nDjango==2.2.6\ndjango-braces==1.13.0\ndjango-crispy-forms==1.8.0\ndjango-extensions==2.2.5\ndjango-heroku==0.3.1\ndjangorestframework==3.11.0\ndocutils==0.15.2\necdsa==0.13.3\nenvs==1.3\nfuture==0.18.2\ngunicorn==20.0.4\nidna==2.8\njmespath==0.9.4\npython-dateutil==2.8.0\npsycopg2==2.8.5\npytz==2019.3\nrequests==2.22.0\ns3transfer==0.2.1\nsix==1.12.0\nsqlparse==0.3.0\nurllib3==1.25.6\nwhitenoise==5.0.1\ndjango-filter==2.1.*\ndjango-cors-headers==3.2.*\ndrf-yasg"
}
] | 10 |
superoven/tormon | https://github.com/superoven/tormon | 467ab198010d7d4fc65a193774ce10f8fc9a1884 | 8f8449a649bce82c8d8eaf83c7620ea183a05883 | b12df5ef2fc9f5e89eacdf46530435f49318c29b | refs/heads/master | 2021-01-01T17:32:13.859404 | 2015-03-03T17:09:05 | 2015-03-03T17:09:05 | 31,610,357 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5478008389472961,
"alphanum_fraction": 0.5507025122642517,
"avg_line_length": 34.78142166137695,
"blob_id": "aa7598229cda24b4105f4f7331e794d0a3ba8314",
"content_id": "d9518942a98bf4687200a766ecbb92d6c0397556",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6548,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 183,
"path": "/tormon",
"repo_name": "superoven/tormon",
"src_encoding": "UTF-8",
"text": "#!/home/taylor/Envs/tormon/bin/python\n\nimport os\nimport optparse\nimport feedparser\nimport re\nimport arrow\nimport uuid\nimport requests\nimport logging\n\n\n__usage__='\\nUsage: tormon -O /output/folder -u \"http://rss.feed.example\"'\n\nMAX_LOG_LINES = 1000\nMAX_ARCHIVED_ANIME = 300\n\n\ndef load_list(filename):\n try:\n return filter(lambda x: x != '',\n map(lambda x: x.lower(),\n open(filename, 'r').read().split('\\n')))\n except IOError:\n return []\n\n\ndef strip_to_line_count(lines, limit):\n if len(lines) > limit:\n return lines[len(lines) - limit:]\n return lines\n\n\nclass Main(object):\n '''\n tormon checks an rss feed for new torrents. When it finds a new .torrent, to\n downloads it to a specified output directory, where (presumably) a monitoring\n torrent program will download the corresponding file. \n '''\n def parse_options(self):\n usage = 'usage: %prog [options]'+__usage__\n parser = optparse.OptionParser(usage=usage)\n parser.add_option(\n '-O', '--output_dir', dest='output_dir', \n help='directory into which new torrents are saved',\n default='/media/taylor/stuff/Anime/torrent-files',\n metavar='DIR')\n parser.add_option(\n '-f', '--filetype', dest='filetype',\n action='append',\n default=['.torrent'],\n help='admissible file types', \n metavar='TYPE')\n parser.add_option(\n '-d', '--downloaded_torrents', dest='downloaded_torrents',\n default='/home/taylor/.downloaded_torrents',\n help='log of already downloaded torrents', \n metavar='FILE')\n parser.add_option(\n '-l', '--log', dest='log',\n help='log of tormons activity',\n default='/home/taylor/.log',\n metavar='FILE')\n parser.add_option(\n '-b', '--batch', dest='batch',\n help='file containing list of rss feed urls', \n metavar='FILE')\n parser.add_option(\n '-u', '--url', dest='url',\n action='append',\n default=[],\n help='url of the rss feed', \n metavar='URL')\n parser.add_option(\n '-e', '--error_log', dest='error_log',\n default='/home/taylor/.error_log',\n help='destination of the error_log',\n metavar='FILE')\n parser.add_option(\n '-W', '--watch-list', dest='watch_list',\n action='append',\n default=os.path.expanduser('~/.watch_list'),\n help='the file listing currently watching shows',\n metavar='FILE')\n parser.add_option(\n '-m','--mark_all_downloaded', dest='mark_all_downloaded',\n action='store_true', \n default=False,\n help=\"mark all torrents as already downloaded\")\n parser.add_option(\n '-M','--match_by_filename', dest='match_by_filename',\n action='store_true', \n default=False,\n help=\"recognize downloaded files by filename, not URL. 
Matching by URL is the default.\") \n (self.opt, args) = parser.parse_args()\n if self.opt.batch:\n for line in open(self.opt.batch, 'r'):\n line = line.strip()\n if line and not line.startswith('#'):\n self.opt.url.append(line)\n try:\n os.makedirs(self.opt.output_dir)\n except Exception:\n pass\n if len(self.opt.url) <= 0:\n logging.error(__usage__)\n logging.error(\"User must pass a valid RSS feed\")\n exit(1)\n logging.basicConfig(filename=self.opt.log, level=logging.DEBUG)\n self.shows = load_list(self.opt.watch_list)\n self.downloaded = load_list(self.opt.downloaded_torrents)\n self.errors = load_list(self.opt.error_log)\n\n def update_downloaded(self, url):\n self.downloaded.append(url)\n try:\n self.errors.remove(url)\n except ValueError:\n pass\n\n def clear_logs(self):\n self.downloaded = strip_to_line_count(self.downloaded, MAX_ARCHIVED_ANIME)\n self.errors = strip_to_line_count(self.errors, MAX_ARCHIVED_ANIME)\n\n def download_torrent(self, url):\n req = requests.get(url)\n filename = self.url2filename(url)\n target_file = os.path.join(self.opt.output_dir, filename + \".torrent\")\n logging.info('Downloading to: %s', target_file)\n with open(target_file, 'w+') as f:\n f.write(req.content)\n self.update_downloaded(url)\n\n def url2filename(self, url):\n return str(uuid.uuid4())\n\n def has_been_downloaded(self, url):\n if self.opt.match_by_filename:\n filename = self.url2filename(url)\n return (filename in [self.url2filename(link) for link in self.downloaded])\n else:\n return (url in self.downloaded)\n\n def parse_rss_feed(self):\n for url in self.opt.url:\n logging.info(arrow.utcnow().to('US/Pacific').format('YYYY-MM-DD HH:mm:ss'))\n parsed = feedparser.parse(url)\n new_count = 0\n for entry in parsed.entries:\n if not self.has_been_downloaded(entry.link)\\\n and self.check_title(entry.title):\n new_count += 1\n if self.opt.mark_all_downloaded:\n logging.info('Marking %s as downloaded', entry.link)\n self.update_downloaded(entry.link)\n else:\n logging.info('Downloading Episode of %s', entry.title)\n self.download_torrent(entry.link)\n logging.info('Torrent total: %s', new_count)\n\n def check_title(self, title):\n m = re.search('^.*\\[.+\\](.+) \\- [0-9]+.*$', title)\n ans = m and m.group(1).lstrip().rstrip()\n return ans and ans.lower() in self.shows\n\n def save_list_of_already_downloaded_torrents(self):\n with open(self.opt.downloaded_torrents, 'w+') as f:\n f.write('\\n'.join(self.downloaded))\n with open(self.opt.error_log, 'w+') as f:\n f.write('\\n'.join(self.errors))\n\n def __init__(self):\n self.parse_options()\n try:\n self.parse_rss_feed()\n except KeyboardInterrupt:\n pass\n finally:\n self.clear_logs()\n self.save_list_of_already_downloaded_torrents()\n\nif __name__ == '__main__':\n Main()\n"
}
] | 1 |
IROCX/SudokuSolverPython | https://github.com/IROCX/SudokuSolverPython | e7da6f4731fd8c299a3fbaf60ccea34a44c87e20 | 430cff1a6a388f19350b6ff4c2a1ac4b3604cb18 | ed7ad3888be93c133b744726ce1f70d99500d2af | refs/heads/master | 2022-11-24T07:59:30.645916 | 2020-07-26T05:48:09 | 2020-07-26T05:48:09 | 282,584,012 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7848410606384277,
"alphanum_fraction": 0.7958435416221619,
"avg_line_length": 57.42856979370117,
"blob_id": "048ee24ec386bdbb7606985689c30821e3456683",
"content_id": "9f48bf2102bed5d36d83d9dad1e0f47768737727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 14,
"path": "/README.md",
"repo_name": "IROCX/SudokuSolverPython",
"src_encoding": "UTF-8",
"text": "# SudokuSolverPython\nA simple python script to solve sudoku for you.\n\nThis illustrates a simple 9x9 Sudoku Solving algorithm python script.\n\nIt uses a possible() function to check the row, column and 3x3 square corresponding to the current block for a number n.\n\nFunction possible is then incorporated in another function solver() which goes through the grid and searches for 0s that represent empty blocks.\nIt then tries all the numbers 1 through 9 for a particular empty block and selects a possible value x for it. It then solves the grid recursively \nfor that selection of x for that block.\nIf it reaches dead end it makes the block 0 again and checks for next block containing 0 and solves the entire grid again for \na selection of x for that block.\n\nAs a result, at the end we wil get a solved Sudoku certainly.\n"
},
{
"alpha_fraction": 0.44410568475723267,
"alphanum_fraction": 0.4684959352016449,
"avg_line_length": 20.39130401611328,
"blob_id": "a1558f6c787fa018e60c0617caffd9640726e5a7",
"content_id": "9b7a8d484739e09fc00a0068c3a8c784165501ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 984,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 46,
"path": "/SudokuSolver.py",
"repo_name": "IROCX/SudokuSolverPython",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\ndef possible(x, y, n):\n global example\n for i in range(0,9):\n if example[x][i] == n:\n return False\n for i in range(0,9):\n if example[i][y] == n:\n return False\n x0 = (x//3)*3\n y0 = (y//3)*3\n\n for i in range(0,3):\n for j in range(0,3):\n if example[x0+i][y0+j]==n:\n return False\n return True\n\ndef solver(grid):\n for i in range(9):\n for j in range(9):\n if grid[i][j] == 0:\n for k in range(1,10):\n if possible(i,j,k):\n grid[i][j] = k\n solver(grid)\n grid[i][j] = 0\n return\n print(np.matrix(grid))\n\n\nexample = []\nprint(\"Input Sudoku : \")\n\nfor i in range(9):\n arr = list(map(int, input().split()))\n example.append(arr)\n\nprint()\nprint(\"Problem Sudoku : \")\nprint(np.matrix(example))\nprint()\n\nprint(\"Solution Sudoku : \")\nsolver(example)\n"
}
] | 2 |
Teszko/dlfninja | https://github.com/Teszko/dlfninja | 016e6cf0896834f869a71ee3f9130e1a6c881a6f | b4b5f8bbbe89dcae7086208cf05acfa047b87dc9 | e89e25dd9487f626db37a410b87702fdde426528 | refs/heads/master | 2021-01-23T21:42:53.416637 | 2018-06-21T13:41:47 | 2018-06-21T13:41:47 | 102,900,625 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6220043301582336,
"alphanum_fraction": 0.6263616681098938,
"avg_line_length": 15.981481552124023,
"blob_id": "332c3e1db990187fb5a9fc9f5afc3ed2a608ab30",
"content_id": "7d5d61515415f25ba51abebfaf711b74ae5dda13",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 918,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 54,
"path": "/dlfninja/audio.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "\nimport gi\ngi.require_version('Gst', '1.0')\nfrom gi.repository import Gst\n\n\nplayer = None\nplaying = False\n\n\ndef on_tag(bus, msg):\n taglist = msg.parse_tag()\n print('on_tag:')\n for key in taglist.keys():\n print('\\t%s = %s' % (key, taglist[key]))\n\n\ndef init_player():\n global player\n Gst.init([])\n\n player = Gst.ElementFactory.make(\"playbin\", \"player\")\n\n bus = player.get_bus()\n bus.enable_sync_message_emission()\n bus.add_signal_watch()\n bus.connect('message::tag', on_tag)\n\n\ndef set_uri(uri):\n global player\n player.set_property('uri', uri)\n\n\ndef set_playing(bool_playing):\n global playing\n if bool_playing:\n playing = True\n playing = False\n\n\ndef is_playing():\n return playing\n\n\ndef play():\n global player\n set_playing(1)\n player.set_state(Gst.State.PLAYING)\n\n\ndef null():\n global player\n set_playing(0)\n player.set_state(Gst.State.NULL)\n"
},
{
"alpha_fraction": 0.6790205240249634,
"alphanum_fraction": 0.6836532354354858,
"avg_line_length": 22.625,
"blob_id": "774c01d4635ba69d70449313d3b363fce6a94a79",
"content_id": "2b1cc1006554ff1abb2f8ce25fc715c7c03ebd0d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1512,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 64,
"path": "/dlfninja/helpers.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "# coding=utf-8\n\nimport pickle\n\nimport requests\n\nXPATH_URL_OVERVIEW = '//ul/li/a[text()=\"Nachhören\"]/@href'\n\nXPATH_DATE_OVERVIEW = '//span[@class=\"date\"]/text()'\n\nXPATH_NAME_OVERVIEW = '//h3/text()'\n\n\ndef write_page_content_to_file(file, page_content):\n with open(file, 'wb') as f:\n pickle.dump(page_content, f)\n\n\ndef get_page_from_file(file):\n page = lambda: None\n with open(file, 'rb') as f:\n page.content = pickle.load(f)\n return page\n\n\ndef xpath_query(html_tree, xpath_str):\n \"\"\"Apply xpath query to html tree, return list of elements\"\"\"\n return html_tree.xpath(xpath_str)\n\n\ndef xpath_query_single_element(html_tree, xpath_str):\n query = xpath_query(html_tree, xpath_str)\n query_result = None\n if len(query):\n query_result = query[0]\n return query_result\n\n\ndef query_url_overview(subtree):\n program_url = xpath_query_single_element(subtree, XPATH_URL_OVERVIEW)\n return program_url\n\n\ndef query_date_overview(subtree):\n program_date = xpath_query_single_element(subtree, XPATH_DATE_OVERVIEW)\n return program_date\n\n\ndef query_name_overview(subtree):\n program_name = xpath_query_single_element(subtree, XPATH_NAME_OVERVIEW)\n return program_name\n\n\ndef request_page_content(url):\n req = requests.get(url)\n return req\n\n\ndef query_name_episode(html_tree):\n return xpath_query_single_element(html_tree, '//div[2]/h3/a/span/text()')\n\n\ndef query_url_episode(html_tree):\n return xpath_query_single_element(html_tree, '//div[2]/h3/a/@href')"
},
{
"alpha_fraction": 0.6737188696861267,
"alphanum_fraction": 0.6760695576667786,
"avg_line_length": 33.86885070800781,
"blob_id": "bc8572e6bad4a8bcdb203fbd5bd68a0f1202f058",
"content_id": "685e4068f4f3a85c6a4bbf77219aa40e70042dd0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2127,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 61,
"path": "/dlfninja/core.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "import os.path\n\nfrom lxml import html, etree\n\nfrom dlfninja.episode import Episode\nfrom dlfninja.helpers import xpath_query, query_url_overview, query_date_overview, \\\n query_name_overview, get_page_from_file, request_page_content, write_page_content_to_file, \\\n query_name_episode, query_url_episode\nfrom dlfninja.program import Program\n\nXPATH_SUBTREE_PROGRAM = '//*[@id=\"content\"]/div/section[1]/div[1]/article'\n\nDLF_URL = 'http://www.deutschlandfunk.de/'\n\n\nprograms = []\n\n\ndef update_episode_list(program, html_tree):\n \"\"\"Scraps episodes from DLF page 'Nachhoeren' for a specific program\"\"\"\n program.clear_episodes()\n episode_trees = xpath_query(html_tree, '//*[@id=\"content\"]/div/section[1]/div[1]/ul/li')\n for i, episode_tree in enumerate(episode_trees):\n new_episode = Episode(id=i)\n subtree = etree.ElementTree(episode_tree)\n new_episode.set_name(query_name_episode(subtree))\n new_episode.set_url(query_url_episode(subtree))\n program.add_episode(new_episode)\n\n\ndef print_programs():\n for program in programs:\n print(program)\n\n\ndef get_page_tree(url):\n \"\"\"Returns the html tree for a given url and caches the page\"\"\"\n file_name = url.split('/')[-1]\n if os.path.isfile('data/'+file_name):\n page = get_page_from_file('data/'+file_name)\n else:\n page = request_page_content(url)\n write_page_content_to_file('data/'+file_name, page.content)\n html_tree = html.fromstring(page.content)\n return html_tree\n\n\ndef update_programs_list(overview_tree):\n \"\"\"Scraps programs from DLF page 'Alle Sendungen'\"\"\"\n del programs[:]\n program_trees = xpath_query(overview_tree, XPATH_SUBTREE_PROGRAM)\n for i, program_tree in enumerate(program_trees):\n new_program = Program(id=i)\n subtree = etree.ElementTree(program_tree)\n new_program.set_name(query_name_overview(subtree))\n new_program.set_date(query_date_overview(subtree))\n url = query_url_overview(subtree)\n new_program.set_url(url)\n if url is None:\n new_program.set_disabled()\n programs.append(new_program)\n"
},
{
"alpha_fraction": 0.5458515286445618,
"alphanum_fraction": 0.5458515286445618,
"avg_line_length": 21.341463088989258,
"blob_id": "f2d15dd9ba04028d80069b5d15b376b1e814a514",
"content_id": "3e0fb8791a4601fcfb82a922da2fb83778f1209d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 916,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 41,
"path": "/dlfninja/program.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "class Program:\n name = 'unknown'\n url = None\n date = None\n details = None\n id = None\n episodes = []\n disabled = False\n\n def __init__(self, name=None, id=None):\n if name is not None:\n self.name = name\n self.id = id\n\n def __str__(self):\n return \"<id=\\\"%d\\\" name=\\\"%s\\\" date=\\\"%s\\\" url=\\\"%s\\\">\" % (self.id, self.name, self.date, self.url)\n\n def set_name(self, name):\n self.name = name\n\n def set_url(self, url):\n self.url = url\n\n def set_date(self, date):\n self.date = date\n\n def set_details(self, details):\n self.details = details\n\n def add_episode(self, episode):\n self.episodes.append(episode)\n\n def clear_episodes(self):\n del self.episodes[:]\n\n def print_episodes(self):\n for episode in self.episodes:\n print(episode)\n\n def set_disabled(self):\n self.disabled = True\n"
},
{
"alpha_fraction": 0.5960490703582764,
"alphanum_fraction": 0.5994550585746765,
"avg_line_length": 24.75438690185547,
"blob_id": "9434d2a557144907a8ffc10a41cfc15999971c80",
"content_id": "0398258ee4cbd4979c6b5cdff597ca57060fe4de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1468,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 57,
"path": "/dlfninja.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "import curses\n\nimport dlfninja.core as dlf\nimport dlfninja.curses as dlfcurses\nimport dlfninja.audio as audio\n\n\ndef main(stdscr):\n\n stdscr.keypad(True)\n curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)\n\n stdscr.clear()\n\n scr_height = curses.LINES\n scr_width = curses.COLS\n\n dlfcurses.init_overview_menu(dlf.programs, scr_width, scr_height)\n\n banner = dlfcurses.Banner()\n\n stdscr.refresh()\n dlfcurses.overview_menu.draw()\n banner.draw()\n\n while True:\n c = stdscr.getch()\n\n if c == ord('q'):\n break # Exit Program\n elif c == curses.KEY_UP or c == ord('k'):\n dlfcurses.active_menu.scroll_up()\n elif c == curses.KEY_DOWN or c == ord('j'):\n dlfcurses.active_menu.scroll_down()\n elif c == curses.KEY_RIGHT:\n dlfcurses.active_menu.expand_element()\n elif c == curses.KEY_LEFT:\n dlfcurses.active_menu = dlfcurses.overview_menu\n elif c == ord('s'):\n if audio.is_playing():\n audio.null()\n else:\n audio.null()\n\n stdscr.refresh()\n dlfcurses.active_menu.draw()\n banner.draw()\n\n\nif __name__ == '__main__':\n overview_tree = dlf.get_page_tree('http://www.deutschlandfunk.de/sendungen-a-z.348.de.html')\n dlf.update_programs_list(overview_tree)\n\n audio.init_player()\n\n curses.wrapper(main)\n"
},
{
"alpha_fraction": 0.5055837631225586,
"alphanum_fraction": 0.515059232711792,
"avg_line_length": 30.604278564453125,
"blob_id": "2e9ae3f060aeb32df4974de0d847da6d1e4120ac",
"content_id": "131c808b5bb7e3ad1d86941ced9d5bd8516044cb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5911,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 187,
"path": "/dlfninja/curses.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "import curses\nfrom dlfninja.core import get_page_tree, update_episode_list\nimport dlfninja.audio as audio\n\nBANNER = \"\"\"\n ______ _____ ________ ____ _____ _____ ____ _____ _____ _ \n|_ _ `.|_ _| |_ __ | |_ \\|_ _||_ _||_ \\|_ _| |_ _| / \\ \n | | `. \\ | | | |_ \\_| | \\ | | | | | \\ | | | | / _ \\ \n | | | | | | _ | _| | |\\ \\| | | | | |\\ \\| | _ | | / ___ \\ \n _| |_.' /_| |__/ | _| |_ _| |_\\ |_ _| |_ _| |_\\ |_ | |__' | _/ / \\ \\_ \n|______.'|________||_____| |_____|\\____||_____||_____|\\____|`.____.'|____| |____|\n v0.1.0\"\"\"\n\noverview_menu = None\nactive_menu = None\nepisodes_menu = None\n\n\nclass Banner:\n win = None\n\n def __init__(self):\n self.win = curses.newwin(10, 86, ((curses.LINES // 4) - 5), (curses.COLS - 86)//2 - 1)\n\n def draw(self):\n self.win.addstr(0, 0, BANNER, curses.color_pair(1))\n self.win.refresh()\n\n\nclass Entry:\n text = None\n text_right = None\n url = None\n program = None\n\n def set_text(self, text):\n self.text = text\n\n def set_text_right(self, text):\n if text is not None:\n self.text_right = text.strip().rstrip('.')\n\n def set_url(self, url):\n self.url = url\n\n def set_program(self, program):\n self.program = program\n\n\nclass Menu:\n win = None\n entries = None\n selected = 0\n title = None\n subtext = None\n width = 0\n height = 0\n pos_x = 0\n pos_y = 0\n max_lines = 0\n type = None\n\n def __init__(self, type):\n self.entries = []\n self.type = type\n\n def set_title(self, title):\n self.title = title\n\n def set_subtext(self, subtext):\n self.subtext = subtext\n\n def scroll_down(self):\n num_elements = len(self.entries)\n if num_elements:\n self.selected = (self.selected + 1) % num_elements\n\n def scroll_up(self):\n num_elements = len(self.entries)\n if not num_elements:\n return None\n self.selected = self.selected - 1\n if self.selected < 0:\n self.selected = num_elements - 1\n\n def init(self, x, y, w, h):\n self.height = h\n self.width = w\n self.pos_x = x\n self.pos_y = y\n self.win = curses.newwin(h, w, y, x)\n self.win.border(0)\n self.max_lines = self.height - 2\n\n def draw(self):\n if self.win is None:\n return None\n\n self.win.clear()\n self.win.border(0)\n\n if self.title is not None:\n self.win.addstr(0, 2, self.title, curses.color_pair(1))\n\n if self.subtext is not None:\n self.win.addstr(self.height - 1, 2, self.subtext, curses.color_pair(1))\n\n if self.subtext is not None:\n pass\n\n for i in range(0, self.max_lines):\n j = self.max_lines * (self.selected // self.max_lines)\n if j+i >= len(self.entries):\n break\n\n entry = self.entries[j+i]\n if j+i == self.selected:\n if entry.text is not None:\n self.win.addstr(i+1, 1, \"> \" + entry.text, curses.color_pair(1))\n if entry.text_right is not None:\n self.win.addstr(i+1, self.width - len(entry.text_right) - 2, entry.text_right, curses.color_pair(1))\n else:\n color = curses.color_pair(0)\n if entry.program and entry.program.disabled:\n color = curses.color_pair(2)\n if entry.text is not None:\n self.win.addstr(i+1, 3, entry.text, color)\n if entry.text_right is not None:\n self.win.addstr(i+1, self.width - len(entry.text_right) - 2, entry.text_right, color)\n\n self.win.refresh()\n\n def add_entry(self, entry):\n self.entries.append(entry)\n\n def expand_element(self):\n selected_entry = self.entries[self.selected]\n if self.type == 'overview':\n if selected_entry.url is not None:\n init_episodes_menu(selected_entry, curses.COLS, curses.LINES)\n if self.type == 'episodes':\n if selected_entry.url is not None:\n audio.null()\n 
audio.set_uri(selected_entry.url)\n audio.play()\n\n\ndef init_overview_menu(programs, scr_width, scr_height):\n global overview_menu\n global active_menu\n overview_menu = Menu('overview')\n active_menu = overview_menu\n overview_menu.init(0, scr_height // 2, scr_width, scr_height - scr_height // 2)\n overview_menu.set_title(\" Alle Sendungen({}) \".format(len(programs)))\n overview_menu.set_subtext(\" quit(q) play/pause(space) \")\n\n for i, prog in enumerate(programs):\n entry = Entry()\n entry.set_text(str(i+1)+\"\\t\"+prog.name)\n entry.set_text_right(prog.date)\n entry.set_url(prog.url)\n entry.set_program(prog)\n overview_menu.add_entry(entry)\n\n\ndef init_episodes_menu(entry, scr_width, scr_height):\n global episodes_menu\n global active_menu\n active_menu.win.clear()\n episodes_menu = Menu('episodes')\n active_menu = episodes_menu\n\n episodes_tree = get_page_tree('http://www.deutschlandfunk.de/'+entry.url)\n update_episode_list(entry.program, episodes_tree)\n\n episodes_menu.init(0, scr_height // 2, scr_width, scr_height - scr_height // 2)\n episodes_menu.set_title(\" {}({}) \".format(entry.text, len(entry.program.episodes)))\n episodes_menu.set_subtext(\" zurück(left) play/pause(space) \")\n episodes_menu.draw()\n\n for i, episode in enumerate(entry.program.episodes):\n ep = Entry()\n ep.set_text(str(i+1)+\"\\t\"+episode.name)\n # ep.set_text_right(episode.date)\n ep.set_url(episode.url)\n # ep.set_program(episode)\n episodes_menu.add_entry(ep)\n"
},
{
"alpha_fraction": 0.5169946551322937,
"alphanum_fraction": 0.5169946551322937,
"avg_line_length": 20.5,
"blob_id": "82114f9c5864504007290031c1a47b4ed0805027",
"content_id": "560fe938bc334871c11c127423c6fd03e9938f3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 559,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 26,
"path": "/dlfninja/episode.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "class Episode:\n name = None\n date = None\n url = None\n id = None\n length = None\n author = None\n available_until = None\n program = None\n\n def __init__(self, name=None, id=None):\n if name is not None:\n self.name = name\n self.id = id\n\n def __str__(self):\n return \"<name=\\\"%s\\\" date=\\\"%s\\\" url=\\\"%s\\\">\" % (self.name, self.date, self.url)\n\n def set_name(self, name):\n self.name = name\n\n def set_url(self, url):\n self.url = url\n\n def set_date(self, date):\n self.date = date\n"
},
{
"alpha_fraction": 0.6333333253860474,
"alphanum_fraction": 0.6333333253860474,
"avg_line_length": 14,
"blob_id": "d90da8efe5cd345eec5222c45f09f5cfe8fbf728",
"content_id": "53cd9681fb314609e1edc7b182fdafc0afba5b75",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 30,
"license_type": "permissive",
"max_line_length": 20,
"num_lines": 2,
"path": "/dlfninja/input.py",
"repo_name": "Teszko/dlfninja",
"src_encoding": "UTF-8",
"text": "def handle_input(c):\n pass\n"
}
] | 8 |
PedroPF/hackerrank | https://github.com/PedroPF/hackerrank | 0c118e488e76b35fe8090d0958ba3460deb0ab1d | 045fc95abe336acea1511d1d8dbeadd5e13ac11d | 799764a4e0b102b869b11c7ba7de2de8a5675251 | refs/heads/master | 2020-05-30T03:08:35.176818 | 2020-03-17T21:31:48 | 2020-03-17T21:31:48 | 189,510,057 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5614525079727173,
"alphanum_fraction": 0.5698323845863342,
"avg_line_length": 21.375,
"blob_id": "77cc9314a7e014a240cb02a55a1dc1d26eee6069",
"content_id": "e8abf3d6b6091dcea95dae3615086fcfe38c4aef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 716,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 32,
"path": "/min_swaps_2.py",
"repo_name": "PedroPF/hackerrank",
"src_encoding": "UTF-8",
"text": "#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the minimumSwaps function below.\ndef minimumSwaps(arr):\n visited = set()\n count = 0\n for pos, val in enumerate(arr):\n if(pos in visited): # already visited this position, so ignore\n continue\n visited.add(pos)\n if(pos+1 == val): # no swap needed\n continue\n i = arr[pos]-1\n while(i != pos): # runs the whole cycle counting the number of swaps\n visited.add(i)\n count += 1\n i = arr[i]-1\n return count\n\nif __name__ == '__main__':\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n print(minimumSwaps(arr))\n"
}
] | 1 |
gakonst/raiden-contracts | https://github.com/gakonst/raiden-contracts | 0df75a3e89379579a45d5735ffe5e7b50f72c2e2 | ef6f108cdf16c75b963b2c6317e698b7c8d44042 | 8af0a106c1ab28ed6264e653f3c74bd85884770a | refs/heads/master | 2020-03-22T01:33:45.076701 | 2018-06-29T16:09:42 | 2018-06-29T16:09:42 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6145365834236145,
"alphanum_fraction": 0.6304320096969604,
"avg_line_length": 30.582996368408203,
"blob_id": "32d61934e3122fcc2e69d643c019a7392e257304",
"content_id": "5cadd9462f54b75b6a0925dd55ea83ffb43a50e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7801,
"license_type": "permissive",
"max_line_length": 94,
"num_lines": 247,
"path": "/raiden_contracts/tests/test_channel_settle.py",
"repo_name": "gakonst/raiden-contracts",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom copy import deepcopy\nfrom random import randint\nfrom raiden_contracts.constants import (\n EVENT_CHANNEL_SETTLED,\n TEST_SETTLE_TIMEOUT_MIN,\n)\nfrom raiden_contracts.utils.events import check_channel_settled\nfrom raiden_contracts.tests.fixtures.channel_test_values import channel_settle_test_values\nfrom raiden_contracts.tests.fixtures.channel import call_settle\nfrom raiden_contracts.tests.fixtures.config import fake_hex, fake_bytes\nfrom raiden_contracts.tests.utils import (\n MAX_UINT256,\n get_settlement_amounts,\n get_onchain_settlement_amounts,\n)\n\n\ndef test_max_safe_uint256(token_network, token_network_test):\n max_safe_uint256 = token_network_test.functions.get_max_safe_uint256().call()\n\n assert token_network.functions.MAX_SAFE_UINT256().call() == max_safe_uint256\n assert max_safe_uint256 == MAX_UINT256\n\n\ndef test_settle_no_bp_success(\n web3,\n token_network,\n create_channel_and_deposit,\n get_accounts,\n create_balance_proof,\n):\n (A, B) = get_accounts(2)\n deposit_A = 10\n deposit_B = 6\n settle_timeout = TEST_SETTLE_TIMEOUT_MIN\n locksroot = fake_bytes(32)\n additional_hash = fake_bytes(32)\n create_channel_and_deposit(A, B, deposit_A, deposit_B)\n\n # Close channel with no balance proof\n token_network.functions.closeChannel(\n B,\n locksroot,\n 0,\n additional_hash,\n fake_bytes(64),\n ).transact({'from': A})\n\n # Do not call updateNonClosingBalanceProof\n\n # Settlement window must be over before settling the channel\n web3.testing.mine(settle_timeout)\n\n # Settling the channel should work with no balance proofs\n token_network.functions.settleChannel(\n A,\n 0,\n 0,\n locksroot,\n B,\n 0,\n 0,\n locksroot,\n ).transact({'from': A})\n\n\[email protected]('channel_test_values', channel_settle_test_values)\ndef test_settle_channel_state(\n web3,\n get_accounts,\n custom_token,\n token_network,\n create_channel_and_deposit,\n withdraw_channel,\n close_and_update_channel,\n settle_state_tests,\n channel_test_values,\n):\n number_of_channels = 5\n accounts = get_accounts(2 * number_of_channels)\n (vals_A0, vals_B0) = channel_test_values\n\n # We mimic old balance proofs here, with a high locked amount and lower transferred amount\n # We expect to have the same settlement values as the original values\n\n def equivalent_transfers(balance_proof):\n new_balance_proof = deepcopy(balance_proof)\n new_balance_proof.locked = randint(\n balance_proof.locked,\n balance_proof.transferred + balance_proof.locked,\n )\n new_balance_proof.transferred = (\n balance_proof.transferred +\n balance_proof.locked -\n new_balance_proof.locked\n )\n return new_balance_proof\n\n vals_A_reversed = deepcopy(vals_A0)\n vals_A_reversed.locked = vals_A0.transferred\n vals_A_reversed.transferred = vals_A0.locked\n\n vals_B_reversed = deepcopy(vals_B0)\n vals_B_reversed.locked = vals_B0.transferred\n vals_B_reversed.transferred = vals_B0.locked\n\n new_values = [\n (vals_A0, vals_B0),\n (vals_A_reversed, vals_B_reversed),\n ] + [\n sorted(\n [\n equivalent_transfers(vals_A0),\n equivalent_transfers(vals_B0),\n ],\n key=lambda x: x.transferred + x.locked,\n reverse=False,\n ) for no in range(0, number_of_channels - 1)\n ]\n\n # Calculate how much A and B should receive\n settlement = get_settlement_amounts(vals_A0, vals_B0)\n # Calculate how much A and B receive according to onchain computation\n settlement2 = get_onchain_settlement_amounts(vals_A0, vals_B0)\n\n for no in range(0, number_of_channels + 1):\n A = accounts[no]\n B = accounts[no + 1]\n (vals_A, vals_B) = 
new_values[no]\n vals_A.locksroot = fake_bytes(32, '02')\n vals_B.locksroot = fake_bytes(32, '03')\n\n create_channel_and_deposit(A, B, vals_A.deposit, vals_B.deposit)\n\n withdraw_channel(A, vals_A.withdrawn, B)\n withdraw_channel(B, vals_B.withdrawn, A)\n\n close_and_update_channel(\n A,\n vals_A,\n B,\n vals_B,\n )\n\n web3.testing.mine(TEST_SETTLE_TIMEOUT_MIN)\n\n pre_balance_A = custom_token.functions.balanceOf(A).call()\n pre_balance_B = custom_token.functions.balanceOf(B).call()\n pre_balance_contract = custom_token.functions.balanceOf(token_network.address).call()\n\n call_settle(token_network, A, vals_A, B, vals_B)\n\n # We do the balance & state tests here for each channel and also compare with\n # the expected settlement amounts\n settle_state_tests(\n A,\n vals_A,\n B,\n vals_B,\n pre_balance_A,\n pre_balance_B,\n pre_balance_contract,\n )\n\n # We compute again the settlement amounts here to compare with the other channel\n # settlement test values, which should be equal\n\n # Calculate how much A and B should receive\n settlement_equivalent = get_settlement_amounts(vals_A, vals_B)\n assert (\n settlement.participant1_balance +\n settlement.participant2_locked == settlement_equivalent.participant1_balance +\n settlement_equivalent.participant2_locked\n )\n assert (\n settlement.participant2_balance +\n settlement.participant1_locked == settlement_equivalent.participant2_balance +\n settlement_equivalent.participant1_locked\n )\n\n # Calculate how much A and B receive according to onchain computation\n settlement2_equivalent = get_onchain_settlement_amounts(vals_A, vals_B)\n assert (\n settlement2.participant1_balance +\n settlement2.participant2_locked == settlement2_equivalent.participant1_balance +\n settlement2_equivalent.participant2_locked\n )\n assert (\n settlement2.participant2_balance +\n settlement2.participant1_locked == settlement2_equivalent.participant2_balance +\n settlement2_equivalent.participant1_locked\n )\n\n\ndef test_settle_channel_event(\n web3,\n get_accounts,\n token_network,\n create_channel,\n channel_deposit,\n create_balance_proof,\n create_balance_proof_update_signature,\n event_handler,\n):\n ev_handler = event_handler(token_network)\n (A, B) = get_accounts(2)\n deposit_A = 10\n settle_timeout = TEST_SETTLE_TIMEOUT_MIN\n locksroot = fake_hex(32, '00')\n\n channel_identifier = create_channel(A, B)[0]\n channel_deposit(A, deposit_A, B)\n\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 1, locksroot)\n balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, locksroot)\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})\n token_network.functions.updateNonClosingBalanceProof(\n A, B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': B})\n\n web3.testing.mine(settle_timeout)\n txn_hash = token_network.functions.settleChannel(\n B,\n 5,\n 0,\n locksroot,\n A,\n 10,\n 0,\n locksroot,\n ).transact({'from': A})\n\n ev_handler.add(txn_hash, EVENT_CHANNEL_SETTLED, check_channel_settled(\n channel_identifier,\n 5,\n 5,\n ))\n ev_handler.check()\n"
},
{
"alpha_fraction": 0.5901140570640564,
"alphanum_fraction": 0.6031263470649719,
"avg_line_length": 29.038070678710938,
"blob_id": "4126ef612de2f45e36cede37f51c91d4fb3166bb",
"content_id": "6332bbf6957613fa107ed9baedcb42b0a15bef79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11835,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 394,
"path": "/raiden_contracts/tests/test_channel_update_transfer.py",
"repo_name": "gakonst/raiden-contracts",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom eth_tester.exceptions import TransactionFailed\nfrom raiden_contracts.constants import (\n EVENT_CHANNEL_BALANCE_PROOF_UPDATED,\n CHANNEL_STATE_OPENED,\n CHANNEL_STATE_NONEXISTENT,\n)\nfrom raiden_contracts.utils.events import check_transfer_updated\nfrom .fixtures.config import fake_bytes, empty_address\n\n\ndef test_update_call(\n get_accounts,\n token_network,\n create_channel,\n channel_deposit,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n (A, B, C) = get_accounts(3)\n channel_identifier = create_channel(A, B)[0]\n channel_deposit(A, 15, B)\n token_network.functions.closeChannel(\n B,\n fake_bytes(32),\n 0,\n fake_bytes(32),\n fake_bytes(64),\n ).transact({'from': A})\n\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n (balance_hash, nonce, additional_hash, closing_signature) = balance_proof_A\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n empty_address,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n empty_address,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n fake_bytes(64),\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n fake_bytes(32),\n nonce,\n additional_hash,\n closing_signature,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n balance_hash,\n 0,\n additional_hash,\n closing_signature,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n balance_hash,\n nonce,\n additional_hash,\n fake_bytes(64),\n balance_proof_update_signature_B,\n ).transact({'from': C})\n\n\ndef test_update_nonexistent_fail(\n get_accounts,\n token_network,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n (A, B, C) = get_accounts(3)\n\n (_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()\n assert settle_block_number == 0\n assert state == CHANNEL_STATE_NONEXISTENT\n\n channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()\n\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n\n\ndef test_update_notclosed_fail(\n get_accounts,\n token_network,\n create_channel,\n channel_deposit,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n (A, B, C) = get_accounts(3)\n channel_identifier = create_channel(A, B)[0]\n channel_deposit(A, 25, B)\n\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n 
balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n (_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()\n assert settle_block_number > 0\n assert state == CHANNEL_STATE_OPENED\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n\n\ndef test_update_wrong_nonce_fail(\n token_network,\n create_channel,\n channel_deposit,\n get_accounts,\n create_balance_proof,\n create_balance_proof_update_signature,\n updateBalanceProof_state_tests,\n):\n (A, B, Delegate) = get_accounts(3)\n settle_timeout = 6\n deposit_A = 20\n channel_identifier = create_channel(A, B, settle_timeout)[0]\n channel_deposit(A, deposit_A, B)\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})\n\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': Delegate})\n\n balance_proof_A_same_nonce = balance_proof_A\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A_same_nonce,\n balance_proof_update_signature_B,\n ).transact({'from': Delegate})\n\n balance_proof_A_lower_nonce = create_balance_proof(\n channel_identifier,\n A,\n 10,\n 0,\n 4,\n fake_bytes(32, '02'),\n )\n\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A_lower_nonce,\n )\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A_lower_nonce,\n balance_proof_update_signature_B,\n ).transact({'from': A})\n updateBalanceProof_state_tests(\n A, balance_proof_A,\n B, balance_proof_B,\n settle_timeout,\n txn_hash1,\n )\n\n\ndef test_update_wrong_signatures(\n token_network,\n create_channel,\n channel_deposit,\n get_accounts,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n (A, B, C) = get_accounts(3)\n channel_identifier = create_channel(A, B)[0]\n channel_deposit(A, 25, B)\n\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n balance_proof_A_fake = create_balance_proof(\n channel_identifier,\n A,\n 10,\n 0,\n 5,\n fake_bytes(32, '02'),\n signer=C,\n )\n\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n balance_proof_update_signature_B_fake = create_balance_proof_update_signature(\n C,\n channel_identifier,\n *balance_proof_A,\n )\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A_fake,\n balance_proof_update_signature_B,\n ).transact({'from': C})\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B_fake,\n ).transact({'from': C})\n\n\ndef test_update_channel_state(\n token_network,\n create_channel,\n channel_deposit,\n get_accounts,\n 
create_balance_proof,\n create_balance_proof_update_signature,\n updateBalanceProof_state_tests,\n):\n (A, B, Delegate) = get_accounts(3)\n settle_timeout = 6\n deposit_A = 20\n channel_identifier = create_channel(A, B, settle_timeout)[0]\n channel_deposit(A, deposit_A, B)\n balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))\n balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})\n\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': Delegate})\n\n updateBalanceProof_state_tests(\n A, balance_proof_A,\n B, balance_proof_B,\n settle_timeout,\n txn_hash1,\n )\n\n\ndef test_update_channel_fail_no_offchain_transfers(\n get_accounts,\n token_network,\n create_channel,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n (A, B) = get_accounts(2)\n\n channel_identifier = create_channel(A, B)[0]\n balance_proof_A = create_balance_proof(channel_identifier, A, 0, 0, 0)\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n token_network.functions.closeChannel(\n B,\n fake_bytes(32),\n 0,\n fake_bytes(32),\n fake_bytes(64),\n ).transact({'from': A})\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n fake_bytes(32),\n 0,\n fake_bytes(32),\n fake_bytes(64),\n fake_bytes(64),\n ).transact({'from': B})\n\n with pytest.raises(TransactionFailed):\n token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': B})\n\n\ndef test_update_channel_event(\n get_accounts,\n token_network,\n create_channel,\n channel_deposit,\n create_balance_proof,\n create_balance_proof_update_signature,\n event_handler,\n):\n ev_handler = event_handler(token_network)\n (A, B) = get_accounts(2)\n deposit_A = 10\n deposit_B = 10\n\n channel_identifier = create_channel(A, B)[0]\n channel_deposit(A, deposit_A, B)\n channel_deposit(B, deposit_B, A)\n balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3)\n balance_proof_A = create_balance_proof(channel_identifier, A, 2, 0, 1)\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})\n txn_hash = token_network.functions.updateNonClosingBalanceProof(\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': B})\n\n ev_handler.add(\n txn_hash,\n EVENT_CHANNEL_BALANCE_PROOF_UPDATED,\n check_transfer_updated(channel_identifier, A),\n )\n ev_handler.check()\n"
},
{
"alpha_fraction": 0.7431749105453491,
"alphanum_fraction": 0.7431749105453491,
"avg_line_length": 27.257143020629883,
"blob_id": "54394dd8cbcdccb5451558a3001cfbd551943b5d",
"content_id": "1d58eda70f63e4fba7196979a881b9f5f0c88e06",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 989,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 35,
"path": "/raiden_contracts/cm_test/test_contract_manager.py",
"repo_name": "gakonst/raiden-contracts",
"src_encoding": "UTF-8",
"text": "import pytest\n\nfrom raiden_contracts.contract_manager import (\n ContractManager,\n CONTRACTS_SOURCE_DIRS,\n)\nfrom raiden_contracts.constants import (\n CONTRACT_TOKEN_NETWORK,\n EVENT_CHANNEL_CLOSED,\n)\n\nPRECOMPILED_CONTRACTS_PATH = 'raiden_contracts/data/contracts.json'\n\n\ndef contract_manager_meta(contracts_path):\n manager = ContractManager(contracts_path)\n\n abi = manager.get_contract_abi(CONTRACT_TOKEN_NETWORK)\n assert isinstance(abi, list)\n with pytest.raises(KeyError):\n manager.get_contract_abi('SomeName')\n\n abi = manager.get_event_abi(CONTRACT_TOKEN_NETWORK, EVENT_CHANNEL_CLOSED)\n assert isinstance(abi, dict)\n with pytest.raises(ValueError):\n manager.get_event_abi(CONTRACT_TOKEN_NETWORK, 'NonExistant')\n\n\ndef test_contract_manager_compile():\n contract_manager_meta(CONTRACTS_SOURCE_DIRS)\n\n\ndef test_contract_manager_json():\n # try to load contracts from a precompiled file\n contract_manager_meta(PRECOMPILED_CONTRACTS_PATH)\n"
},
{
"alpha_fraction": 0.5918022394180298,
"alphanum_fraction": 0.5932812094688416,
"avg_line_length": 35.1297721862793,
"blob_id": "1f0a2aeb55978d669e1f157a32faa946c7e35a1b",
"content_id": "3bed5367b8f022562bb102a9b4b95ce97ac3b7f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4733,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 131,
"path": "/raiden_contracts/contract_manager.py",
"repo_name": "gakonst/raiden-contracts",
"src_encoding": "UTF-8",
"text": "import os\nimport json\nimport logging\nfrom typing import Union, List, Dict\n\nfrom solc import compile_files\nfrom web3.utils.contracts import find_matching_event_abi\n\nlog = logging.getLogger(__name__)\nCONTRACTS_DIR = os.path.join(os.path.dirname(__file__), 'data/contracts.json')\nCONTRACTS_SOURCE_DIRS = {\n 'raiden': os.path.join(os.path.dirname(__file__), 'contracts/'),\n 'test': os.path.join(os.path.dirname(__file__), 'contracts/test'),\n}\nCONTRACTS_SOURCE_DIRS = {\n k: os.path.normpath(v) for k, v in CONTRACTS_SOURCE_DIRS.items()\n}\n\n\ndef fix_contract_key_names(input: Dict) -> Dict:\n result = {}\n\n for k, v in input.items():\n name = k.split(':')[1]\n result[name] = v\n\n return result\n\n\nclass ContractManager:\n def __init__(self, path: Union[str, List[str]]) -> None:\n \"\"\"Params:\n path: either path to a precompiled contract JSON file, or a list of\n directories which contain solidity files to compile\n \"\"\"\n self.contracts_source_dirs = None\n self.abi = dict()\n if isinstance(path, dict):\n self.contracts_source_dirs = path\n for dir_path in path.values():\n self.abi.update(\n ContractManager.precompile_contracts(dir_path, self.get_mappings()),\n )\n elif os.path.isdir(path):\n ContractManager.__init__(self, {'smart_contracts': path})\n else:\n with open(path, 'r') as json_file:\n self.abi = json.load(json_file)\n\n def compile_contract(self, contract_name: str, libs=None, *args):\n \"\"\"Compile contract and return JSON containing abi and bytecode\"\"\"\n contract_json = compile_files(\n [self.get_contract_path(contract_name)[0]],\n output_values=('abi', 'bin', 'ast'),\n import_remappings=self.get_mappings(),\n optimize=False,\n )\n contract_json = {\n os.path.basename(key).split('.', 1)[0]: value\n for key, value in contract_json.items()\n }\n return contract_json.get(contract_name, None)\n\n def get_contract_path(self, contract_name: str):\n return sum(\n (self.list_contract_path(contract_name, x)\n for x in self.contracts_source_dirs.values()),\n [],\n )\n\n @staticmethod\n def list_contract_path(contract_name: str, directory: str):\n \"\"\"Get contract source file for a specified contract\"\"\"\n return [\n os.path.join(directory, x)\n for x in os.listdir(directory)\n if os.path.basename(x).split('.', 1)[0] == contract_name\n ]\n\n def get_mappings(self) -> List[str]:\n \"\"\"Return dict of mappings to use as solc argument.\"\"\"\n return ['%s=%s' % (k, v) for k, v in self.contracts_source_dirs.items()]\n\n @staticmethod\n def precompile_contracts(contracts_dir: str, map_dirs: List) -> Dict:\n \"\"\"\n Compile solidity contracts into ABI. This requires solc somewhere in the $PATH\n and also ethereum.tools python library.\n Parameters:\n contracts_dir: directory where the contracts are stored.\n All files with .sol suffix will be compiled.\n The method won't recurse into subdirectories.\n Return:\n map (contract_name => ABI)\n \"\"\"\n files = []\n for contract in os.listdir(contracts_dir):\n contract_path = os.path.join(contracts_dir, contract)\n if not os.path.isfile(contract_path) or not contract_path.endswith('.sol'):\n continue\n files.append(contract_path)\n\n try:\n res = compile_files(\n files,\n output_values=('abi', 'bin', 'ast'),\n import_remappings=map_dirs,\n optimize=False,\n )\n return fix_contract_key_names(res)\n except FileNotFoundError:\n raise Exception('Could not compile the contract. 
Check that solc is available.')\n\n def get_contract(self, contract_name: str) -> Dict:\n \"\"\"Return bin+abi of the contract\"\"\"\n return self.abi[contract_name]\n\n def get_contract_abi(self, contract_name: str) -> Dict:\n \"\"\" Returns the ABI for a given contract. \"\"\"\n return self.abi[contract_name]['abi']\n\n def get_event_abi(self, contract_name: str, event_name: str) -> Dict:\n \"\"\" Returns the ABI for a given event. \"\"\"\n contract_abi = self.get_contract_abi(contract_name)\n return find_matching_event_abi(contract_abi, event_name)\n\n\nif os.path.isfile(CONTRACTS_DIR):\n CONTRACT_MANAGER = ContractManager(CONTRACTS_DIR)\nelse:\n CONTRACT_MANAGER = ContractManager(CONTRACTS_SOURCE_DIRS)\n"
}
] | 4 |
rdarekar/selenium-python-framework | https://github.com/rdarekar/selenium-python-framework | f635241ca6990439183e32bc308e1889e3445922 | 4899e01242c53554684bb69beb60ac959ae0603e | 042bd6fb8130b72008445a1eb08c860560222180 | refs/heads/master | 2022-10-28T16:26:29.732050 | 2020-06-16T05:45:51 | 2020-06-16T05:45:51 | 272,613,511 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6800775527954102,
"alphanum_fraction": 0.6810470223426819,
"avg_line_length": 34.568965911865234,
"blob_id": "97b04a849cc1604ab39e13b876dcf4dd1e799afe",
"content_id": "8b0b8cf53e686a6265adba75323ee52dddcbdb08",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2063,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 58,
"path": "/tests/conftest.py",
"repo_name": "rdarekar/selenium-python-framework",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom selenium import webdriver\nfrom base.webdriverfactory import WebDriverFactory\nfrom pages.home.login_page import LoginPage\n\[email protected]_fixture()\ndef setUp():\n print(\"Running method level setUp\")\n yield\n print(\"Running method level tearDown\")\n\[email protected]_fixture(scope=\"class\")\ndef oneTimeSetUp(request, browser, osType):\n #print(\"Running conftest demo one time setUp\")\n print(\"Running one time setUp\")\n wdf = WebDriverFactory(browser)\n driver = wdf.getWebDriverInstance()\n lp = LoginPage(driver)\n lp.login(\"[email protected]\", \"abcabc\")\n\n\n # if browser == \"firefox\":\n # baseURL = \"https://letskodeit.teachable.com/\"\n # driver = webdriver.Firefox()\n # driver.maximize_window()\n # driver.implicitly_wait(3)\n # driver.get(baseURL)\n # print(\"Running tests on FF\")\n # else:\n # baseURL = \"https://letskodeit.teachable.com/\"\n # driver = webdriver.Chrome()\n # driver.maximize_window()\n # driver.implicitly_wait(3)\n # driver.get(baseURL)\n # print(\"Running tests on Chrome\")\n\n # We need to add \"value\" attribute, the one we created above to the test class using \"request\" keyword\n # If the class attribute from the request we are getting is not None then make the \"value\" guy as a class attribute\n # so that it can be available to all the instance, the complete class we are going to use\n if request.cls is not None:\n request.cls.driver = driver\n # The yield will return the value where a \"fixture\" is used. And \"fixture\" is used in \"TestClassDemo\"\n yield driver\n #driver.quit()\n #print(\"Running conftest demo one time tearDown\")\n print(\"Running one time tearDown\")\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n parser.addoption(\"--osType\", help=\"Type of Operating System\")\n\[email protected](scope=\"session\")\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\[email protected](scope=\"session\")\ndef osType(request):\n return request.config.getoption(\"--osType\")\n"
},
{
"alpha_fraction": 0.7130987048149109,
"alphanum_fraction": 0.7135874629020691,
"avg_line_length": 42.553192138671875,
"blob_id": "677e5bc639b337533cc7fb45e4d7d0518477a340",
"content_id": "3352c3c3981bb7b10dc398a650979c598cf47771",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2046,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 47,
"path": "/tests/courses/register_courses_csv_data.py",
"repo_name": "rdarekar/selenium-python-framework",
"src_encoding": "UTF-8",
"text": "from pages.home.login_page import LoginPage\nfrom pages.courses.register_courses_page import RegisterCoursesPage\nfrom utilities.teststatus import TestStatus\nimport unittest\nimport pytest\nfrom ddt import ddt, data, unpack\nfrom utilities.read_data import getCSVData\nfrom pages.home.navigation_page import NavigationPage\n\[email protected](\"oneTimeSetUp\", \"setUp\")\n@ddt\nclass RegisterCoursesCSVDataTests(unittest.TestCase):\n\n @pytest.fixture(autouse=True)\n def classSetUp(self, oneTimeSetUp):\n # self.lp = LoginPage(self.driver)\n self.courses = RegisterCoursesPage(self.driver)\n self.ts = TestStatus(self.driver)\n self.nav = NavigationPage(self.driver)\n\n def setUp(self):\n self.driver.find_element_by_xpath(\"//a[@class='navbar-brand header-logo']\").click()\n\n # self.driver.get(\"https://learn.letskodeit.com/courses\")\n\n self.nav.navigateToAllCourses()\n\n\n @pytest.mark.run(order=1)\n @data(*getCSVData(\"D:\\\\sachin_thakare\\\\python_programs\\\\letskodeit\\\\framework_pom_screenshot\\\\testdata.csv\"))\n @unpack # This decorator will unpack all the tuple / list elements into multiple arguments\n def test_invalidEnrollment(self, courseName, ccNum, ccExp, ccCVV, zip):\n # self.lp.login(\"[email protected]\", \"abcabc\")\n self.courses.clickSearchBox()\n self.courses.enterCourseName(courseName)\n # self.courses.selectCourseToEnroll()\n self.courses.selectCourseToEnroll(courseName)\n self.courses.clickEnrollButton()\n self.courses.enrollCourse(num=ccNum, exp=ccExp, cvv=ccCVV, zip=zip)\n result = self.courses.verifyEnrollFailed()\n self.ts.markFinal(\"test_invalidEnrollment\", result, \"Enrollment Failed Verification\")\n\n # self.driver.find_element_by_link_text(\"All Courses\").click()\n\n # Commenting the below lines, as we are using the same lines under setUp()\n # self.driver.find_element_by_xpath(\"//a[@class='navbar-brand header-logo']\").click()\n # self.driver.get(\"https://learn.letskodeit.com/courses\")"
},
{
"alpha_fraction": 0.6656644344329834,
"alphanum_fraction": 0.7029464840888977,
"avg_line_length": 46.5428581237793,
"blob_id": "87ecf93cb52dafd6e74a75614d4d9611ff1a99e3",
"content_id": "759e71bdeb24030350f1954e66cc2879317f5491",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1663,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 35,
"path": "/tests/courses/register_courses_multiple_data_set.py",
"repo_name": "rdarekar/selenium-python-framework",
"src_encoding": "UTF-8",
"text": "from pages.home.login_page import LoginPage\nfrom pages.courses.register_courses_page import RegisterCoursesPage\nfrom utilities.teststatus import TestStatus\nimport unittest\nimport pytest\nfrom ddt import ddt, data, unpack\n\[email protected](\"oneTimeSetUp\", \"setUp\")\n@ddt\nclass RegisterMultipleCoursesTests(unittest.TestCase):\n\n @pytest.fixture(autouse=True)\n def classSetUp(self, oneTimeSetUp):\n # self.lp = LoginPage(self.driver)\n self.courses = RegisterCoursesPage(self.driver)\n self.ts = TestStatus(self.driver)\n\n @pytest.mark.run(order=1)\n @data((\"JavaScript for beginners\", \"41811 5705 8102 5900\", \"10/21\", \"665\", \"411018\"),\n (\"Learn Python 3 from scratch\", \"41811 5705 8102 5900\", \"10/21\", \"665\", \"411018\"))\n @unpack # This decorator will unpack all the tuple / list elements into multiple arguments\n def test_invalidEnrollment(self, courseName, ccNum, ccExp, ccCVV, zip):\n # self.lp.login(\"[email protected]\", \"abcabc\")\n self.courses.clickSearchBox()\n self.courses.enterCourseName(courseName)\n # self.courses.selectCourseToEnroll()\n self.courses.selectCourseToEnroll(courseName)\n self.courses.clickEnrollButton()\n self.courses.enrollCourse(num=ccNum, exp=ccExp, cvv=ccCVV, zip=zip)\n result = self.courses.verifyEnrollFailed()\n self.ts.markFinal(\"test_invalidEnrollment\", result, \"Enrollment Failed Verification\")\n\n # self.driver.find_element_by_link_text(\"All Courses\").click()\n self.driver.find_element_by_xpath(\"//a[@class='navbar-brand header-logo']\").click()\n # self.driver.get(\"https://learn.letskodeit.com/courses\")"
},
{
"alpha_fraction": 0.6508799195289612,
"alphanum_fraction": 0.6547619104385376,
"avg_line_length": 38.835052490234375,
"blob_id": "3e8c6ee397925420ddf3df3ea8b3e984c58a8710",
"content_id": "3056a81ef1795eeda827603ef536fc5482eba718",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3864,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 97,
"path": "/pages/courses/register_courses_page.py",
"repo_name": "rdarekar/selenium-python-framework",
"src_encoding": "UTF-8",
"text": "import utilities.custom_logger as cl\nimport logging\nfrom base.basepage import BasePage\nimport time\n\nclass RegisterCoursesPage(BasePage):\n log = cl.customLogger(logging.DEBUG)\n\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n # Locators\n _search_box = \"//input[@id='search-courses']\"\n _search_icon = \"//button[@id='search-course-button']\"\n # _course = \"//div[@data-course-id='56740']//div[contains(text(), 'JavaScript for beginners')]\"\n # _course = \"//div[contains(@class,'course-listing-title') and contains(text(), 'JavaScript for beginners')]\"\n _course = \"//div[contains(@class,'course-listing-title') and contains(text(), '{0}')]\"\n _all_courses = \"//div[@class='course-listing-title']\"\n _enroll_button = \"//button[@id='enroll-button-top']\"\n _cc_num = \"//input[@aria-label='Credit or debit card number']\"\n _exp_date = \"//input[@name='exp-date']\"\n _cc_cvv = \"//input[@name='cvc']\"\n _zip = \"//input[@name='postal']\"\n _terms_and_policies = \"//input[@id='agreed_to_terms_checkbox']\"\n # _submit_enroll = \"//button[@class='btn btn-primary spc__button is-disabled']\"\n _submit_enroll = \"//button[@id='confirm-purchase']\"\n\n # Perform Actions\n # Enter course name\n def clickSearchBox(self):\n self.elementClick(self._search_box, \"xpath\")\n\n def enterCourseName(self, name):\n self.sendKeys(name, self._search_box, \"xpath\")\n self.elementClick(self._search_icon, \"xpath\")\n\n # def selectCourseToEnroll(self):\n # self.elementClick(self._course, \"xpath\")\n\n def selectCourseToEnroll(self, fullCourseName):\n self.elementClick(locator=self._course.format(fullCourseName), locatorType=\"xpath\")\n\n def clickEnrollButton(self):\n self.elementClick(self._enroll_button, \"xpath\")\n\n def enterCardNum(self, num):\n # self.switchToFrame(name=\"__privateStripeFrame16\")\n # self.sendKeys(num, self._cc_num, \"xpath\")\n time.sleep(7)\n self.switchFrameByIndex(self._cc_num, locatorType=\"xpath\")\n self.sendKeysWhenReady(num, locator=self._cc_num, locatorType=\"xpath\")\n # self.sendKeysCustom(num, self._cc_num, \"xpath\")\n self.switchToDefaultContent()\n\n def enterCardExp(self, exp):\n # self.switchToFrame(name=\"__privateStripeFrame17\")\n self.switchFrameByIndex(self._exp_date, locatorType=\"xpath\")\n self.sendKeys(exp, self._exp_date, \"xpath\")\n self.switchToDefaultContent()\n\n def enterCardCVV(self, cvv):\n # self.switchToFrame(name=\"__privateStripeFrame18\")\n self.switchFrameByIndex(self._cc_cvv, locatorType=\"xpath\")\n self.sendKeys(cvv, self._cc_cvv, \"xpath\")\n self.switchToDefaultContent()\n\n def enterZip(self, zip):\n # self.switchToFrame(name=\"__privateStripeFrame19\")\n self.switchFrameByIndex(self._zip, locatorType=\"xpath\")\n self.sendKeys(zip, self._zip, \"xpath\")\n self.switchToDefaultContent()\n\n def checkTermsAndPolicy(self):\n self.elementClick(self._terms_and_policies, \"xpath\")\n\n def clickEnrollSubmitButton(self):\n self.elementClick(self._submit_enroll, \"xpath\")\n\n def enterCreditCardInformation(self, num, exp, cvv, zip):\n self.enterCardNum(num)\n self.enterCardExp(exp)\n self.enterCardCVV(cvv)\n self.enterZip(zip)\n self.checkTermsAndPolicy()\n\n def enrollCourse(self, num=\"\", exp=\"\", cvv=\"\", zip=\"\"):\n self.clickEnrollButton()\n self.webScroll(\"down\")\n self.enterCreditCardInformation(num, exp, cvv, zip)\n\n # This step of clicking on Enroll Submit button is not included in the framework by tutor\n # self.clickEnrollSubmitButton()\n\n def verifyEnrollFailed(self):\n 
result = self.isEnabled(locator=self._submit_enroll, locatorType=\"xpath\", info=\"Enroll Button\")\n return not result\n"
},
{
"alpha_fraction": 0.6997193694114685,
"alphanum_fraction": 0.7268475294113159,
"avg_line_length": 41.79999923706055,
"blob_id": "a0fe3437a70383d96c0db013f16ffec19da78082",
"content_id": "ae209fee6c1fe2ce43d304f9df59470e78326088",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1069,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 25,
"path": "/tests/courses/register_courses_tests.py",
"repo_name": "rdarekar/selenium-python-framework",
"src_encoding": "UTF-8",
"text": "from pages.home.login_page import LoginPage\nfrom pages.courses.register_courses_page import RegisterCoursesPage\nfrom utilities.teststatus import TestStatus\nimport unittest\nimport pytest\n\[email protected](\"oneTimeSetUp\", \"setUp\")\nclass RegisterCoursesTests(unittest.TestCase):\n\n @pytest.fixture(autouse=True)\n def classSetUp(self, oneTimeSetUp):\n self.lp = LoginPage(self.driver)\n self.courses = RegisterCoursesPage(self.driver)\n self.ts = TestStatus(self.driver)\n\n def test_invalidEnrollment(self):\n self.lp.login(\"[email protected]\", \"abcabc\")\n self.courses.clickSearchBox()\n self.courses.enterCourseName(\"JavaScript\")\n # self.courses.selectCourseToEnroll()\n self.courses.selectCourseToEnroll(\"JavaScript for beginners\")\n self.courses.clickEnrollButton()\n self.courses.enrollCourse(num=\"4181 5705 8102 5900\", exp=\"10/21\", cvv=\"665\", zip=\"411018\")\n result = self.courses.verifyEnrollFailed()\n self.ts.markFinal(\"test_invalidEnrollment\", result, \"Enrollment Failed Verification\")"
}
] | 5 |
he-xu/TransferRNN | https://github.com/he-xu/TransferRNN | 152f8a71bcce4244a19e0ff7e3290011aee4380e | b2ec95088cc33cb05aacdaac4d3326978bf7571f | 2c2332f627f5b58bf53ec70975f08eaa2499674a | refs/heads/master | 2020-05-17T01:03:38.376244 | 2019-06-21T15:59:43 | 2019-06-21T15:59:43 | 183,415,612 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6903614401817322,
"alphanum_fraction": 0.7265059947967529,
"avg_line_length": 19.268293380737305,
"blob_id": "ce8dfa12728a664aec273318e0c6c9091f931936",
"content_id": "7fe2c99254738b84a483e352bb2352ec376da0fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 830,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 41,
"path": "/run_rnn_in_ctxctl.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 22:23:18 2019\n\n@author: dzenn\n\"\"\"\n\n#import TransferRNN.run_rnn_in_ctxctl\n\nimport sys\nsys.path.append('/home/dzenn/anaconda3/envs/ctxctl/lib/python3.7/site-packages')\n\nfrom TransferRNN.MainRNNLoop import MainRNNLoop\nfrom TransferRNN.bias_tuning_tools import BiasTools\n\n\n\nprint(\"Successful import\")\n \nbt = BiasTools()\nexec(open(\"TransferRNN/biases/256_biases.py\").read())\n#bt.copy_biases(4,5)\n#bt.copy_biases(4,12)\nprint(\"Clearing CAMs...\")\nbt.clear_cams(1)\nbt.clear_cams(3)\n\nprint(\"Initializing the main loop\")\nMainLoop = MainRNNLoop(backend = \"ctxctl\")\n\nprint(\"Loading the dataset\")\nMainLoop.prepare_dataset(\"TransferRNN/data/\")\n\nprint(\"Loading complete. Starting...\")\n\nMainLoop.run_loop(100)\n# \n \nMainLoop.export_error()\nMainLoop.export_conn_log()"
},
{
"alpha_fraction": 0.5592532753944397,
"alphanum_fraction": 0.5754870176315308,
"avg_line_length": 38.54838562011719,
"blob_id": "0391cdbf9ac8917480a8d7980f1993f19d8a4ef9",
"content_id": "2e028cb97352abaa7f9ce019f5e3f45ef341a086",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1232,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 31,
"path": "/force.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "def ternarize(w_new, cam_num):\n w_order = np.argsort(np.abs(w_new.T), axis=0)\n w_sorted = w_new.T[w_order, np.arange(w_order.shape[1])]\n w_sorted[:-cam_num, :]=0\n w_order_order = np.argsort(w_order, axis=0)\n w_undone = w_sorted[w_order_order, np.arange(w_order_order.shape[1])].T\n w_undone[w_undone>0] = 1\n w_undone[w_undone<0] = -1\n return w_undone\n\ndef update_weight(rate_psc, rate_teacher, w_real, w_ternary, n=6, m=1, cam_num=63, learning_rate=0.1):\n rate_recurrent = w_ternary.dot(rate_psc)\n rate_teacher_tile = np.tile(rate_teacher, (n,m))\n error = rate_recurrent - rate_teacher_tile\n d_w = 0\n for t in range(num_timesteps):\n r_t = rate_psc[:, t][:,np.newaxis]\n P_up = P_prev.dot(r_t.dot(r_t.T.dot(P_prev)))\n P_down = 1 + r_t.T.dot(P_prev.dot(r_t))\n P_t = P_prev - P_up / P_down\n e_t = error[:, t][:,np.newaxis]\n d_w += e_t.dot(r_t.T.dot(P_t))\n d_w = d_w / num_timesteps\n w_new = w_ternary - learning_rate*d_w\n w_ternary = ternarize(w_new, cam_num)\n norm_ratio = np.linalg.norm(w_new, 'fro')/np.linalg.norm(w_ternary, 'fro')\n if norm_ratio > 1:\n c_grad = 1\n else:\n c_grad = -1\n return w_ternary, c_grad\n\n \n"
},
{
"alpha_fraction": 0.579474687576294,
"alphanum_fraction": 0.5842033624649048,
"avg_line_length": 38.04753112792969,
"blob_id": "9fa86fe49018861f8c3eea9f864fedc36d74f0fe",
"content_id": "42ee1f130c4673db7763f39b0d8832d09291d0ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21359,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 547,
"path": "/NeuronNeuronConnector.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "import CtxDynapse\n\n\nclass DynapseConnector:\n \"\"\"\n Connector for DYNAP-se chip\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the class with empty connections\n sending_connections_to: map of (key, value), where key is the neuron sending connection and value is a list of all connections that start with the key neuron\n receiving_connections_from: map of (key, value), where key is the neuron receiving connection and value is a list of all connections that end with the key neuron\n \"\"\"\n\n self.sending_connections_to = {}\n self.receiving_connections_from = {}\n self.sending_virtual_connections_to = {}\n self.receiving_virtual_connections_from = {}\n\n def _save_connection(self, pre_neuron, post_neuron):\n self._add_to_list(self.sending_connections_to, pre_neuron, post_neuron)\n self._add_to_list(self.receiving_connections_from, post_neuron, pre_neuron)\n\n def _save_connection(self, pre_neuron, post_neuron):\n self._add_to_list(self.sending_connections_to, pre_neuron, post_neuron)\n self._add_to_list(self.receiving_connections_from, post_neuron, pre_neuron)\n\n def _save_virtual_connection(self, pre_neuron, post_neuron):\n self._add_to_list(self.sending_virtual_connections_to, pre_neuron, post_neuron)\n self._add_to_list(\n self.receiving_virtual_connections_from, post_neuron, pre_neuron\n )\n\n def _add_to_list(self, list_name, key, value):\n if key in list_name:\n list_name[key].append(value)\n else:\n list_name[key] = [value]\n\n def _remove_connection(self, pre_neuron, post_neuron):\n self._remove_from_list(self.sending_connections_to, pre_neuron, post_neuron)\n self._remove_from_list(self.receiving_connections_from, post_neuron, pre_neuron)\n\n def _remove_virtual_connection(self, pre_neuron, post_neuron):\n self._remove_from_list(\n self.sending_virtual_connections_to, pre_neuron, post_neuron\n )\n self._remove_from_list(\n self.receiving_virtual_connections_from, post_neuron, pre_neuron\n )\n\n def _remove_from_list(self, list_name, key, value):\n if key in list_name:\n try:\n list_name[key].remove(value)\n if not list_name[key]:\n list_name.pop(key)\n except (ValueError, KeyError):\n print(\n \"The neuron {} has no connection with neuron {}\".format(key, value)\n )\n else:\n raise IndexError(\"The neuron {} has no connections to remove\".format(key))\n\n def add_connection(self, pre, post, synapse_type):\n \"\"\"\n Connect two neurons.\n Attributes:\n pre: neuron that sends the connection\n post: neuron that receives the connection\n synapse_type: one of the four expected synapse types\n \"\"\"\n\n # check if one of the neurons is virtual\n pre_virtual = pre.is_virtual()\n post_virtual = post.is_virtual()\n\n # if post_neuron is virtual, raise an error.\n # Virtual neurons cannot receive connections\n if post_virtual:\n raise Exception(\n \"post neuron cannot be virtual. 
Virtual neurons do not receive connections.\"\n )\n\n if pre_virtual:\n self.add_virtual_connection(pre, post, synapse_type)\n return\n\n pre_srams = pre.get_srams()\n pre_core_id = pre.get_core_id()\n pre_chip_id = pre.get_chip_id()\n pre_neuron_id = pre.get_neuron_id()\n\n post_cams = post.get_cams()\n post_core_id = post.get_core_id()\n post_chip_id = post.get_chip_id()\n post_neuron_id = post.get_neuron_id()\n\n target_chip = post_chip_id\n\n # check if the pre can send a connection to target chip\n pre_available = False\n pre_sram = 0\n for i in range(len(pre_srams)):\n if pre_srams[i].get_target_chip_id() == target_chip:\n pre_available = True\n pre_sram = i\n break\n\n if not pre_available:\n for i in range(len(pre_srams)):\n if not pre_srams[i].is_used():\n pre_available = True\n pre_sram = i\n break\n\n if not pre_available:\n raise Exception(\"pre neuron has no available outputs\")\n\n # check if the post can receive a connection\n post_available = False\n for i in range(len(post_cams)):\n\n # if there is space left on post_cams\n if (\n post_cams[i].get_pre_neuron_id()\n + post_cams[i].get_pre_neuron_core_id() * 256\n ) == 0:\n post_available = True\n post_cam = i\n break\n\n if not post_available:\n raise Exception(\"post neuron has no available inputs\")\n\n # connect\n pre_neuron_address = pre_core_id * 256 + pre_neuron_id\n post_neuron_address = post_core_id * 256 + post_neuron_id\n virtual_core_id = 0\n if pre_srams[pre_sram].is_used():\n virtual_core_id = pre_srams[pre_sram].get_virtual_core_id()\n else:\n virtual_core_id = pre_core_id\n\n core_mask = pre_srams[pre_sram].get_core_mask() | (1 << post_core_id)\n\n d = (post_chip_id & 1) - (pre_chip_id & 1)\n if d < 0:\n sx = True\n else:\n sx = False\n\n dx = abs(d)\n\n d = ((post_chip_id & 2)>>1) - ((pre_chip_id & 2)>>1)\n\n if d < 0:\n sy = False\n else:\n sy = True\n\n dy = abs(d)\n\n pre_srams[pre_sram].set_virtual_core_id(virtual_core_id)\n pre_srams[pre_sram].set_target_chip_id(post_chip_id)\n pre_srams[pre_sram].set_sx(sx)\n pre_srams[pre_sram].set_sy(sy)\n pre_srams[pre_sram].set_dx(dx)\n pre_srams[pre_sram].set_dy(dy)\n pre_srams[pre_sram].set_used(True)\n pre_srams[pre_sram].set_core_mask(core_mask)\n\n post_cams[post_cam].set_pre_neuron_id(pre_neuron_id)\n post_cams[post_cam].set_pre_neuron_core_id(pre_core_id)\n post_cams[post_cam].set_type(synapse_type)\n\n #CtxDynapse.dynapse.set_config_chip_id(pre_chip_id)\n #CtxDynapse.dynapse.write_sram(\n # pre_neuron_address, pre_sram, virtual_core_id, sx, dx, sy, dy, core_mask\n #)\n #\n #if pre_chip_id != post_chip_id:\n # CtxDynapse.dynapse.set_config_chip_id(post_chip_id)\n\n #CtxDynapse.dynapse.write_cam(\n # pre_neuron_address, post_neuron_address, post_cam, synapse_type\n #)\n\n self._save_connection(pre, post)\n\n def add_connection_from_list(\n self, pre_neurons_list, post_neuron_list, synapse_types\n ):\n \"\"\"\n Connect neurons using a python list of pre, post and synapse types.\n Attributes:\n pre_neurons_list: list with neurons that send the connection\n post_neuron_list: list with neurons that receive the connection\n synapse_types: list with the connection type between the neurons. It can be a list with one single\n element indicating all the connections are of the same type, otherwise, the size of the synapse types list must\n match the size of pre and post neurons list.\n \"\"\"\n\n if len(pre_neurons_list) != len(post_neuron_list):\n print(\n \"The number of pre and post neurons must be the same. 
No connection will be created.\"\n )\n return\n\n same_synapse_type = False\n if len(synapse_types) == 1:\n same_synapse_type = True\n\n if (len(pre_neurons_list) != len(synapse_types)) and (not same_synapse_type):\n print(\n \"The number of synapses type must match the number of connections. No connection will be created.\"\n )\n return\n\n for i in range(len(pre_neurons_list)):\n self.add_connection(\n pre_neurons_list[i],\n post_neuron_list[i],\n synapse_types[0] if same_synapse_type else synapse_types[i],\n )\n\n def add_connection_from_file(self, connection_file_path):\n \"\"\"\n Connects neurons reading the values from a file. The file must contain three elements per line: pre neuron, post neuron and synapse type.\n Attributes:\n connection_file: file that contains three elements in each row: pre neuron, post neuron and synapse type.\n \"\"\"\n number_of_connections = 0\n\n with open(connection_file_path, \"r\") as fp:\n for i in fp.readlines():\n tmp = i.split(\" \")\n\n # verify if there is 3 elements in the line\n if len(tmp) == 3:\n # connect\n self.add_connection(tmp[0], tmp[1], tmp[2])\n number_of_connections += 1\n else:\n print(\n \"Bad format error. Error in the line {}. The connections before this point were created.\".format(\n number_of_connections + 1\n )\n )\n\n def add_virtual_connection(self, pre, post, synapse_type):\n \"\"\"\n Connect a virtual neuron with a real (on chip) neuron.\n Attributes:\n pre: neuron that sends the connection, it must be virtual\n post: neuron that receives the connection, it must not be virtual\n synapse_type: one of the four expected synapse types\n \"\"\"\n\n if not pre.is_virtual():\n raise Exception(\"pre neuron must be virtual\")\n\n if post.is_virtual():\n raise Exception(\"post neuron must not be virtual\")\n\n pre_core_id = pre.get_core_id()\n pre_chip_id = pre.get_chip_id()\n pre_neuron_id = pre.get_neuron_id()\n\n post_cams = post.get_cams()\n post_core_id = post.get_core_id()\n post_chip_id = post.get_chip_id()\n post_neuron_id = post.get_neuron_id()\n\n # check if the post can receive a connection\n post_available = False\n for i in range(len(post_cams)):\n # if there is space left on post_cams\n if (\n post_cams[i].get_pre_neuron_id()\n + post_cams[i].get_pre_neuron_core_id() * 256\n ) == 0:\n post_available = True\n post_cam = i\n break\n\n if not post_available:\n raise Exception(\"post neuron has no available inputs\")\n\n # connect\n pre_neuron_address = pre_core_id * 256 + pre_neuron_id\n post_neuron_address = post_core_id * 256 + post_neuron_id\n virtual_core_id = pre_core_id\n\n post_cams[post_cam].set_pre_neuron_id(pre_neuron_id)\n post_cams[post_cam].set_pre_neuron_core_id(pre_core_id)\n post_cams[post_cam].set_type(synapse_type)\n\n #CtxDynapse.dynapse.set_config_chip_id(post_chip_id)\n #CtxDynapse.dynapse.write_cam(\n # pre_neuron_address, post_neuron_address, post_cam, synapse_type\n #)\n\n self._save_virtual_connection(pre, post)\n\n def remove_connection(self, pre_neuron, post_neuron):\n \"\"\"\n Delete the connection between two neurons.\n Attributes:\n pre_neuron: neuron that sends the connection\n post_neuron: neuron that receives the connection\n \"\"\"\n\n # check if one of the neurons is virtual\n pre_virtual = pre_neuron.is_virtual()\n post_virtual = post_neuron.is_virtual()\n\n # if post_neuron is virtual, raise an error.\n # Virtual neurons do not receive connections, thus there is no connection to remove\n if post_virtual:\n raise Exception(\"post neuron is virtual, there is no connection to 
remove.\")\n\n if pre_virtual:\n self.remove_virtual_connection(pre_neuron, post_neuron)\n return\n\n # first, try to remove the neurons from the lists. This will raise an exception if the neurons aren't connected.\n # todo: handle exception\n self._remove_connection(pre_neuron, post_neuron)\n\n # now, we can remove the connections on chip\n\n # get info about pre and post neurons\n pre_srams = pre_neuron.get_srams()\n pre_core_id = pre_neuron.get_core_id()\n pre_chip_id = pre_neuron.get_chip_id()\n pre_neuron_id = pre_neuron.get_neuron_id()\n\n post_cams = post_neuron.get_cams()\n post_core_id = post_neuron.get_core_id()\n post_chip_id = post_neuron.get_chip_id()\n post_neuron_id = post_neuron.get_neuron_id()\n\n pre_neuron_address = pre_core_id * 256 + pre_neuron_id\n post_neuron_address = post_core_id * 256 + post_neuron_id\n\n # check what sram sends a connection to post neuron\n pre_sram = 0\n for i in range(len(pre_srams)):\n if pre_srams[i].get_target_chip_id() == post_chip_id:\n pre_sram = i\n break\n\n pre_virtual_core_id = pre_srams[pre_sram].get_virtual_core_id()\n\n # check what cam receives a connection from pre neuron\n post_cam = 0\n for i in range(len(post_cams)):\n if (post_cams[i].get_pre_neuron_core_id() * 256 + post_cams[i].get_pre_neuron_id()) == pre_neuron_address:\n post_cam = i\n break\n\n #CtxDynapse.dynapse.set_config_chip_id(post_chip_id)\n\n ## information of post-synaptic neuron, setting the address of pre-synaptic neuron to zero\n #CtxDynapse.dynapse.write_cam(0, post_neuron_address, post_cam, 0)\n post_cams[post_cam].set_pre_neuron_id(0)\n post_cams[post_cam].set_pre_neuron_core_id(0)\n\n ## updating pre-synaptic neuron\n #if pre_chip_id != post_chip_id:\n # CtxDynapse.dynapse.set_config_chip_id(pre_chip_id)\n\n # if there is no other connections from pre neuron, set it to zero and mark it as unused\n if pre_neuron not in self.sending_connections_to:\n # information of pre-synaptic neuron, setting the address of post-synaptic neuron to zero\n #CtxDynapse.dynapse.write_sram(\n # pre_neuron_address, pre_sram, 0, 0, 0, 0, 0, 0\n #)\n pre_srams[pre_sram].set_used(False)\n pre_srams[pre_sram].set_virtual_core_id(0)\n pre_srams[pre_sram].set_target_chip_id(0)\n pre_srams[pre_sram].set_sx(0)\n pre_srams[pre_sram].set_sy(0)\n pre_srams[pre_sram].set_dx(0)\n pre_srams[pre_sram].set_dy(0)\n pre_srams[pre_sram].set_core_mask(0)\n\n # if there are other connections, check if they are projecting to the same core as the post-neuron\n else:\n post_list = self.sending_connections_to[pre_neuron]\n found_post_same_core = False\n for element in post_list:\n if element.get_core_id() == post_core_id:\n found_post_same_core = True\n\n # if none of the connection go to the same core as the post_neuron, we set the corresponding bit of the core mask to 0\n if not found_post_same_core:\n core_mask = pre_srams[pre_sram].get_core_mask() & ~(0 << post_core_id)\n #CtxDynapse.dynapse.write_sram(\n # pre_neuron_address, pre_sram, 0, 0, 0, 0, 0, core_mask\n #)\n pre_srams[pre_sram].set_core_mask(core_mask)\n\n def remove_connection_from_list(self, pre_neurons_list, post_neuron_list):\n \"\"\"\n Delete the connection between two list of neurons. The number of elements in each list must be the same.\n Attributes:\n pre_neurons_list: list of neurons that send the connection\n post_neurons_list: list of neuron that receive the connection\n \"\"\"\n if len(pre_neurons_list) != len(post_neuron_list):\n print(\n \"The number of pre and post neurons must be the same. 
No connection was removed.\"\n )\n return\n\n for i in range(len(pre_neurons_list)):\n # todo: handle exception\n self.remove_connection(pre_neurons_list[i], post_neuron_list[i])\n\n def remove_connection_from_file(self, unconnect_file_path):\n \"\"\"\n Delete the connection between neurons reading the values from a file. The file must contain two elements per line: pre neuron and post neuron.\n Attributes:\n connection_file: file that contains two elements in each row: pre neuron and post neuron.\n \"\"\"\n number_of_connections_removed = 0\n\n with open(unconnect_file_path, \"r\") as fp:\n for i in fp.readlines():\n tmp = i.split(\" \")\n\n # verify if there is 2 elements in the line\n if len(tmp) == 2:\n # unconnect\n # todo: catch exception\n self.remove_connection(tmp[0], tmp[1])\n number_of_connections_removed += 1\n else:\n print(\n \"Bad format error. Error in the line {}. The connections before this point were removed.\".format(\n number_of_connections_removed + 1\n )\n )\n\n def remove_virtual_connection(self, pre_neuron, post_neuron):\n \"\"\"\n Delete the connection between a virtual neuron and a real (on chip) neuron.\n Attributes:\n pre_neuron: neuron that sends the connection - must be virtual\n post_neuron: neuron that receives the connection - must not be virtual\n \"\"\"\n\n if not pre_neuron.is_virtual():\n raise Exception(\"pre neuron must be virtual\")\n\n if post_neuron.is_virtual():\n raise Exception(\"post neuron must not be virtual\")\n\n # check if this connection is on the lists.\n # This will raise an exception if the neurons aren't connected.\n # todo: handle exception\n self._remove_virtual_connection(pre_neuron, post_neuron)\n\n # now, we can remove the connection on chip\n # we just need to clean the cam of the post, pre neuron is virtual\n\n # get info about pre and post neurons\n pre_core_id = pre_neuron.get_core_id()\n pre_neuron_id = pre_neuron.get_neuron_id()\n\n post_cams = post_neuron.get_cams()\n post_core_id = post_neuron.get_core_id()\n post_chip_id = post_neuron.get_chip_id()\n post_neuron_id = post_neuron.get_neuron_id()\n\n pre_neuron_address = pre_core_id * 256 + pre_neuron_id\n post_neuron_address = post_core_id * 256 + post_neuron_id\n\n # check what cam receives a connection from pre neuron\n post_cam = 0\n for i in range(len(post_cams)):\n if post_cams[i] == pre_neuron_address:\n post_cam = i\n break\n\n #CtxDynapse.dynapse.set_config_chip_id(post_chip_id)\n # information of post-synaptic neuron, setting the address of pre-synaptic neuron to zero\n #CtxDynapse.dynapse.write_cam(0, post_neuron_address, post_cam, 0)\n post_cams[post_cam].set_pre_neuron_id(0)\n post_cams[post_cam].set_pre_neuron_core_id(0)\n\n def remove_sending_connections(self, neuron):\n \"\"\"\n Remove all connections leaving the informed neuron\n Attributes:\n neuron: the neuron passed as parameter will be considered the pre-synaptic neuron, and all the connections that leave this neuron will be removed.\n \"\"\"\n\n # todo: handle exception\n if neuron in self.sending_connections_to:\n connections = self.sending_connections_to[neuron]\n for i in connections:\n self.remove_connection(neuron, i)\n\n def remove_receiving_connections(self, neuron):\n \"\"\"\n Remove all connections arriving in the informed neuron\n Attributes:\n neuron: the neuron passed as parameter will be considered the post-synaptic neuron, and all the connections that are sent to this neuron will be removed.\n \"\"\"\n if neuron.is_virtual:\n raise Exception(\n \"neuron {} is virtual and receives no 
connection\".format(neuron)\n )\n\n # todo: handle exception\n if neuron in self.receiving_connections_from:\n connections = self.receiving_connections_from[neuron]\n for i in connections:\n self.remove_connection(i, neuron)\n\n def remove_all_connections(self, neuron):\n \"\"\"\n Remove all connections of a neuron, i.e., all the connections that the neuron send and receive will be removed.\n Attributes:\n neuron: the neuron that will have all its connections removed.\n \"\"\"\n self.remove_sending_connections(neuron)\n\n if not neuron.is_virtual:\n self.remove_receiving_connections(neuron)\n\n\nif __name__ == \"__main__\":\n\n model = CtxDynapse.model\n neurons = model.get_shadow_state_neurons()\n dynapse_connector = DynapseConnector()\n\n if len(neurons) > 2:\n dynapse_connector.add_connection(neurons[0], neurons[1], 3)\n dynapse_connector.add_connection(neurons[0], neurons[2], 3)\n else:\n print(\"missing neurons to connect\")\n"
},
{
"alpha_fraction": 0.5585944652557373,
"alphanum_fraction": 0.5696431994438171,
"avg_line_length": 37.15277862548828,
"blob_id": "df14a5d95139044a05323786a9da47516cc6153f",
"content_id": "b7220f9e7ca012b97f94521d74042d2af613d357",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5521,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 144,
"path": "/MainRNNLoop.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 29 15:21:22 2019\n\n@author: dzenn\n\"\"\"\n\n#exec(open(\"MainRNNLoop.py\").read())\n\n\ntry:\n from DynapseRNN import DynapseRNN\n from matplotlib import pyplot as plt\n from random import sample\n from IPython import get_ipython\n get_ipython().run_line_magic('matplotlib', 'inline')\n\nexcept ModuleNotFoundError:\n print(\"Trying to load Dynapse RNN from outside TransferRNN directory\")\n from TransferRNN.DynapseRNN import DynapseRNN\n \n#try:\n# from bias_tuning_tools import BiasTools\n#except ModuleNotFoundError:\n# from TransferRNN.bias_tuning_tools import BiasTools\n \nimport numpy as np\nimport datetime\n\nimport pickle\n\n\n\nclass MainRNNLoop():\n \n def __init__(self, num_inputs = 128, timesteps = 28, multiplex_factor = 2, backend = \"ctxctl\", c = None): \n \n self.RNNController = DynapseRNN(num_inputs=num_inputs, timesteps=timesteps, multiplex_factor=multiplex_factor, backend=backend, c=c, debug=True)\n self.backend = backend\n self.num_inputs = num_inputs\n self.timesteps = timesteps\n self.multiplex_factor = multiplex_factor\n self.c = c\n self.recorded_error = []\n \n def prepare_dataset(self, dataset_path = \"\"):\n \"\"\"\n Loads datasets (input, teacher and mnist) into MainRNNLoop internal variables \n \n Args:\n dataset_path (str, optional) : path to datasets\n \"\"\"\n \n projection_train = open(dataset_path + 'x_projection_train.pkl', 'rb')\n state_train = open(dataset_path + 'state_train.pkl', 'rb')\n mnist = open(dataset_path + 'mnist.pkl','rb')\n\n u_1 = pickle._Unpickler(projection_train)\n u_2 = pickle._Unpickler(state_train)\n u_3 = pickle._Unpickler(mnist)\n u_1.encoding = 'latin1'\n u_2.encoding = 'latin1'\n u_3.encoding = 'latin1'\n \n self.projection_train_data = u_1.load()\n self.state_train_data = u_2.load()\n# self.mnist_train, self.mnist_test, self.mnist_validate = u_3.load() # load validation set\n \n def export_error(self, filename = \"RNNError.dat\"):\n \"\"\"\n Export mean error log to a file\n \n Args:\n filename (str, optional)\n \"\"\"\n# f_out = open(str(datetime.datetime.now()) + \" \" + filename, \"wb\") \n f_out = open(filename, \"w\")\n for err in self.recorded_error:\n f_out.write(str(err) + \"\\n\")\n f_out.close()\n \n def export_conn_log(self, filename = \"RNNConnLog.dat\"):\n \"\"\"\n Export recorded connectivity changes\n \n Args:\n filename (str, optional)\n \"\"\"\n# f_out = open(str(datetime.datetime.now()) + \" \" + filename, \"wb\") \n f_out = open(filename, \"w\")\n for item in self.RNNController.conn_log:\n f_out.write(str(item[0]) + \" \" + str(item[1]) + \"\\n\")\n f_out.close()\n \n \n def start(self):\n pass\n \n def run_loop(self, num_images):\n \"\"\"\n Run the image presentation loop num_images times.\n The loop consists of 6 steps:\n 1. Start recording spikes (initialize event filters)\n 2. Present the input spike trains (actual realtime image presetation)\n 3. Stop recording (clear event filters)\n 4. Compute rates based on recorded events by binning spikes by neuron indices and timesteps\n 5. Compute the new ternary weight matrix based on the rates and the state_train_data (takes top cam_num largest gradients per neuron with stochastic rounding)\n 6. 
Apply the new weight matrix to the chip\n \n Note:\n Steps 2 and 6 take the most amount of time \n \"\"\"\n \n for image_idx in range(num_images):\n print(\"Start recording\")\n self.RNNController.start_recording_spikes()\n print(\"Showing digit %d\" % (image_idx))\n self.RNNController.present_stimulus(self.projection_train_data[image_idx], 2/6)\n print(\"Stopping the recording\")\n self.RNNController.stop_recording_spikes()\n print(\"Processing recorded evts...\")\n rates = self.RNNController.process_recorded_evts()\n print(np.array(rates)/100)\n \n if self.backend == 'rpyc':\n ids = sample(range(self.num_inputs), 4)\n rate_recurrent = self.RNNController.w_ternary.dot(np.array(rates)/100)\n rate_teacher = self.state_train_data[0]\n rate_teacher_tile = np.tile(rate_teacher.T, (self.multiplex_factor,1))\n for idx in ids:\n plt.figure(idx)\n plt.plot(range(len(rate_recurrent[idx])), rate_recurrent[idx], 'r--', range(len(rate_teacher_tile[idx])), rate_teacher_tile[idx], 'b--')\n plt.show()\n \n print(\"Computing gradients...\")\n c_grad, mean_error = self.RNNController.update_weight(np.array(rates)/100, (self.state_train_data[0]), learning_rate = 0.01)\n \n self.recorded_error.append(mean_error)\n \n self.RNNController.apply_new_matrix(self.RNNController.w_ternary, False)\n \n print(\"C_grad: %g, mean_error %g\" % (c_grad, mean_error))\n print(\"Done\") \n \n\n \n "
},
{
"alpha_fraction": 0.5551360845565796,
"alphanum_fraction": 0.5682177543640137,
"avg_line_length": 33.35951614379883,
"blob_id": "39030779553327b8f21a36f4e4ea6c0d2b37c600",
"content_id": "9207355b9ebe96c0d4798c389f0860317678e510",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11390,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 331,
"path": "/bias_tuning_tools.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 12:51:51 2018\n\nSet of tools to simplify the bias tuning process of the DYNAP-SE chip\nusing aiCTX's cortexcontrol.\n\nLoad and instantiate Bias Tuning Tools (BiTTs) in cortexcontrol console:\n >>> import bias_tuning_tools\n >>> bt = bias_tuning_tools.BiasTools()\n \nSeveral actions available:\n \n Monitoring I_mem:\n Use bt.monitor(core_id, neuron_id) to output I_mem to the respective DYNAP-SE output\n \n Stimulating a core with steps of DC current:\n Use bt.dc_steps(core_id, period, time_interval, coarse_val) to run steps of current with\n specified period and aplitude for the duration of the specified time interval.\n \n Stimulating a chip with a periodic spike train:\n Step 1:\n Use bt.connect_spikegen(chip_id, syn_type) to select the chip and the synapse to use.\n For convinience, syn_type is integer, 0 is SLOW_INH, 1 is FAST_INH, 2 is SLOW_EXC, 3 is FAST_EXC\n Step 2:\n Use bt.set_rate(rate) to set the rate of the spike train and start spikegen.\n \n You can always start and stop spiken with bt.spikegen.start() and bt.spikegen.stop()\n and check the status with bt.spikegen.is_running()\n \n Saving and loading biases with wrapper functions:\n bt.save_biases(filename) and bt.load_biases(filename).\n IMPORTANT: Make sure the filename ends with *.py\n ##TODO: Add a check for \"*.py\"\n \n Copying biases:\n Use bt.copy_biases(source_core, target_core) to copy the full set of biases from one\n core to another.\n\n@author: dzenn\n\"\"\"\n\n\n# import bias_tuning_tools; bt = bias_tuning_tools.BiasTools();\n# import imp; imp.reload(bias_tuning_tools);\n\n#import sys\n#sys.path.append('/home/dzenn/anaconda3/envs/ctxctl3.7/lib/python3.7/site-packages')\n#sys.path.append('/home/dzenn/anaconda3/envs/ctxctl/lib/python3.7/site-packages')\n#sys.path.append('/home/theiera/gitlab/NCS/CTXCTL/cortexcontrol')\n\n#import numpy as np\nfrom time import sleep, clock\nimport CtxDynapse\nfrom NeuronNeuronConnector import DynapseConnector\nimport PyCtxUtils\nfrom CtxDynapse import DynapseCamType as SynTypes\n#import numpy as np\n\n\nclass BiasTools(object):\n\n def __init__(self):\n \n self.model = CtxDynapse.model\n self.virtual_model = CtxDynapse.VirtualModel()\n self.groups = self.model.get_bias_groups()\n self.neurons = self.model.get_shadow_state_neurons()\n self.virtual_neurons = self.virtual_model.get_neurons()\n self.buf_evt_filter = None\n self.spikes = []\n self.event_filters = []\n self.core_rate = None\n self.coarse_value = None\n self.coarse_value_set = False\n self.connector = DynapseConnector()\n self.poisson_spike_gen = self.model.get_fpga_modules()[0]\n self.spikegen = self.model.get_fpga_modules()[1]\n self.syn_types = SynTypes\n self.spikegen_target_chip = None\n \n \n def off_core(self, core_id):\n \"\"\"\n Switch the core off using by setting TAU1 to a large value\n \n Args:\n core_id (int): core index [0-15]\n \"\"\"\n\n self.groups[core_id].set_bias(\"IF_TAU1_N\", 100, 7)\n \n def save_biases(self, filename):\n \"\"\"\n A wrapper to save the biases to a file\n \n Args:\n filename (str): filename should end *.py\n \"\"\"\n PyCtxUtils.save_biases(filename)\n \n def load_biases(self, filename):\n \"\"\"\n A wrapper to load the biases from file\n \n Args:\n filename (str): filename should end *.py\n \"\"\" \n exec(open(filename).read())\n\n def clear_all_cams(self):\n \"\"\"\n Clear cams of the whole board\n \"\"\"\n for chip_id in range(4):\n CtxDynapse.dynapse.clear_cam(chip_id)\n \n def 
clear_cams(self, chip_id):\n \"\"\"\n Clear cams of the specified chip\n \n Args:\n chip_id (int): chip index\n \"\"\"\n CtxDynapse.dynapse.clear_cam(chip_id)\n\n def clear_all_srams(self):\n \"\"\"\n Clear srams of the whole board\n \"\"\"\n for chip_id in range(4):\n CtxDynapse.dynapse.clear_sram(chip_id)\n \n def clear_sram(self, chip_id):\n \"\"\"\n Clear srams of the specified chip\n \n Args:\n chip_id (int): chip index\n \"\"\"\n CtxDynapse.dynapse.clear_sram(chip_id)\n\n \n def copy_biases(self, source_core, target_core):\n \"\"\"\n Copies the full set of biases from one core to another\n \n Args:\n source_core (int): core_id from 0 to 15 from where to copy the biases\n target_core (int): core_id from 0 to 15 where to write the biases\n \"\"\"\n \n source_biases = self.groups[source_core].get_biases()\n \n for bias in source_biases:\n self.groups[target_core].set_bias(bias.get_bias_name(), bias.get_fine_value(), bias.get_coarse_value())\n\n \n \n def connect_spikegen(self, chip_id, syn_type_int):\n \"\"\" Creates connections from virtual neuron 1 to all neurons of the selected chip\n using selected syn_type.\n \n Args:\n chip_id (int): stimulated chip ID\n syn_type_int (int): integer synapse type to be converted to DynapseCamType withing the method\n \"\"\"\n \n self.spikegen_target_chip = chip_id\n \n if syn_type_int == 0:\n syn_type = SynTypes.SLOW_INH\n elif syn_type_int == 1:\n syn_type = SynTypes.FAST_INH\n elif syn_type_int == 2:\n syn_type = SynTypes.SLOW_EXC\n elif syn_type_int == 3:\n syn_type = SynTypes.FAST_EXC\n else:\n print(\"Unable syn type, please try again\")\n return\n \n for n in range(1024):\n self.connector.add_connection(pre=self.virtual_neurons[1],\n post=self.neurons[n + 1024*chip_id],\n synapse_type=syn_type)\n \n self.model.apply_diff_state()\n \n def disconnect_spikegen(self):\n \"\"\"\n Removes all connections from the spikegen to physical neurons.\n \n ##TODO: Seems not to work, needs fixing\n \"\"\"\n \n self.spikegen_target_chip = None\n \n for n in range(1024):\n self.connector.remove_sending_connections(self.virtual_neurons[n])\n \n self.model.apply_diff_state()\n \n\n def set_rate(self, rate):\n \"\"\"\n Sets spiking rate of the spikegen and starts it.\n \n Args:\n rate (int): Spiking rate in Hz, can't be lower ~5 Hz\n \"\"\"\n \n self.spikegen.stop()\n \n\n \n isi_base = 900\n unit_mult = isi_base/90\n \n sleep((rate**(-1)))\n \n fpga_event = CtxDynapse.FpgaSpikeEvent()\n fpga_event.core_mask = 15\n fpga_event.target_chip = self.spikegen_target_chip\n fpga_event.neuron_id = 1\n fpga_event.isi = int(((rate*1e-6)**(-1))/unit_mult)\n \n \n self.spikegen.set_variable_isi(False)\n self.spikegen.preload_stimulus([fpga_event])\n self.spikegen.set_isi(int(((rate*1e-6)**(-1))/unit_mult))\n self.spikegen.set_isi_multiplier(isi_base)\n self.spikegen.set_repeat_mode(True)\n\n self.spikegen.start()\n\n\n \n def dc_steps(self, core_id, period, time_interval, coarse_val):\n \"\"\" Create square steps of DC current using the IF_DC_P bias with\n the specified period and within the specified time_interval, with coarse_val amplitude.\n \n Args:\n core_id (int): target core\n period (int): period of DC steps, in seconds\n time_interval (int): time interval for the method to run, in seconds\n coarse_val (int): amplitude of DC steps set by the coarse value of the IF_DC_P bias (fine value is set to 128)\n \"\"\"\n start_time = clock()\n while (clock() - start_time < time_interval):\n self.groups[core_id].set_bias(\"IF_DC_P\", 128, coarse_val)\n sleep(period)\n 
self.groups[core_id].set_bias(\"IF_DC_P\", 0, 0)\n sleep(period)\n \n def get_core_rate(self, core_id, time_interval):\n \"\"\" Create square steps of DC current using the IF_DC_P bias with\n the specified period and within the specified time_interval, with coarse_val amplitude.\n \n Args:\n core_id (int): target core\n period (int): period of DC steps, in seconds\n time_interval (int): time interval for the method to run, in seconds\n coarse_val (int): amplitude of DC steps set by the coarse value of the IF_DC_P bias (fine value is set to 128)\n \"\"\"\n buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, [idx for idx in range(core_id*256, core_id*256 + 256)])\n start_time = clock()\n \n while (clock() - start_time < time_interval):\n evts = buf_evt_filter.get_events()\n print(\"Core %d average rate is %g Hz\" % (core_id, len(evts)/256))\n sleep(1)\n \n buf_evt_filter.clear()\n \n def get_rates(self, n_ids = None, measurement_period = 1):\n \n if n_ids is None:\n n_ids = [l for l in range(len(self.neurons))]\n \n buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, n_ids)\n evts = buf_evt_filter.get_events()\n sleep(measurement_period)\n evts = buf_evt_filter.get_events()\n buf_evt_filter.clear()\n \n rates = [0 for l in range(len(self.neurons))]\n if len(evts) != 0:\n for spike in evts:\n rates[spike.neuron.get_neuron_id() + 256*spike.neuron.get_core_id() + 1024*spike.neuron.get_core_id()] += 1\n \n for l in range(len(self.neurons)):\n rates[l] = rates[l]/measurement_period\n \n return rates\n \n \n def r(self):\n print(self.get_rates([idx for idx in range(0, 10)]))\n \n \n \n \n def get_core_rate_ts(self, core_id, time_interval):\n\n buf_evt_filter = CtxDynapse.BufferedEventFilter(self.model, [idx for idx in range(core_id*256, core_id*256 + 25)])\n start_time = clock()\n \n while (clock() - start_time < time_interval):\n evts = buf_evt_filter.get_events()\n if len(evts) != 0: \n print(\"Core %d average rate is %g Hz\" % (core_id, len(evts)/(((evts[len(evts)-1].timestamp - evts[0].timestamp) / 1e6)*25)))\n sleep(1)\n \n buf_evt_filter.clear()\n \n \n \n \n \n def monitor(self, core_id, neuron_id):\n \"\"\"\n Wrapper function for monitoring I_mem. Accepts core_id and neuron_id from 0 to 255 instead\n of chip_id and neuron_id from 0 to 1023.\n \n Args:\n core_id (int): core ID to be monitored\n neuron_id (int): neuron index within the selected core\n \"\"\"\n CtxDynapse.dynapse.monitor_neuron(int(core_id / 4), neuron_id + 256*(core_id % 4))\n \n "
},
{
"alpha_fraction": 0.701646089553833,
"alphanum_fraction": 0.722908079624176,
"avg_line_length": 27.05769157409668,
"blob_id": "adc968cb5d16f1bdf5a7990766f86daeb9efcbce",
"content_id": "852f8376cf64a83eb4cc539be73a45ea40ce578b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1458,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 52,
"path": "/run_rnn_with_rpyc.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 14 23:45:13 2019\n\n@author: dzenn\n\"\"\"\n\n#import PyCtxUtils; PyCtxUtils.start_rpyc_server()\n\nimport MainRNNLoop\n\nimport rpyc\n\nprint(\"Successful import\")\nself_path = MainRNNLoop.__file__\nself_path = self_path[0:self_path.rfind('/')]\nexecute_bias_load_string = 'exec(open(\"' + self_path + '/biases/256_biases.py\").read())'\nexecute_rate_tracker_load_string = 'exec(open(\"' + self_path + '/ctxctl_rate_tracker.py\").read())'\n\n\n\nc = rpyc.classic.connect(\"localhost\", 1300)\nRPYC_TIMEOUT = 1000 #defines a higher timeout\nc._config[\"sync_request_timeout\"] = RPYC_TIMEOUT # Set timeout to higher level\nif c:\n print(\"RPyC connection established\")\n\nc.execute(execute_bias_load_string)\nc.execute(execute_rate_tracker_load_string)\n\nprint(\"Clearing CAMs...\")\nc.execute(\"CtxDynapse.dynapse.clear_cam(1)\")\nc.execute(\"CtxDynapse.dynapse.clear_cam(3)\")\n\nprint(\"Initializing the main loop\")\nMainLoop = MainRNNLoop.MainRNNLoop(backend = \"rpyc\", c = c)\nc.namespace['neuron_ids'] = MainLoop.RNNController.neuron_ids\nc.namespace['lookup'] = MainLoop.RNNController.rnn_neurons_idx_lookup\nc.namespace['timesteps'] = MainLoop.RNNController.timesteps\nc.execute(\"rt = RateTracker(neuron_ids, lookup, timesteps, debug = True)\")\n\nprint(\"Loading the dataset\")\nMainLoop.prepare_dataset(\"data/\")\n\nprint(\"Loading complete. Starting...\")\n\nMainLoop.run_loop(100)\n# \n \nMainLoop.export_error()\nMainLoop.export_conn_log()"
},
{
"alpha_fraction": 0.5306777954101562,
"alphanum_fraction": 0.5406360626220703,
"avg_line_length": 31.778947830200195,
"blob_id": "32679e19cd0b466d146832d570c0da8a096f0fda",
"content_id": "eeed4b9865fd52bc937ef78719ac56b7f9f6d374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3113,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 95,
"path": "/ctxctl_rate_tracker.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 19:14:16 2019\n\n@author: dzenn\n\"\"\"\n\n\nimport CtxDynapse\nfrom time import time\n\nclass RateTracker(object):\n def __init__(self, neuron_ids, lookup, timesteps, debug = False):\n \n self.timesteps = timesteps\n self.neuron_ids = neuron_ids\n self.num_neurons = len(neuron_ids)\n self.lookup = lookup\n self.debug = debug\n\n def start_recording_spikes(self):\n \"\"\"\n Initializes the event filter\n \"\"\"\n model = CtxDynapse.model\n self.buf_evt_filter = CtxDynapse.BufferedEventFilter(model, self.neuron_ids)\n evts = self.buf_evt_filter.get_events() # flush the event filter to start a new recording\n self.recording_start_time = time()\n \n def stop_recording_spikes(self):\n \"\"\"\n Stores all recorded events and clears the event filter\n \"\"\"\n self.evts = self.buf_evt_filter.get_events()\n self.recording_stop_time = time()\n self.buf_evt_filter.clear()\n\n \n def process_recorded_evts(self):\n \"\"\"\n Returns firing rates AND input rates based on recorded events and current\n weight matrix\n \"\"\"\n\n lookup = self.lookup\n \n rates = []\n input_rates = []\n \n if self.debug:\n print(\"Counting the spikes...\")\n \n # Preparing arrays and helper variables\n for i in range(self.num_neurons):\n rates.append([])\n input_rates.append([])\n for ts in range(self.timesteps):\n rates[i].append(0)\n input_rates[i].append(0)\n \n if len(self.evts) != 0:\n ref_timestamp = self.evts[0].timestamp\n time_bin_size = int((self.recording_stop_time - self.recording_start_time)*1e+06/self.timesteps)\n \n if self.debug:\n print(\"Binning...\")\n # Placing spikes in bins\n for evt in self.evts:\n n_id = evt.neuron.get_neuron_id() + 256*evt.neuron.get_core_id() + 1024*evt.neuron.get_chip_id()\n idx = lookup[n_id]\n \n time_bin = (evt.timestamp - ref_timestamp)//time_bin_size\n# print(idx, time_bin)\n if time_bin < self.timesteps:\n rates[idx][time_bin] += 1\n \n if self.debug:\n print(\"Normalizing...\")\n # Normalizing spike counts to rates\n for i in range(self.num_neurons):\n for ts in range(self.timesteps):\n rates[i][ts] = rates[i][ts]/(time_bin_size/1e+06)\n \n# # Computing weighted input rate sums\n# for i in range(self.num_neurons):\n# pre_id = self.neuron_ids[i]\n# for post_id in self.post_lookup[pre_id]:\n# for ts in range(self.timesteps):\n# input_rates[self.neuron_ids.index(post_id)][ts] += rates[i][ts]*self.current_weight_matrix[(pre_id, post_id)]\n \n if self.debug:\n print(\"Returning rates...\")\n \n return rates"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7640243768692017,
"avg_line_length": 44.55555725097656,
"blob_id": "811a3f058735adc6fb7c7a3c4551813e9c620abc",
"content_id": "e5d8bf3e52ba98ee993e510bb795e09e6f45eb4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1640,
"license_type": "no_license",
"max_line_length": 347,
"num_lines": 36,
"path": "/README.md",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "# TransferRNN\nWorkgroup Project at CapoCaccia 2019\n\nTransferRNN framework implements an RNN running on Dynapse chip using CortexControl Python API\n\n## Installation\n\n1. Download the nightly build of CortexControl from http://ai-ctx.gitlab.io/ctxctl/primer.html#downloads\n\n2. Clone TransferRNN inside the main CortexControl folder\n```\nclone https://github.com/he-xu/TransferRNN.git\n```\n3. Prepare the `x_projection_train.pkl` and `state_train.pkl` files (*by default the script will look for them in `TranserRNN/data/`*)\n\n## Running\n\nThe framework can run in two modes:\n- ctxctl : inside CortexControl (faster but trickier to setup)\n- rpyc : in a remote python console using RPyC connection (more stable but slower because of data transfering between python threads)\n\n### `ctxctl` mode\n\n1. Start Cortexcontrol: `./cortexcontrol`\n2. In the CortexControl console run `import TransferRNN.run_rnn_in_ctxctl`\n\n**Note:** Most likely, you will get an error while importing numpy. To fix that, you will have to make sure you have `numpy` version `1.15.4` (with `pip install numpy=1.15.4`) in your installation of python and then append the path to `site-packages` of this python installation inside the `TransferRNN/run_rnn_in_ctxctl.py` script on **line 12**.\n\n### `rpyc` mode\n0. Install `RPyC` with `pip install rpyc`\n\n1. Start CortexControl: `./cortexcontrol`\n2. In the CortexControl console run `PyCtxUtils; PyCtxUtils.start_rpyc_server()`\n3. Run `run_rnn_with_rpyc.py` in a python console of your choice (tested in Spyder)\n\n**Note:** Expect the spike counting phase to be slow. Optimization of this phase is the next framework development step.\n"
},
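A minimal sketch of the client side of the `rpyc` mode described above, assuming a classic RPyC server; the host and port are assumptions (the README does not state which port `PyCtxUtils.start_rpyc_server()` opens), so adjust them to your setup.

```python
# Hypothetical rpyc client sketch; host/port are assumptions.
import rpyc

conn = rpyc.classic.connect("localhost")  # default classic port; may differ
CtxDynapse = conn.modules.CtxDynapse      # proxy to the remote module
print(CtxDynapse)                         # sanity check on the connection
```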
{
"alpha_fraction": 0.5007284879684448,
"alphanum_fraction": 0.5069659352302551,
"avg_line_length": 39.919776916503906,
"blob_id": "dc89483b9045768d5ccf7ac5efb1b4392827e2c0",
"content_id": "d9953ec4575f3fd7d83785ca053cada90801e57b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21964,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 536,
"path": "/DynapseRNN.py",
"repo_name": "he-xu/TransferRNN",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 22 16:09:09 2019\n\n@author: dzenn\n\"\"\"\n\n\nfrom time import clock, sleep, time\nfrom math import exp\n\ntry:\n import CtxDynapse\n import NeuronNeuronConnector\n from CtxDynapse import DynapseCamType as SynapseType\nexcept ModuleNotFoundError:\n import rpyc\n print(\"CtxDynapse module not imported. Expecting to run in RPyC mode\")\n\nimport numpy as np\n\nclass DynapseRNN(object):\n \"\"\"\n A controller class for learning-in-the-loop RNN on the Dynapse chip using\n CortexControl Python API.\n \n Weight update is defined by FORCE algorithm and performed by\n generating a ternary weight matrix after gradient descent with\n stochastic rounding within the limitation of number of inputs used per neuron.\n \n The controller places the RNN on one chip and a proxy input population on another.\n \n The controller can be used both through rpyc or directly in CTXCTL console.\n \n \"\"\"\n \n def __init__(self, num_inputs, timesteps, multiplex_factor = 1, chip_id_proxy = 3, chip_id = 1, backend = \"ctxctl\", c = None, debug = False):\n \n \"\"\"\n Args:\n num_inputs (int) : number of input channels (i.e. spike generators)\n timesteps (int) : length of each input sequence\n multiplex_factor (int, optional) : number of silicon neurons on the chip\n which represent a single logical neuron\n (i.e. multiplies the size of the RNN)\n chip_id_proxy (int, optional) : index of the chip where the proxy input\n population is located (should not be the same\n as the RNN chip)\n chip_id (int, optional) : index of the chip where the RNN is located\n backend (str, optional) : sets whether the controller will use RPyC\n or try to import modules directly in the console\n c (RPyC connection, optional) : RPyC connection object, expected if \"rpyc\" is\n set as backend\n debug (bool, optional) : print additional debug info\n \"\"\"\n \n self.creation_time = time()\n \n if backend == \"ctxctl\":\n self.model = CtxDynapse.model\n self.v_model = CtxDynapse.VirtualModel()\n self.connector = NeuronNeuronConnector.DynapseConnector()\n self.SynapseType = SynapseType\n elif backend == \"rpyc\":\n if c is not None:\n self.c = c\n self.model = c.modules.CtxDynapse.model\n self.v_model = c.modules.CtxDynapse.VirtualModel()\n self.connector = c.modules.NeuronNeuronConnector.DynapseConnector()\n self.SynapseType = c.modules.CtxDynapse.DynapseCamType\n# c.execute(\"from time import time, clock, sleep\")\n# c.execute(\"import CtxDynapse\")\n else:\n raise ValueError(\"Selected backend is 'rpyc' but no connection object is given to DynapseRNN\")\n else:\n raise ValueError(\"Unknown backend identifier '\" + backend + \"'. 
Use 'ctxctl' or 'rpyc' only.\")\n \n self.backend = backend\n \n self.neurons = self.model.get_shadow_state_neurons()\n self.virtual_neurons = self.v_model.get_neurons()\n self.neuron_ids = []\n self.poisson_spike_gen = self.model.get_fpga_modules()[0]\n \n self.buf_evt_filter = None\n self.evts = []\n self.recording_start_time = None\n self.recording_stop_time = None\n \n self.teleported_functions = [] # list of functions teleported with rpyc\n \n self.num_inputs = num_inputs\n self.timesteps = timesteps\n \n self.pre_lookup = {}\n self.post_lookup = {}\n self.print = debug\n self.last_pre_timestamp = {}\n self.last_post_timestamp = {}\n self.last_timestamp = {}\n self.last_clock_time = {}\n \n \n self.learn = True\n \n self.debug = debug\n\n self.current_weight_matrix = {}\n \n self.conns_created = 0\n \n self.pre_evts = []\n self.post_evts = []\n self.pre_times = []\n self.post_times = []\n self.da_evts = []\n self.pre_diff = []\n self.post_diff = []\n self.conn_log = []\n \n ### GD Learning parameters\n \n self.error = None\n self.learning_rate = 0.1\n self.regularizer = 1\n\n self.multiplex_factor = multiplex_factor\n \n ### Allocating neurons\n if self.debug:\n print(\"Allocating populations\")\n \n self.rnn_neurons = [n for n in self.neurons if n.get_chip_id()==chip_id\n and (n.get_neuron_id() + n.get_core_id()*256) < self.num_inputs * self.multiplex_factor]\n \n self.proxy_neurons = [n for n in self.neurons if n.get_chip_id()==chip_id_proxy\n and (n.get_neuron_id() + n.get_core_id()*256) < self.num_inputs*2] \n \n self.rnn_neurons_idx_lookup = {}\n i = 0\n for neuron in self.rnn_neurons:\n self.rnn_neurons_idx_lookup.update({neuron.get_neuron_id() + 256*neuron.get_core_id() + 1024*neuron.get_chip_id() : i})\n i += 1\n ### Creating spikegen -> proxy_neurons connections\n \n for n in self.proxy_neurons:\n self.connector.add_connection(self.virtual_neurons[(n.get_neuron_id())],\n n,\n self.SynapseType.SLOW_EXC)\n self.connector.add_connection(self.virtual_neurons[(n.get_neuron_id())],\n n,\n self.SynapseType.SLOW_EXC)\n \n if self.debug:\n print(\"Connected spikegen\")\n \n ### Creating proxy_neurons -> rnn_neurons connections\n \n for n in self.rnn_neurons:\n self.neuron_ids.append(n.get_neuron_id() + n.get_core_id()*256 + n.get_chip_id()*1024)\n self.connector.add_connection(self.proxy_neurons[(n.get_neuron_id()) % self.num_inputs],\n n,\n self.SynapseType.SLOW_EXC)\n self.connector.add_connection(self.proxy_neurons[(n.get_neuron_id()) % self.num_inputs],\n n,\n self.SynapseType.SLOW_EXC)\n self.connector.add_connection(self.proxy_neurons[((n.get_neuron_id()) % self.num_inputs) + self.num_inputs],\n n,\n self.SynapseType.FAST_INH)\n self.connector.add_connection(self.proxy_neurons[((n.get_neuron_id()) % self.num_inputs) + self.num_inputs],\n n,\n self.SynapseType.FAST_INH)\n \n self.model.apply_diff_state() \n self.num_neurons = len(self.neuron_ids) \n \n# self.P_prev = self.regularizer*np.eye(self.num_neurons) \n self.poisson_spike_gen.set_chip_id(chip_id_proxy)\n \n if self.debug:\n print(\"Connected proxy population\")\n \n pre_id_list = []\n post_id_list = []\n \n for i in range(self.num_inputs * self.multiplex_factor):\n for j in range(self.num_inputs * self.multiplex_factor):\n pre_id_list.append(self.neuron_ids[i])\n post_id_list.append(self.neuron_ids[j])\n \n \n ## Prepare connectivity matrix\n for pre_id, post_id in zip(pre_id_list, post_id_list):\n add_to_dict(self.post_lookup, pre_id, post_id)\n add_to_dict(self.pre_lookup, post_id, pre_id)\n 
self.last_pre_timestamp[(pre_id, post_id)] = 0\n self.last_post_timestamp[(pre_id, post_id)] = 0\n self.last_timestamp[(pre_id, post_id)] = 0\n self.last_clock_time[(pre_id, post_id)] = 0\n\n self.current_weight_matrix[(pre_id, post_id)] = 0\n \n# self.init_weights()\n self.w_ternary = np.zeros([self.num_neurons, self.num_neurons])\n self.apply_new_matrix(self.w_ternary)\n \n# if self.backend == 'rpyc':\n# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.start_recording_spikes))\n# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.stop_recording_spikes))\n# self.teleported_functions.append(rpyc.utils.classic.teleport_function(c, self.process_recorded_evts))\n# self.start_recording_spikes = lambda: self.teleported_functions[0](self)\n# self.stop_recording_spikes = lambda: self.teleported_functions[1](self)\n# self.process_recorded_evts = lambda: self.teleported_functions[2](self)\n \n\n \n if self.debug:\n print(\"RNN Init Complete\")\n \n def init_weights(self, cam_num=60):\n \"\"\"\n Random ternary weight initialization\n \"\"\"\n w_ternary = np.zeros([self.num_neurons, self.num_neurons])\n w_ternary[:,:cam_num//2]=1\n w_ternary[:,cam_num//2:cam_num]=-1\n w_rand = np.random.rand(self.num_neurons, self.num_neurons)\n w_order = np.argsort(w_rand, axis=0)\n self.w_ternary = w_ternary.T[w_order, np.arange(w_order.shape[1])].T\n \n \n def start_recording_spikes(self):\n \"\"\"\n Initializes the event filter\n \"\"\"\n if self.backend == 'rpyc':\n self.c.execute('rt.start_recording_spikes()')\n else: \n model = CtxDynapse.model\n self.buf_evt_filter = CtxDynapse.BufferedEventFilter(model, self.neuron_ids)\n evts = self.buf_evt_filter.get_events() # flush the event filter to start a new recording\n self.recording_start_time = time()\n \n def stop_recording_spikes(self):\n \"\"\"\n Stores all recorded events and clears the event filter\n \"\"\"\n if self.backend == 'rpyc':\n self.c.execute('rt.stop_recording_spikes()')\n else:\n self.evts = self.buf_evt_filter.get_events()\n self.recording_stop_time = time()\n self.buf_evt_filter.clear()\n\n \n def process_recorded_evts(self):\n \"\"\"\n Returns firing rates AND input rates based on recorded events and current\n weight matrix\n \"\"\"\n if self.backend == 'rpyc':\n self.c.execute('rates = rt.process_recorded_evts()')\n return self.c.namespace['rates']\n else:\n \n lookup = self.rnn_neurons_idx_lookup\n evts = self.evts\n rates = []\n input_rates = []\n \n if self.debug:\n print(\"Counting the spikes...\")\n \n # Preparing arrays and helper variables\n for i in range(self.num_neurons):\n rates.append([])\n input_rates.append([])\n for ts in range(self.timesteps):\n rates[i].append(0)\n input_rates[i].append(0)\n \n if len(evts) != 0:\n ref_timestamp = evts[0].timestamp\n time_bin_size = int((self.recording_stop_time - self.recording_start_time)*1e+06/self.timesteps)\n \n if self.debug:\n print(\"Binning...\")\n # Placing spikes in bins\n for evt in evts:\n n_id = evt.neuron.get_neuron_id() + 256*evt.neuron.get_core_id() + 1024*evt.neuron.get_chip_id()\n idx = lookup[n_id]\n \n time_bin = (evt.timestamp - ref_timestamp)//time_bin_size\n # print(idx, time_bin)\n if time_bin < self.timesteps:\n rates[idx][time_bin] += 1\n \n if self.debug:\n print(\"Normalizing...\")\n # Normalizing spike counts to rates\n for i in range(self.num_neurons):\n for ts in range(self.timesteps):\n rates[i][ts] = rates[i][ts]/(time_bin_size/1e+06)\n \n # # Computing weighted input rate sums\n # for i in 
range(self.num_neurons):\n # pre_id = self.neuron_ids[i]\n # for post_id in self.post_lookup[pre_id]:\n # for ts in range(self.timesteps):\n # input_rates[self.neuron_ids.index(post_id)][ts] += rates[i][ts]*self.current_weight_matrix[(pre_id, post_id)]\n \n if self.debug:\n print(\"Returning rates...\")\n \n return rates #, input_rates\n \n \n def apply_new_matrix(self, w_ternary, print_w = False):\n \"\"\"\n Applies the new weight matrix to the chip\n \"\"\"\n \n if self.debug:\n print(\"Applying connectivity changes...\")\n \n num_conns_removed = 0\n num_conns_created = 0\n \n \n for i in range(len(self.pre_lookup)):\n for j in range(len(self.post_lookup)):\n pre_id = self.neuron_ids[i]\n post_id = self.neuron_ids[j]\n current_w = self.current_weight_matrix[(pre_id, post_id)]\n delta_w = w_ternary[j][i] - current_w\n \n# if self.debug:\n# print(w_ternary[j][i], current_w)\n# print(\"Delta: \", int(abs(delta_w)))\n \n for conn_idx in range(int(abs(delta_w))):\n if print_w:\n print(\"removal phase\")\n print(delta_w, current_w, w_ternary[j][i], i, j)\n \n if delta_w > 0:\n if current_w < 0:\n self.connector.remove_connection(self.neurons[pre_id], self.neurons[post_id])\n num_conns_removed += 1\n self.current_weight_matrix[(pre_id, post_id)] += 1\n \n elif delta_w < 0:\n if current_w > 0:\n self.connector.remove_connection(self.neurons[pre_id], self.neurons[post_id])\n num_conns_removed += 1\n self.current_weight_matrix[(pre_id, post_id)] -= 1\n \n current_w = self.current_weight_matrix[(pre_id, post_id)]\n \n\n \n for i in range(len(self.pre_lookup)):\n for j in range(len(self.post_lookup)):\n pre_id = self.neuron_ids[i]\n post_id = self.neuron_ids[j]\n current_w = self.current_weight_matrix[(pre_id, post_id)]\n delta_w = w_ternary[j][i] - current_w\n \n# if self.debug:\n# print(w_ternary[j][i], current_w)\n# print(\"Delta: \", int(abs(delta_w)))\n \n for conn_idx in range(int(abs(delta_w))):\n if print_w:\n print(\"addition phase\")\n print(delta_w, current_w, w_ternary[j][i], i, j)\n \n if delta_w > 0:\n if current_w >= 0:\n self.connector.add_connection(self.neurons[pre_id], self.neurons[post_id], self.SynapseType.FAST_EXC)\n num_conns_created += 1\n self.current_weight_matrix[(pre_id, post_id)] += 1\n \n elif delta_w < 0:\n if current_w <= 0:\n self.connector.add_connection(self.neurons[pre_id], self.neurons[post_id], self.SynapseType.FAST_INH)\n num_conns_created += 1\n self.current_weight_matrix[(pre_id, post_id)] -= 1\n \n current_w = self.current_weight_matrix[(pre_id, post_id)]\n \n \n \n self.model.apply_diff_state()\n \n if self.debug:\n print(\"Done.\")\n print(\"Neuron 0 matrix sum\", np.abs(w_ternary[0, :]).sum())\n print(\"%d conns removed, %d conns created\" % (num_conns_removed, num_conns_created))\n \n self.conn_log.append([num_conns_removed, num_conns_created])\n \n \n \n def present_stimulus(self, stim_array, timestep_length):\n \"\"\"\n Presents the array of rates to the virtual neuron population\n \"\"\"\n \n if self.debug:\n print(\"Presenting the digit...\")\n \n self.poisson_spike_gen.start()\n \n for ts in range(self.timesteps): \n for i in range(self.num_inputs):\n rate = stim_array[ts, i]*100\n if rate >= 0:\n self.poisson_spike_gen.write_poisson_rate_hz(i, rate)\n else:\n self.poisson_spike_gen.write_poisson_rate_hz(i + self.num_inputs, abs(rate))\n \n sleep(timestep_length)\n \n for i in range(self.num_inputs):\n self.poisson_spike_gen.write_poisson_rate_hz(i, 0)\n \n self.poisson_spike_gen.stop()\n \n if self.debug:\n print(\"Done.\")\n \n def ternarize(self, 
w_new, cam_num):\n \"\"\"\n \n \"\"\"\n w_order = np.argsort(np.abs(w_new.T), axis=0)\n w_sorted = w_new.T[w_order, np.arange(w_order.shape[1])]\n w_sorted[:-cam_num, :]=0\n w_order_order = np.argsort(w_order, axis=0)\n w_undone = w_sorted[w_order_order, np.arange(w_order_order.shape[1])].T\n w_undone[w_undone>0] = 1\n w_undone[w_undone<0] = -1\n return w_undone\n \n \n def stochastic_round(self, w_ternary, d_w, cam_num):\n \"\"\"\n Stochastically rounds the ternary connectivity matrix \n \"\"\"\n w_new = w_ternary - d_w\n w_uniform = np.random.uniform(size=d_w.shape)\n d_w_rounded = ((w_uniform < np.abs(d_w))*np.sign(d_w)).astype(np.int)\n w_new_rounded = w_ternary - d_w_rounded\n w_new_rounded[w_new_rounded>1] = 1\n w_new_rounded[w_new_rounded<-1] = -1\n \n w_order = np.argsort(np.abs(w_new.T), axis=0)\n w_new_rounded_sorted = w_new_rounded.T[w_order, np.arange(w_order.shape[1])]\n num_neuron = w_order.shape[1]\n for idx_post in range(num_neuron):\n cam_used = 0\n for idx_pre in range(num_neuron):\n w_ij = w_new_rounded_sorted[-idx_pre, idx_post]\n if np.abs(w_ij) > 0.1:\n cam_used += 1\n if cam_used >= cam_num:\n w_new_rounded_sorted[:-idx_pre, idx_post] = 0\n break\n w_order_order = np.argsort(w_order, axis=0)\n w_undone = w_new_rounded_sorted[w_order_order, np.arange(w_order_order.shape[1])].T\n return w_undone\n\n def update_weight(self, rate_psc, rate_teacher, cam_num=60, learning_rate=0.1):\n \"\"\"\n Generates the new ternary connectivity matrix based on measured on-chip rates and teacher signal\n \n Args:\n rate_psc (numpy array) : array of rates of shape (num_neurons, timesteps)\n rate_teacher (numpy array) : array of teacher rates of shape (num_inputs, timesteps)\n cam_num (int, optional) : maximum number of CAMs used by each neurons\n learning_rate (float, optional) : scaler of the weight change gradients\n \n Returns:\n w_ternary : new on-chip connectivity matrix\n c_grad : increase\\decrease global activity level\n \"\"\"\n rate_recurrent = self.w_ternary.dot(rate_psc)\n rate_teacher_tile = np.tile(rate_teacher.T, (self.multiplex_factor,1))\n self.error = rate_recurrent - rate_teacher_tile\n d_w = 0\n for t in range(self.timesteps):\n r_t = rate_psc[:, t][:,np.newaxis]\n# P_up = self.P_prev.dot(r_t.dot(r_t.T.dot(self.P_prev)))\n# P_down = 1 + r_t.T.dot(self.P_prev.dot(r_t))\n# self.P_prev = self.P_prev - P_up / P_down\n e_t = self.error[:, t][:,np.newaxis]\n# d_w += e_t.dot(r_t.T.dot(self.P_prev))\n d_w += e_t.dot(r_t.T)\n d_w = d_w / self.timesteps\n w_new = self.w_ternary - learning_rate*d_w\n norm_ratio = np.linalg.norm(w_new, 'fro')/np.linalg.norm(self.w_ternary, 'fro')\n self.w_ternary = self.stochastic_round(self.w_ternary, learning_rate*d_w, cam_num)\n \n #self.w_ternary = self.ternarize(w_new, cam_num)\n \n print(d_w.mean(), d_w.max(), d_w.min())\n print(rate_recurrent.mean(), rate_teacher.mean())\n\n if norm_ratio > 1:\n c_grad = 1\n else:\n c_grad = -1\n return c_grad, np.abs(self.error).mean() \n \n \n \n\ndef relaxate(A, tau, delta_t):\n \"\"\"\n Computes the exponential\n \"\"\"\n return A*exp(-delta_t/tau)\n# print(len(evts))\n \ndef add_to_dict(dct, key, value):\n \"\"\"\n A tool to add elements to dictionaries of lists.\n \n Appends a value to the list dct[key], otherwise creates it.\n \"\"\"\n if key in dct:\n dct[key].append(value)\n else:\n dct[key] = [value]\n \n \n \n "
}
] | 9 |
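A standalone sketch (not from the repo) of the stochastic-rounding idea used conceptually in `DynapseRNN.stochastic_round`: each fractional weight change is applied with probability equal to its magnitude, so the weights stay ternary while following the gradient in expectation. The helper name is illustrative.

```python
import numpy as np

def stochastic_round_step(w_ternary, d_w):
    # apply each entry of d_w with probability |d_w|, keeping weights in {-1, 0, 1}
    u = np.random.uniform(size=d_w.shape)
    step = ((u < np.abs(d_w)) * np.sign(d_w)).astype(int)
    return np.clip(w_ternary - step, -1, 1)

w = np.zeros((3, 3), dtype=int)
print(stochastic_round_step(w, np.full((3, 3), -0.5)))  # ~half the entries become +1
```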
FlameDavid92/Python-ProgettazioneDiAlgoritmi | https://github.com/FlameDavid92/Python-ProgettazioneDiAlgoritmi | 451876fb1b6d7ed6c4696666befc9baaa2ee6208 | d56be11679f5bc89aa5ebe587ec51959909a25be | 9159e7d523cf162de772e1605806bc202929821f | refs/heads/main | 2023-01-09T02:59:44.834332 | 2020-11-14T21:17:44 | 2020-11-14T21:17:44 | 312,903,506 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6485294103622437,
"alphanum_fraction": 0.6644607782363892,
"avg_line_length": 41.94736862182617,
"blob_id": "385a9930aac8de91b915b0193e0261bf37ab709f",
"content_id": "1a453baf576b7d287636311c632fec5b6e175f63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4099,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 95,
"path": "/puntiDiArticolazione.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "'''\nC'è una certa parentela tra i punti di articolazione e i ponti. Se {u, v} è un ponte tale che u ha grado almeno 2, allora\nu è un punto di articolazione. Però se u è un punto di articolazione, non è detto che qualche arco incidente in u sia\nun ponte.\n\nPunti di articolazione\nVediamo allora come trovare i punti di articolazione. Facciamo una DFS di un grafo non diretto e connesso G,\npartendo da un nodo u. Come possiamo riconoscere se u è un punto di articolazione? Chiaramente, una\ncondizione sufficiente affinché non lo sia è che la rimozione di u non sconnetta l'albero di visita (se la rimozione non\nsconnette l'albero di visita a maggior ragione non sconnette G). È anche una condizione necessaria perché u non\nsia un punto di articolazione? In altri termini, se la rimozione di u sconnette l'albero di visita, sconnette anche il\ngrafo?\nIniziamo con la radice u dell'albero della DFS. Se la rimozione di u sconnette l'albero, allora u ha almeno due\nsottoalberi figli. Se eliminiamo u i sottografi relativi a questi sottoalberi saranno connessi solo se ci sono archi tra di\nessi. Ma non ci possono essere tali archi perchè non sarebbero archi all'indietro. Quindi, se la rimozione di u\nsconnette l'albero di visita, sconnette anche il grafo. Ne segue che la radice della DFS è un punto di articolazione\nse e solo se ha almeno due figli. Vediamo ora gli altri nodi. Se un nodo v è un punto di articolazione, la sua\nrimozione necessariamente sconnette almeno un sottoalbero S della DFS da v. Nel senso che i nodi di S non sono\npiù raggiungibili da u, nel grafo senza v. Questo accade se e solo se non ci sono archi all'indietro da nodi di S a\nantenati di v. Quindi, un nodo v, diverso dalla radice della DFS, è un punto di articolazione se e solo se esiste un\nsottoalbero della DFS da v che non ha archi all'indietro verso antenati di v.\nPossiamo incorporare queste osservazioni in un algoritmo per trovare i punti di articolazione. Modifichiamo la DFS\nper mantenere i tempi di inizio visita dei nodi in un array tt . Inoltre, per determinare le condizioni circa gli archi\nall'indietro dei sottoalberi, facciamo sì che la procedura modificata di visita DFS da v ritorni il minimo tempo di inizio\nvisita tra quelli di tutti i nodi toccati durante la DFS da v. 
Così, un nodo v è un punto di articolazione se e solo se\nesiste un figlio w di v per cui il valore b ritornato dalla visita modificata da w soddisfa b >= tt[v] .\n'''\n\n\ndef dfsPA(G, u, tt, C, A):\n C[0] += 1\n tt[u] = C[0]\n back = C[0]\n children = 0\n for adjacent in G[u]:\n if tt[adjacent] == 0:\n children += 1\n b = dfsPA(G, adjacent, tt, C, A)\n if tt[u] > 1 and b >= tt[u]:\n A.add(u)\n back = min(back, b)\n else:\n back = min(back, tt[adjacent])\n if tt[u] == 1 and children >= 2:\n A.add(u)\n return back\n\n\ndef trovaPuntiArticolazione(G):\n tt = [0 for _ in G] # array dei tempi di inizio visita\n C = [0] # contatore dei nodi visitati\n A = set() # insieme dei punti di articolazione\n dfsPA(G, 0, tt, C, A)\n return A\n\n\ndef dfsPAAtoB(G, a, b, tt, C, A):\n C[0] += 1\n tt[a][0] = C[0]\n back = C[0]\n children = 0\n for adjacent in G[a]:\n if tt[adjacent][0] == 0:\n children += 1\n bc = dfsPAAtoB(G, adjacent, b, tt, C, A)\n if tt[a][0] > 1 and bc >= tt[a][0] and tt[b][0] > tt[a][0] and tt[b][1] <= tt[a][1]:\n A.add(a)\n back = min(back, bc)\n else:\n back = min(back, tt[adjacent][0])\n return back\n\n\ndef trovaPuntiCriticiAtoB(G, a, b):\n tt = [[0, 0] for _ in G] # array dei tempi di inizio visita\n C = [0] # contatore dei nodi visitati\n A = set() # insieme dei punti di articolazione\n dfsPAAtoB(G, a, b, tt, C, A)\n return A\n\n\ngrfNonDirCicl = {\n 0: [1, 7],\n 1: [0, 2],\n 2: [1, 3],\n 3: [2, 4, 7],\n 4: [3, 5, 6],\n 5: [4, 6],\n 6: [4, 5],\n 7: [0, 3, 8],\n 8: [7],\n}\n\nprint(trovaPuntiArticolazione(grfNonDirCicl))\nprint(trovaPuntiCriticiAtoB(grfNonDirCicl, 0, 4))\n"
},
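A hypothetical brute-force cross-check (not from the repo) for `trovaPuntiArticolazione`: remove each node in turn and test whether the rest of an undirected, connected graph stays connected. Useful only for small graphs, but handy for sanity-testing the DFS-based version.

```python
# Brute-force articulation points; function names are illustrative.
def is_connected_without(G, removed):
    nodes = [v for v in G if v != removed]
    if not nodes:
        return True
    seen, stack = {nodes[0]}, [nodes[0]]
    while stack:
        v = stack.pop()
        for w in G[v]:
            if w != removed and w not in seen:
                seen.add(w)
                stack.append(w)
    return len(seen) == len(nodes)

def articulation_points_bruteforce(G):
    return {v for v in G if not is_connected_without(G, v)}

print(articulation_points_bruteforce({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}))  # {2}
```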
{
"alpha_fraction": 0.6120218634605408,
"alphanum_fraction": 0.627364456653595,
"avg_line_length": 32.04861068725586,
"blob_id": "7b58b31c9aac18e0be1e86bda83ed266108d54f5",
"content_id": "22697208b78144e83069d330a37909abdd8d2e17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4786,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 144,
"path": "/forteConnessione.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import simpleStackQueue\n\n'''\nDiscussione dell'esercizio [sensi unici]\nLa rete viaria della cittadina può essere rappresentata facilmente tramite un grafo diretto in cui i nodi sono gli\nincroci di due o più strade e ogni arco corrisponde a una strada (tra due incroci). Allora la proprietà che vorrebbe il\nsindaco equivale alla forte connessione del grafo. Si osservi che non fa differenza se due punti A e B che si\nvogliono connettere sono sugli incroci o sulle strade.\nCome possiamo verificare se un grafo è fortemente connesso? Un modo semplice consiste nel fare una visita a\npartire da ogni nodo del grafo. Se tutte le visite raggiungono tutti i nodi del grafo, allora il grafo è fortemente\nconnesso. Altrimenti non lo è. Ma questo algoritmo è piuttosto inefficiente, infatti richiede n visite e quindi tempo\nO(n(n + m)).\nMa non è necessario effettuare così tante visite, cioè una per ogni nodo del grafo. Se un grafo G è fortemente\nconnesso, fissando un nodo u, sappiamo che tutti i nodi di G sono raggiungibili da u e che da ogni nodo si può\nraggiungere u. Inoltre la validità di questa proprietà per uno specifico nodo, implica che il grafo è fortemente\nconnesso. Infatti, dati due qualsiasi nodi v e w, si può ottenere un cammino che va da v a w concatenando un\ncammino da v a u con uno da u a w e tali cammini esistono in virtù della proprietà suddetta; in modo simmetrico si\nottiene un cammino da w a v. Riassumendo abbiamo un algoritmo per verificare se un grafo G è fortemente\nconnesso: facciamo una visita da un nodo fissato u per verificare che tutti i nodi sono raggiungibili da u e poi, per\nverificare che da ogni nodo u è raggiungibile, basta fare una visita da u del grafo trasposto.\n'''\n\n\ndef dfs(G, u, VIS):\n VIS[u] = 0\n for w in G[u]:\n if VIS[w] == -1:\n dfs(G, w, VIS)\n\n\ndef checkForteConnessione(G):\n VIS = [-1 for _ in G]\n dfs(G, 0, VIS) # scegliamo 0 come nodo di partenza ma va bene un qualunque nodo.\n for x in VIS:\n if x == -1:\n print(\"Non è possibile raggiungere tutti i nodi da 0\")\n return -1 # Non è possibile raggiungere tutti i nodi da 0\n Gt = {x: [] for x in G}\n for node in G:\n for adjacent in G[node]:\n Gt[adjacent].append(node)\n for node in G:\n VIS[node] = -1\n dfs(Gt, 0, VIS)\n for x in VIS:\n if x == -1:\n print(\"Non è possibile raggiungere 0 da tutti i nodi\")\n return -1 # Non è possibile raggiungere 0 da tutti i nodi\n print(\"Il grafo è fortemente connesso\")\n return 0\n\n\n'''Algoritmo di Tarjan'''\n\ndef DFS_SCCTarj(G, node, CC, S, c, nc):\n c[0] += 1\n CC[node] = -c[0] # il tempo di inizio visita in negativo per distinguerlo dall'indice di una componente\n S.push(node)\n back = c[0]\n for adjacent in G[node]:\n if CC[adjacent] == 0:\n b = DFS_SCCTarj(G, adjacent, CC, S, c, nc)\n back = min(back, b)\n elif CC[adjacent] < 0: # la componente di adjacent non è ancora stata determinata\n back = min(back, -CC[adjacent])\n if back == -CC[node]: # node è una c-radice\n nc[0] += 1\n w = S.pop()\n CC[w] = nc[0]\n while w != node:\n w = S.pop()\n CC[w] = nc[0]\n return back\n\ndef SCCTarj(G):\n CC = [0 for _ in G] # array che darà l'indice della componente di ogni nodo, inizializzato a 0\n nc = [0] # contatore componenti\n c = [0] # contatore nodi visitati\n S = simpleStackQueue.Stack()\n for node in G:\n if CC[node] == 0:\n DFS_SCCTarj(G, node, CC, S, c, nc)\n return CC\n\ndef DFSNumVis(G, u):\n def DFSVIS(Gg, uu, VVIS):\n VVIS[uu] = 0\n for adjacent in Gg[uu]:\n if VVIS[adjacent] == -1:\n DFSVIS(Gg, adjacent, VVIS)\n VIS = [-1 for _ in 
G]\n DFSVIS(G, u, VIS)\n count = 0\n for n in VIS:\n if n == 0:\n count += 1\n return count\n\n# esercizio nodi broadcast\ndef DFS_BroadcastNodes(G):\n CC = SCCTarj(G)\n nc = 0\n u = 0\n for node in G:\n if CC[node] > nc:\n nc = CC[node]\n u = node\n vis = DFSNumVis(G, u)\n print(vis)\n B = []\n if vis == len(G):\n for node in G:\n if CC[node] == nc:\n B.append(node)\n return B\n\ngrafoFortConn = {\n 0: [1],\n 1: [2],\n 2: [0, 3],\n 3: [2]\n}\ngrafoNonFortConn1 = {\n 0: [1, 2, 3],\n 1: [],\n 2: [1],\n 3: [2]\n}\n\ngrafoNonFortConn2 = {\n 0: [1],\n 1: [0],\n 2: [0],\n 3: [0, 2]\n}\n\n#checkForteConnessione(grafoFortConn)\n#checkForteConnessione(grafoNonFortConn1)\n#checkForteConnessione(grafoNonFortConn2)\nprint(SCCTarj(grafoFortConn))\nprint(SCCTarj(grafoNonFortConn1))\nprint(SCCTarj(grafoNonFortConn2))\n\nprint(DFS_BroadcastNodes(grafoNonFortConn2))"
},
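A hypothetical brute-force cross-check (not from the repo) for `SCCTarj`: two nodes belong to the same strongly connected component iff each can reach the other, which is easy to test directly on small graphs.

```python
# Brute-force SCC membership test; function names are illustrative.
def reachable_from(G, s):
    seen, stack = {s}, [s]
    while stack:
        v = stack.pop()
        for w in G[v]:
            if w not in seen:
                seen.add(w)
                stack.append(w)
    return seen

def same_scc_bruteforce(G, u, v):
    return v in reachable_from(G, u) and u in reachable_from(G, v)

print(same_scc_bruteforce({0: [1], 1: [2], 2: [0, 3], 3: [2]}, 0, 2))  # True
print(same_scc_bruteforce({0: [1], 1: [2], 2: [0, 3], 3: []}, 0, 3))   # False
```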
{
"alpha_fraction": 0.42408376932144165,
"alphanum_fraction": 0.49040138721466064,
"avg_line_length": 26.238094329833984,
"blob_id": "653a31f3f9a25053a88f4e7318ea0d2f70e7b688",
"content_id": "d1917661cea196e88b2805897d6baaa27c63add0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 573,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 21,
"path": "/eseBacktracking.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "def printSottSeqCresc(n, X, sol, u, z, check, k):\n if k == n:\n print(sol)\n return\n sol[k] = -1\n printSottSeqCresc(n, X, sol, u, z, check, k+1)\n if X[k] == 0 and check == 0:\n sol[k] = 0\n printSottSeqCresc(n, X, sol, u, z+1, 0, k+1)\n elif X[k] == 1 and z >= (u+1):\n sol[k] = 1\n printSottSeqCresc(n, X, sol, u+1, z, 1, k + 1)\n\n\nX = [1, 1, 0, 0]\nX2 = [0, 1, 0, 1]\nsol = [-1 for _ in X]\nsol2 = [-1 for _ in X2]\nprintSottSeqCresc(len(X), X, sol, 0, 0, 0, 0)\nprint(\"\\n\\n\")\nprintSottSeqCresc(len(X2), X2, sol2, 0, 0, 0, 0)\n\n"
},
{
"alpha_fraction": 0.47246021032333374,
"alphanum_fraction": 0.5036719441413879,
"avg_line_length": 22.854015350341797,
"blob_id": "67f1fd2adaf791c41ad3f3e0781eef3ddedb30ed",
"content_id": "ed2b274fa8b075247f66a07421a33e8e01befe0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3270,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 137,
"path": "/BFS.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import collections\nimport math\n\n\ndef BFS(G, u):\n P = [-1 for _ in G] # array dei padri\n DIST = [-1 for _ in G] # array delle distanze\n P[u] = u # radice dell'albero BFS\n DIST[u] = 0\n Q = collections.deque()\n Q.append(u) # accoda u alla coda\n while len(Q) != 0:\n v = Q.popleft() # preleva il primo nodo della coda\n for adjacent in G[v]:\n if P[adjacent] == -1:\n P[adjacent] = v\n DIST[adjacent] = DIST[v] + 1\n Q.append(adjacent)\n return P, DIST\n\n\n'''\nIl vettore dei padri rappresenta l'albero di visita della BFS e quindi per ogni nodo v contiene un cammino di lunghezza\nminima da u a v. Per ricostruire un tale cammino basta partire da v e percorrerlo di padre in padre tramite P fino alla\nradice u.\n'''\n\n\ndef pathMinDist(G, u, v):\n P = BFS(G, u)[0]\n if P[v] != -1:\n L = collections.deque()\n L.appendleft(v)\n while v != u:\n v = P[v]\n L.appendleft(v)\n return L\n else:\n print(\"v non è raggiungibile a partire da u, la loro distanza è inf\")\n return math.inf\n\n\ndef calcolaNCamminiMinimi(G, u):\n DIST = [-1 for _ in G]\n M = [0 for _ in G]\n DIST[u] = 0\n M[u] = 1\n Q = collections.deque()\n Q.append(u)\n while len(Q) != 0:\n v = Q.popleft()\n for adjacent in G[v]:\n if DIST[adjacent] == -1:\n DIST[adjacent] = DIST[v]+1\n M[adjacent] = M[v]\n Q.append(adjacent)\n elif DIST[adjacent] == DIST[v]+1:\n M[adjacent] = M[adjacent]+M[v]\n return M\n\ngrafetto = {\n 0: [1, 3, 5],\n 1: [2],\n 2: [4],\n 3: [5],\n 4: [0],\n 5: [4]\n}\n\ngraf = {\n 0: [2, 3, 4],\n 1: [4],\n 2: [5],\n 3: [5],\n 4: [5],\n 5: [6, 7],\n 6: [1],\n 7: [1]\n}\n\n\ngraf2 = {\n 0: [3, 6, 7],\n 1: [5],\n 2: [4],\n 3: [],\n 4: [0, 3],\n 5: [0, 2, 7],\n 6: [],\n 7: [2]\n}\n\n#C = BFS(grafetto, 0)\n#print(\"PADRI: \"+str(C[0])+\"\\nDISTANZE: \"+str(C[1]))\n\n#print(pathMinDist(grafetto, 0, 4))\n\n#print(calcolaNCamminiMinimi(grafetto, 0))\n#print(calcolaNCamminiMinimi(graf, 0))\n\nC = BFS(graf2, 0)\nprint(\"PADRI: \"+str(C[0])+\"\\nDISTANZE: \"+str(C[1]))\n\n\ndef bfsRecArchi(G, u, contatori, c):\n VIS = [-1 for _ in G]\n c += 1\n VIS[u] = c\n P = [-1 for _ in G] # array dei padri\n DIST = [-1 for _ in G] # array delle distanze\n P[u] = u # radice dell'albero BFS\n DIST[u] = 0\n Q = collections.deque()\n Q.append(u) # accoda u alla coda\n while len(Q) != 0:\n v = Q.popleft() # preleva il primo nodo della coda\n for adjacent in G[v]:\n if VIS[adjacent] == -1:\n contatori[0] += 1 # n. archi albero\n c += 1\n VIS[adjacent] = c\n P[adjacent] = v\n DIST[adjacent] = DIST[v] + 1\n Q.append(adjacent)\n elif VIS[adjacent] > 0:\n contatori[1] += 1 # n. archi all'indietro o di attraversamento\n return P, DIST\n\n\ndef calcolaArchiBFS(G, u):\n contatori = [0, 0]\n bfsRecArchi(G, 0, contatori, 0)\n print(\"n.archi albero: \"+str(contatori[0]))\n print(\"n. archi all'indietro o di attraversamento: \" + str(contatori[1]))\n\n\ncalcolaArchiBFS(graf2, 0)\n"
},
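A self-contained sketch (not from the repo) of the shortest-path counting idea implemented by `calcolaNCamminiMinimi`: during the BFS, the count of a node accumulates the counts of all its equal-distance predecessors.

```python
from collections import deque

def count_shortest_paths(G, s):
    dist = {s: 0}
    cnt = {v: 0 for v in G}
    cnt[s] = 1
    q = deque([s])
    while q:
        v = q.popleft()
        for w in G[v]:
            if w not in dist:          # first time w is reached: tree edge
                dist[w] = dist[v] + 1
                cnt[w] = cnt[v]
                q.append(w)
            elif dist[w] == dist[v] + 1:  # another shortest route into w
                cnt[w] += cnt[v]
    return cnt

print(count_shortest_paths({0: [1, 2], 1: [3], 2: [3], 3: []}, 0))  # {0: 1, 1: 1, 2: 1, 3: 2}
```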
{
"alpha_fraction": 0.6677560210227966,
"alphanum_fraction": 0.6840958595275879,
"avg_line_length": 42.71428680419922,
"blob_id": "adf9cddaaac053a39517d59336b60146e6a30922",
"content_id": "b929d1380636cc4b6fa36da61fb15f72d8700606",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3696,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 84,
"path": "/esercizioDfsTrovaPonti2.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "'''\nDiscussione dell'esercizio [strade critiche]\nPossiamo rappresentare la rete viaria con un grafo G non diretto in cui i nodi sono gli incroci e due nodi sono\ncollegati da un arco se c'è una strada che collega i corrispondenti incroci. Per ipotesi G è un grafo connesso. Una\nstrada critica corrisponde a un ponte del grafo G. Per trovare tutti i ponti, un algoritmo molto semplice consiste\nnell'esaminare ogni arco {u, v} considerando il grafo G' che si ottiene rimuovendo l'arco da G e controllare se G' è\nconnesso (se non lo è, l'arco è un ponte, altrimenti non è un ponte). Ma l'algoritmo è molto inefficiente, infatti\nrichiede m visite di grafi che sono quasi uguali a G e quindi ha complessità O(m(n + m)).\n\nPossiamo fare di meglio?\nPossiamo tentare di usare una DFS opportunamente modificata per trovare i ponti.\nSupponiamo di fare una DFS, a partire da un nodo qualsiasi, del nostro grafo connesso G.\nSappiamo che tutti gli archi saranno classificati o come archi dell'albero della DFS o come archi all'indietro.\nUn arco all'indietro può essere un ponte? No, perché sappiamo che ogni arco all'indietro appartiene\nad almeno un ciclo e un ponte non può far parte di cicli.\nQuindi rimangono solamente gli archi dell'albero. Sia {u, v} un arco dell'albero e supponiamo,\nsenza perdita di generalità, che u sia il padre di v.\nSia Tree(v) l'insieme dei nodi del sottoalbero della DFS da v. Se c'è un arco all'indietro da un nodo di\nTree(v) verso u o un antenato di u, allora l'arco {u, v} non è un ponte (perchè c'è un ciclo che contiene l'arco).\nViceversa, se non c'è un arco all'indietro da Tree(v) a u o un antenato di u? Supponiamo per assurdo che esista un\ncammino che collega u e v e che non contiene l'arco {u, v}. Allora sia z il primo nodo del cammino (percorrendolo\nda v verso u) che non è in Tree(v). E sia w il predecessore di z, sempre nel cammino, quindi w è in Tree(v). Ne segue\nche {w, z} è un arco da Tree(v) a un nodo fuori di Tree(v) per cui non può essere un arco dell'albero e deve\nnecessariamente essere un arco all'indietro, in contraddizione con l'ipotesi che tali archi non ci sono.\n\nQuindi per determinare se un arco è un ponte basterà controllare che sia un arco dell'albero della DFS e che non ci\nsiano archi all'indietro dal sottoalbero di un estremo dell'arco all'altro estremo o un suo antenato. Per fare questo\ncontrollo facciamo in modo che la funzione che esegue la DFS da u ritorni il minimo tra il tempo d'inizio visita di u e\nil tempo di inizio visita dei nodi antenati di u relativi agli archi all'indietro da Tree(u). 
Inoltre dobbiamo passargli\nanche il padre di u per evitare che scambi l'arco tra u e il padre di u per un arco all'indietro.\n\n'''\n\n\ndef dfsPonti(G, u, z, tt, C, Pt):\n C[0] += 1\n tt[u] = C[0]\n back = C[0]\n for adjacent in G[u]:\n if tt[adjacent] == 0:\n b = dfsPonti(G, adjacent, u, tt, C, Pt)\n if b > tt[u]: # è un arco ponte\n Pt.append({u, adjacent})\n back = min(back, b)\n elif adjacent != z:\n back = min(back, tt[adjacent])\n return back\n\n\ndef trovaPonti(G):\n tt = [0 for _ in G] #array dei tempi di inizio visita inizializzato a 0\n C = [0] #contatore dei nodi visitati\n Pt = [] #lista dei ponti\n dfsPonti(G, 0, 0, tt, C, Pt)\n return Pt\n\ngrfNonDirCicl = {\n 0: [1, 7],\n 1: [0, 2],\n 2: [1, 3],\n 3: [2, 4, 7],\n 4: [3, 5, 6],\n 5: [4, 6],\n 6: [4, 5],\n 7: [0, 3, 8],\n 8: [7],\n}\n\ngrfDirCicl = {\n 0: [1],\n 1: [2],\n 2: [3, 9],\n 3: [4, 5],\n 4: [6],\n 5: [6],\n 6: [7],\n 7: [8],\n 8: [6],\n 9: []\n}\n\nprint(trovaPonti(grfNonDirCicl))\n\nprint(trovaPonti(grfDirCicl))\n"
},
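A hypothetical brute-force cross-check (not from the repo) for `trovaPonti`: an edge {u, v} of an undirected graph is a bridge iff removing it disconnects u from v, which can be tested with one visit per edge.

```python
# Brute-force bridge test; function name is illustrative.
def is_bridge_bruteforce(G, u, v):
    seen, stack = {u}, [u]
    while stack:
        x = stack.pop()
        for w in G[x]:
            if (x, w) in ((u, v), (v, u)):
                continue  # skip the removed edge in both directions
            if w not in seen:
                seen.add(w)
                stack.append(w)
    return v not in seen

print(is_bridge_bruteforce({0: [1], 1: [0, 2], 2: [1]}, 1, 2))                 # True
print(is_bridge_bruteforce({0: [1, 2], 1: [0, 2], 2: [0, 1]}, 0, 1))           # False (cycle)
```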
{
"alpha_fraction": 0.5839415788650513,
"alphanum_fraction": 0.604141891002655,
"avg_line_length": 29.36598014831543,
"blob_id": "8db8d426510f103748dae1690add1235f1fe035b",
"content_id": "110836679c1dc3f4e58536c4f7e1f2812168706b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5927,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 194,
"path": "/DFS.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import simpleStackQueue\n\n\ndef dfs(G, u):\n VIS = []\n S = simpleStackQueue.Stack()\n S.push(u)\n VIS.append(u)\n while S.size() >= 1:\n v = S.top()\n check = 0\n for w in G[v]:\n if w not in VIS:\n VIS.append(w)\n S.push(w)\n check = 1\n break\n if check == 0:\n S.pop()\n return VIS\n\n\ndef dfsRecursive(Gr, u):\n def dfsToRec(node):\n VIS.append(node)\n for w in Gr[node]:\n if w not in VIS:\n dfsToRec(w)\n return VIS\n VIS = []\n return dfsToRec(u)\n\n\ndef dfsBool(G, u):\n VIS = [-1 for _ in G]\n S = simpleStackQueue.Stack()\n S.push(u)\n VIS[u] = 0\n while S.size() >= 1:\n v = S.top()\n check = 0\n for w in G[v]:\n if VIS[w] == -1:\n VIS[w] = 0\n S.push(w)\n check = 1\n break\n if check == 0:\n S.pop()\n return VIS\n\n\ndef dfsRecursiveBool(Gr, u):\n def dfsToRec(node):\n VIS[node] = 0\n for w in Gr[node]:\n if VIS[w] == -1:\n dfsToRec(w)\n return VIS\n\n VIS = [-1 for _ in Gr]\n return dfsToRec(u)\n\n\ndef dfsOpt(G, u):\n VIS = [-1 for _ in G]\n S = simpleStackQueue.Stack()\n S.push((u, 0))\n VIS[u] = 0\n while S.size() >= 1:\n v = S.top()\n check = 0\n for index in range( v[1], len(G[v[0]]) ):\n if VIS[ G[v[0]][index] ] == -1:\n S.pop()\n S.push((v[0], index))\n VIS[G[v[0]][index]] = 0\n S.push((G[v[0]][index], 0))\n check = 1\n break\n if check == 0:\n S.pop()\n return VIS\n\n\ndef dfsTempInFin(Gr, u):\n def dfsToRec(node, C):\n C[0] += 1\n TT[node][0] = C[0]\n for w in Gr[node]:\n if TT[w][0] == 0:\n C[0] = dfsToRec(w, C)\n TT[node][1] = C[0]\n return C[0]\n\n TT = [[0, 0] for _ in Gr]\n Counter = [0]\n dfsToRec(u, Counter)\n return TT\n\n\ngraph = {\n 0: [4],\n 1: [2, 4],\n 2: [1, 3],\n 3: [2, 4, 5],\n 4: [0, 1, 3],\n 5: [3]}\n\ngraph2 = {\n 0: [1, 5, 6],\n 1: [0, 2, 3, 4, 5],\n 2: [1, 3],\n 3: [1, 2],\n 4: [1, 5],\n 5: [0, 1, 4],\n 6: [0, 7, 8, 9],\n 7: [6],\n 8: [6, 9],\n 9: [6, 8]}\n\nprint(dfs(graph, 0)) # controllo \"if w not in VIS:\" non efficiente!!!\nprint(dfsRecursive(graph, 0)) # controllo \"if w not in VIS:\" non efficiente!!!\nprint(\" - \")\nprint(dfsBool(graph, 0))\nprint(dfsRecursiveBool(graph, 0))\nprint(\" - \")\nprint(dfsOpt(graph, 0))\nprint(\" --- \")\nprint(dfsTempInFin(graph, 0))\nprint(\" - \")\nprint(dfsTempInFin(graph2, 0))\n\n'''\nCORRETTEZZA DFS\n- Dimostrare che la DFS partendo da un nodo u visita tutti i nodi raggiungibili da u.\n Supponiamo per assurdo che esista un nodo z raggiungibile da u ma che la DFS non visita.\n Siccome z è raggiungibile da u, esiste un cammino u(0), u(1), …, u(k) \n (se il grafo è diretto, il cammino è orientato)\n!!! con u(0) = u e u(k) = z.\n Sia u(i) il primo nodo del cammino che non è stato visitato, chiaramente 0 < i ≤ k.\n Allora, u(i-1) è stato visitato e durante la visita, prima che il nodo u(i-1) sia estratto dallo stack,\n tutti gli adiacenti di u(i-1) devono essere stati visitati.\n!!! Siccome u(i) è adiacente a u(i-1), il nodo u(i) deve essere stato visitato \n in contraddizione con la nostra ipotesi per assurdo.\n\n\nEFFICIENZA DFS\nPer mantenere l'insieme dei nodi visitati possiamo usare un semplice array VIS \"booleano\"\ncon un elemento per ogni nodo,inizializzato a -1 e ogni volta che un nuovo w viene visitato \nsi pone VIS[w] = 0 . 
--> Così l'aggiornamento e il test relativo alla visita di un nodo costa tempo costante.\n\nLo stack può essere facilmente implementato in modo che tutte le operazioni push , top e pop abbiano costo costante.\n\nSe il grafo è rappresentato tramite liste di adiacenza, la scansione degli adiacenti prende tempo costante \nper ogni adiacente considerato.\n\nAd ogni iterazione del WHILE o viene visitato un nuovo nodo o è estratto un nodo dallo stack.\nPoiché ogni nodo è inserito nello stack una sola volta (quando viene visitato), \nil numero di iterazioni del WHILE è al più 2n.\n\n!!!Il numero di operazioni non costanti in una iterazione del WHILE sono solamente le scansioni degli adiacenti \no in altri termini gli attraversamenti degli archi. Ogni arco è attraversato al più due volte!!! (per grafi diretti\nuna sola volta).\nQuindi gli attraversamenti degli archi costano complessivamente O(m).\n!\nIn totale, la complessità della DFS su un grafo connesso è O(n + m).\nIn generale, la complessità è O(h + k) dove h è il numero di nodi e k il numero di archi della \ncomponente connessa del nodo di partenza.\n!\n\nLa DFS ha complessità ottimale perchè qualsiasi procedura di visita \ndeve necessariamente visitare tutti i nodi e gli archi che sono raggiungibili.\n\n[Siccome lo stack delle chiamate ricorsive ha tipicamente una dimensione limitata, \nl'implementazione ricorsiva non è adatta per grafi di grandi dimensioni.]\n\n\nAlbero di visita --> sottografo formato da tutti i nodi visitati assieme agli archi che hanno permesso di visitarli.\nAlbero --> grafo connesso e aciclico.\n\n!!! Un grafo non diretto di n nodi è un albero se e solo se è connesso e ha esattamente n - 1 archi. !!!\n\nUn albero è un grafo minimamente connesso, nel senso che ha il numero minimo di archi per renderlo connesso o, \nequivalentemente, che nessun arco può essere eliminato senza sconnettere il grafo.\nGli alberi di visita dipendono dall'ordine con cui i nodi e gli archi vengono visitati.\n\n!!! L'albero della DFS è anche determinato dall'ordine con cui sono scanditi gli adiacenti dei nodi visitati. !!!\nNel caso di grafi diretti l'albero di visita è più propriamente chiamato arborescenza \nper indicare che è un grafo diretto.\n\n\n\n'''\n"
},
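A small sketch (not from the repo) of a standard use of the [discovery, last-discovery] pairs computed by `dfsTempInFin`: with that counter convention, u is an ancestor of v in the DFS tree iff u's interval contains v's. The sample TT below is hand-built for a chain 0 -> 1 -> 2 under the same convention.

```python
# Ancestor test on dfsTempInFin-style intervals; names are illustrative.
def is_ancestor(TT, u, v):
    return TT[u][0] < TT[v][0] and TT[v][1] <= TT[u][1]

TT = [[1, 3], [2, 3], [3, 3]]  # chain 0 -> 1 -> 2
print(is_ancestor(TT, 0, 2), is_ancestor(TT, 2, 0))  # True False
```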
{
"alpha_fraction": 0.5543849468231201,
"alphanum_fraction": 0.5723234415054321,
"avg_line_length": 22.891157150268555,
"blob_id": "f2c2f4cca66957bf8be98f4eba95dd96f5bb95bc",
"content_id": "4ace936c62814b29b4f93a6eac7a6e778991ddef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3516,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 147,
"path": "/eserciziBFS.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import BFS\nimport collections\nimport math\n'''\nEsercizio [dall'albero alle distanze]\nDato un vettore dei padri P che rappresenta l'albero di una BFS a partire da un nodo u, dare un algoritmo che calcola il\ncorrispondente array Dist delle distanze da u in tempo O(n).\n'''\n\ndef calcolaDistanzeArrayPadriBFS(P):\n def dist(Pp, w, DDIST):\n if DDIST[w] == -1:\n if P[w] == w:\n DDIST[w] = 0\n else:\n DDIST[w] = dist(Pp, Pp[w], DDIST) + 1\n return DDIST[w]\n DIST = [-1 for _ in P]\n for i in range(0, len(P)):\n if DIST[i] == -1:\n DIST[i] = dist(P, i, DIST)\n return DIST\n\n\n'''\nEsercizio [stessa distanza]\nDescrivere un algoritmo efficiente che, dato un grafo non diretto e connesso e due suoi nodi u e v, trova i nodi che hanno la stessa\ndistanza da u e v.\n'''\n\n\ndef sameDist(G, u, v):\n DIST_u = BFS.BFS(G, u)[1]\n DIST_v = BFS.BFS(G, v)[1]\n S = []\n for node in G:\n if DIST_u[node] == DIST_v[node]:\n S.append(node)\n return S\n\n'''\nEsercizio [distanza tra insiemi]\nDato un grafo non diretto G e due sottoinsiemi A e B dei suoi nodi si definisce distanza tra A e B la distanza minima per andare da\nun nodo in A ad un nodo in B. Se A e B non sono disgiunti, la loro distanza è 0. Descrivere un algoritmo che, dato G e due\nsottoinsiemi dei nodi A e B calcola la loro distanza. L’algoritmo deve avere complessità O(n + m).\n'''\n\ndef BFS_SET(G, A):\n DIST_A = [-1 for _ in G]\n Q = collections.deque()\n for node in A:\n DIST_A[node] = 0\n Q.append(node)\n while len(Q) != 0:\n v = Q.popleft()\n for adjacent in G[v]:\n if DIST_A[adjacent] == -1:\n DIST_A[adjacent] = DIST_A[v]+1\n Q.append(adjacent)\n return DIST_A\n\ndef distInsiemi(G, A, B):\n DIST_A = BFS_SET(G, A)\n d = math.inf\n for node in B:\n if DIST_A[node] < d:\n d = DIST_A[node]\n return d\n\n\ndef calcolaGT(G):\n Gt = {x: [] for x in G}\n for node in G:\n for adjacent in G[node]:\n Gt[adjacent].append(node)\n return Gt\n\n'''\nEsercizio [Roma]\nDescrivere un algoritmo che, dato un grafo diretto e fortemente connesso e un suo nodo r, trova tutti i cammini minimi tra tutte le\ncoppie di nodi con il vincolo che questi cammini devono passare per r.\n\nCENTER(G: grafo diretto, r: nodo)\nP <- BFS(G, r) /* Vettore dei padri dell'albero dei cammini minimi di G da r */\nGT <- TRASP(G) /* Ritorna il grafo trasposto */\nPT <- BFS(GT, r) /* Vettore dei padri dell'albero dei cammini minimi di GT da r */\nRETURN P, PT\n\n\nC_u <- lista vuota\nDO\nC_u.append(u)\nu <- PT[u]\nWHILE u <> r\nC_v <- lista vuota\nWHILE v <> r DO\nC_v.head(v)\nv <- P[v]\nC <- concatenazione di C_u e C_v\nOUTPUT C\n'''\n\n\ndef centerMinPath(G, u, v, r): # G grafo, u nodo inizio, v nodo fine, r nodo vincolo\n P = BFS.BFS(G, r)[0]\n GT = calcolaGT(G)\n PT = BFS.BFS(GT, r)[0]\n\n C_u = [u]\n u = PT[u]\n while u != r:\n C_u.append(u)\n u = PT[u]\n C_u.append(r)\n C_v = []\n while v != r:\n C_v.append(v)\n v = P[v]\n C_v.reverse()\n return C_u+C_v\n\n\n\n#PADRI = [0, 0, 1, 0, 5, 0]\n#print(calcolaDistanzeArrayPadriBFS(PADRI))\nGgg = {\n 0: [1, 5],\n 1: [0, 2, 3],\n 2: [1, 4],\n 3: [1, 4],\n 4: [2, 3, 6],\n 5: [0, 6],\n 6: [4, 5]\n}\n\ngrafoFortConn = {\n 0: [1],\n 1: [2],\n 2: [0, 3],\n 3: [2]\n}\n\nprint(sameDist(Ggg, 1, 4))\n\nprint(distInsiemi(Ggg, {0, 1, 2}, {6}))\n\nprint(centerMinPath(grafoFortConn, 0, 2, 3))\n"
},
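A self-contained sketch (not from the repo) of the multi-source BFS idea behind `BFS_SET`: seeding the queue with every node of A at distance 0 makes each node's final distance the distance to its nearest source, in one O(n + m) pass.

```python
from collections import deque

def multi_source_bfs(G, sources):
    dist = {s: 0 for s in sources}  # all sources start at distance 0
    q = deque(sources)
    while q:
        v = q.popleft()
        for w in G[v]:
            if w not in dist:
                dist[w] = dist[v] + 1
                q.append(w)
    return dist

print(multi_source_bfs({0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}, [0, 3]))
# {0: 0, 3: 0, 1: 1, 2: 1}
```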
{
"alpha_fraction": 0.41759881377220154,
"alphanum_fraction": 0.4511558413505554,
"avg_line_length": 25.739999771118164,
"blob_id": "fd4a0bd8917ef7edebbef8bbd60c1d7bfe315074",
"content_id": "4f17cd8da97eff3348fd492049a686008730de9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1345,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 50,
"path": "/esePD.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import collections\n\n\ndef EQ_BIP(S, x):\n if x % 2 != 0:\n print(\"Il valore totale della sequenza è dispari, la sequenza non è bipartibile!\")\n return False, None\n T = [[False for _ in range(0, int(x/2)+1)] for _ in range(0, len(S))]\n for i in range(0, len(S)):\n T[i][0] = True\n for c in range(1, int(x/2)+1):\n if S[0] == c:\n T[0][c] = True\n else:\n T[0][c] = False\n for i in range(1, len(S)):\n for c in range(1, int(x/2)+1):\n T[i][c] = T[i-1][c] #Se la sottosequenza con somma c non include S[i]\n if c-S[i] >= 0:\n T[i][c] = T[i-1][c] or T[i-1][c-S[i]] #Se la sottosequenza con somma c può includere S[i]\n return T[len(S)-1][int(x/2)], T\n\n\ndef printSeqFromT(T, S, x):\n i = len(S)-1\n c = x\n L = collections.deque()\n while c > 0 or i > 0:\n if (c-S[i]) >= 0 and T[i-1][c-S[i]]:\n L.appendleft(S[i])\n print(\"ho inserito \" + str(S[i]))\n c = c-S[i]\n print(\"è rimasto \"+str(c))\n i = i - 1\n else:\n L.appendleft(-1)\n i = i - 1\n return L\n\n\nS = [1, 1, 1, 4, 2, 3, 11, 3]\ntot = 0\nfor el in S:\n tot += el\n\nTup = EQ_BIP(S, tot)\nT = Tup[1]\nif Tup[0]:\n L = printSeqFromT(T, S, int(tot/2))\n print(L)\n\n\n\n\n"
},
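A hypothetical brute-force cross-check (not from the repo) for `EQ_BIP`: try every subset of the sequence and see whether one sums to half the total. Exponential, so only viable for small inputs, but useful to validate the DP table.

```python
from itertools import combinations

def eq_bip_bruteforce(S):
    tot = sum(S)
    if tot % 2 != 0:
        return False
    return any(sum(c) == tot // 2
               for r in range(len(S) + 1)
               for c in combinations(S, r))

print(eq_bip_bruteforce([1, 1, 1, 4, 2, 3, 11, 3]))  # True: e.g. 11 + 2 = 13 = 26 / 2
```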
{
"alpha_fraction": 0.509687602519989,
"alphanum_fraction": 0.5492289662361145,
"avg_line_length": 23.794116973876953,
"blob_id": "fe8809728ea0513348981f8e7200dfb4cdcb6732",
"content_id": "53a8982eacfaded4947cbc57f4d3c35b305f6483",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2540,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 102,
"path": "/bicolorazione.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import simpleStackQueue\n\n'''\nCOLORAZIONE DI UN GRAFO\n-Dato un grafo si vogliono colorare i nodi usando il numero minimo di colori che garantiscono che due\nqualsiasi nodi adiacenti hanno colori diversi.\n\nProblema: sapere se esiste e in tal caso trovare una cosidetta 2-colorazione del nostro grafo.\n\nUn altro modo di vedere il problema è di vedere l'assegnamento dei due colori ai nodi come la partizione del grafo\nin due insiemi (disgiunti) tali che non ci sono archi tra nodi appartenenti alla stessa parte.\nUn grafo che può esserecosì partizionato si dice bipartito.\n'''\n\n\ndef dfsBiCol(G, node, bc):\n bc[node] = 0\n S = simpleStackQueue.Stack()\n S.push(node)\n while S.size() >= 1:\n v = S.top()\n control = 0\n for adjacent in G[v]:\n if bc[adjacent] == -1:\n bc[adjacent] = 1 if bc[v] == 0 else 0\n control = 1\n S.push(adjacent)\n else:\n if bc[adjacent] == bc[v]:\n return -1\n if control == 0:\n S.pop()\n return 0\n\n\ndef dfsBiCol2(G, u, bc):\n for adjacent in G[u]:\n if bc[adjacent] == -1:\n bc[adjacent] = 1 if bc[u] == 0 else 0\n c = dfsBiCol2(G, adjacent, bc)\n if c == -1:\n return -1\n elif bc[adjacent] == bc[u]:\n print(\"Grafo non bicolorabile!\")\n return -1\n return 0\n\n\ndef biCol(G):\n bc = [-1 for _ in G]\n for node in G:\n if bc[node] == -1:\n if dfsBiCol(G, node, bc) == -1:\n return None\n return bc\n\n\ndef biCol2(G):\n bc = [-1 for _ in G]\n for node in G:\n if bc[node] == -1:\n bc[node] = 0\n if dfsBiCol2(G, node, bc) == -1:\n return None\n return bc\n\n\ngraph = {\n 0: [4],\n 1: [2, 4],\n 2: [1, 3],\n 3: [2, 4, 5],\n 4: [0, 1, 3],\n 5: [3],\n 6: [7, 8],\n 7: [6],\n 8: [6, 9],\n 9: [8],\n 10: [11, 12, 13],\n 11: [10],\n 12: [10],\n 13: [10]\n}\n\nG_nonbic2 = {\n 0: [1, 4],\n 1: [0, 2],\n 2: [1, 3],\n 3: [2, 4],\n 4: [3, 0]}\n\nprint(biCol2(graph))\nprint(biCol2(G_nonbic2))\n\n'''\nUsando la DFS si può risolvere il problema della 2-colorazione (o bipartizione) in modo molto efficiente, cioè, in\ntempo O(n + m).\n\nIn generale, il problema della colorazione è molto più difficile. Determinare se un grafo è 3-colorabile è già un\nproblema per cui non si conoscono algoritmi efficienti. Gli algoritmi migliori hanno complessità esponenziale nella\ndimensione del grafo.\n'''\n"
},
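A self-contained sketch (not from the repo) of the same 2-coloring test done with a BFS instead of a DFS; the kind of visit does not matter, only the alternation of colors along edges does.

```python
from collections import deque

def bipartite_bfs(G):
    color = {}
    for s in G:                      # handle every connected component
        if s in color:
            continue
        color[s] = 0
        q = deque([s])
        while q:
            v = q.popleft()
            for w in G[v]:
                if w not in color:
                    color[w] = 1 - color[v]   # alternate the color
                    q.append(w)
                elif color[w] == color[v]:
                    return None               # odd cycle: not bipartite
    return color

print(bipartite_bfs({0: [1], 1: [0, 2], 2: [1]}))        # a valid 2-coloring
print(bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # None (triangle)
```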
{
"alpha_fraction": 0.5906715989112854,
"alphanum_fraction": 0.6078962683677673,
"avg_line_length": 34.875,
"blob_id": "36e1e7e9eae9b73964bfc5d381ca13f1567824e4",
"content_id": "a196b1c59d7c8c5b30c0706df227f71f82009aed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5198,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 144,
"path": "/eserciziDfs2.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import copy\n'''\nEsercizio [pozzo]\nIn un grafo diretto, un nodo si dice pozzo universale se ha grado entrante n − 1 e grado uscente 0.\nDimostrare che un grafo diretto non può avere più di un pozzo universale.\nDescrivere un algoritmo che preso un grafo diretto G, rappresentato tramite matrice di adiacenza, determina\nse G contiene o meno un pozzo universale. L’algoritmo deve avere complessità O(n).\nDimostrare che il problema non è risolvibile in tempo O(n) se il grafo è rappresentato con liste di adiacenza.\n\n\n- Se un grafo ha un pozzo universale u allora per un qualsiasi altro nodo v c'è l'arco (v, u) che essendo un arco\nuscente da v impedisce che v possa essere un pozzo universale.\n\n-Se consideriamo due nodi qualsiasi u e v e ci chiediamo se c'è un arco da u a v in base alla risposta\npossiamo escludere con certezza che uno dei due nodi sia il pozzo (se l'arco c'è escludiamo u altrimenti\nescludiamo v). A questo punto l'idea di un algoritmo per trovare il pozzo universale, se esiste, è molto\nsemplice. Scegliamo due nodi, diciamo u e v, e vediamo se c'è l'arco da u a v. Per quanto detto almeno uno\ndei due nodi sarà scartato e quindi scegliamo un altro nodo w. Applichiamo la stessa procedura ai due nodi\nin esame scartandone almeno uno. Continuando così fino a considerare tutti i nodi, alla fine o rimarremo con\nun solo nodo oppure li avremo scartati tutti e il pozzo universale non c'è. Se rimaniamo con un nodo\ndobbiamo solamente verificare che sia il pozzo universale.\n\n\nN.B.\nSe il grafo è rappresentato tramite liste di adiacenza non è possibile risolvere il problema in tempo O(n)\nperché per verificare che un nodo sia un pozzo universale bisogna controllare che abbia grado entrante n - 1\ne questo richiede la scansione delle liste di adiacenza di tutti gli altri nodi.\n'''\n\n\ndef pozzo(matG):\n p = 0 # nodo qualsiasi, es. nodo iniziale nella rappresentazione a matrice\n for node in range(0, len(matG)):\n if node != p and matG[p][node] == 1:\n # il p corrente ha un arco uscente e quindi non può essere un pozzo\n p = node\n for node in range(0, len(matG)):\n if node != p and (matG[node][p] == 0 or matG[p][node] == 1):\n print(\"Non ci sono pozzi universali nel grafo\")\n return -1\n print(\"Il pozzo universale del grafo è \"+str(p))\n return p\n\n\nmatGpoz = [\n [0, 1, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 0]\n]\n\nmatGnopoz = [\n [0, 1, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n]\n\npozzo(matGpoz)\nprint(\" \")\npozzo(matGnopoz)\n\n'''\nEsercizio per casa [strade critiche]\nLa rete viaria di una cittadina non è stata progettata molto bene. Tutte le strade sono a doppio senso di marcia e da\nun qualsiasi incrocio è possibile arrivare ad un qualsiasi altro incrocio. 
Ma ci sono delle strade critiche che se\ninterrotte (ad esempio per lavori) dividono la cittadina in due parti e non si può più andare da una parte all'altra.\nVogliamo un algoritmo efficiente che analizzando la rete viaria trovi tutte le strade critiche.\n\n- Algoritmo che trova tutti i ponti di un Grafo non diretto connesso\n-nodi: incroci\n-archi: strade a doppio senso\n'''\n\ndef dfs(Gp, u, P):\n for adjacent in Gp[u]:\n if P[adjacent] == -1:\n P[adjacent] = u\n dfs(Gp, adjacent, P)\n\n# Per grafi non diretti!\ndef trovaPonteConVetPadri(G, u, v, P):\n Gp = copy.deepcopy(G)\n try:\n Gp[u].remove(v)\n P[u] = u\n dfs(Gp, u, P)\n if P[v] == -1:\n print(\"l'arco {\" + str(u) + \",\" + str(v) + \"} è un ponte\")\n return 0 # l'arco è un ponte\n else:\n print(\"l'arco {\"+str(u)+\",\"+str(v)+\"} NON è un ponte\")\n return 1 # l'arco non è un ponte (c'è un ciclo nel grafo G, rappresentato dal cammino semplice da u a v in Gp a cui si aggiunge l'arco {u,v})\n except ValueError:\n print(\"arco {\"+str(u)+\",\"+str(v)+\"} non presente in G\")\n return -1 # errore non esiste l'arco {u,v} in G\n\n\ndef dfsPonti(G, node, VIST, L, M):\n VIST[node] = 0\n for adjacent in G[node]:\n if VIST[adjacent] == -1 and M[node][adjacent] == -1:\n P = [-1 for _ in G]\n c = trovaPonteConVetPadri(G, node, adjacent, P)\n if c == 1:\n P[node] = adjacent\n print(str(adjacent), end='')\n w = adjacent\n M[w][P[w]] = 0\n M[P[w]][w] = 0\n while w != node:\n print(\" \"+str(P[w]), end='')\n w = P[w]\n M[w][P[w]] = 0\n M[P[w]][w] = 0\n print(\"\")\n print(str(M))\n elif c == 0:\n L.append((node, adjacent))\n dfsPonti(G, adjacent, VIST, L, M)\n\n\ndef trovaPontiNonEff(G):\n L = []\n M = [[-1 for _ in G] for _ in G]\n VIST = [-1 for _ in G]\n for node in G:\n if VIST[node] == -1:\n dfsPonti(G, node, VIST, L, M)\n return L\n\n\ngrfNonDirCicl = {\n 0: [1, 7],\n 1: [0, 2],\n 2: [1, 3],\n 3: [2, 4, 7],\n 4: [3, 5, 6],\n 5: [4, 6],\n 6: [4, 5],\n 7: [0, 3, 8],\n 8: [7],\n}\nprint(trovaPontiNonEff(grfNonDirCicl))\n\n"
},
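A self-contained sketch (not from the repo) of the O(n) elimination argument used by `pozzo`: comparing the current candidate p with a node v discards at least one of the two, so one scan leaves a single candidate to verify.

```python
# Universal-sink search on an adjacency matrix; names are illustrative.
def universal_sink(mat):
    n = len(mat)
    p = 0
    for v in range(1, n):
        if mat[p][v] == 1:  # p has an outgoing edge: p cannot be the sink
            p = v
    # verify the surviving candidate
    for v in range(n):
        if v != p and (mat[v][p] == 0 or mat[p][v] == 1):
            return None
    return p

print(universal_sink([[0, 0, 1, 0],
                      [0, 0, 1, 0],
                      [0, 0, 0, 0],
                      [0, 0, 1, 0]]))  # 2
```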
{
"alpha_fraction": 0.42294323444366455,
"alphanum_fraction": 0.4495944380760193,
"avg_line_length": 22.95833396911621,
"blob_id": "2cf262f8ce9de20b6632fad10e8642e3a4d84b0b",
"content_id": "68035f082c92f08f853bb930b5cc3c429b498317",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1727,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 72,
"path": "/prinSottSeqCresc.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "# N[i] = numero delle sottosequenze crescenti di S[0..i] che terminano in i\n\n\ndef numSottSeqCresc(S, n):\n N = [0 for _ in range(0, n)]\n N[0] = 1\n nsc = 1\n for i in range(1, n):\n N[i] = 1\n for j in range(0, i):\n if S[j] < S[i]:\n N[i] = N[i] + N[j]\n nsc = nsc + N[i]\n return nsc\n\ndef printModSeq(S,C):\n stringa = \"( \"\n for x in range(0, len(S)):\n if C[x] == 1:\n stringa += str(S[x])+\" \"\n print(stringa+\")\")\n\ndef arrModSeq(S,C):\n arr = []\n for x in range(0, len(S)):\n if C[x] == 1:\n arr.append(S[x])\n return arr\n\ndef printFullT(T):\n for indice in range(0, len(T)):\n print(\"T[\"+str(indice)+\"] = \", end='')\n for stsq in T[indice]:\n print(str(stsq)+\" \", end='')\n print(\"\\n\")\n\n\ndef printTH(Tt, h):\n print(\"T[\" + str(h) + \"] = \", end='')\n for stsq in Tt:\n print(str(stsq) + \" \", end='')\n print(\"\\n\")\n\n\ndef printSottSeqCresc(S, n, T):\n for x in range(n-1, -1, -1):\n T[x].append([S[x]])\n for y in range(x+1, n):\n if S[y] > S[x]:\n for seq in T[y]:\n A = [S[x]]\n for elem in seq:\n A.append(elem)\n T[x].append(A)\n print(\"T[\" + str(x) + \"] è: \" + str(T[x]))\n print(\"\\n\")\n for x in T:\n for sottosequenza in x:\n print( sottosequenza, end=' ')\n\n\nS1 = [5, 7, 3, 6]\nprint(numSottSeqCresc(S1, len(S1)))\nS2 = [5, 3, 7, 8, 6]\nprint(numSottSeqCresc(S2, len(S2)))\nS3 = [8, 1, 2, 9]\nprint(numSottSeqCresc(S3, len(S3)))\n\nT = [[] for _ in S3]\nprintSottSeqCresc(S3, len(S3), T)\nT = [[] for _ in S2]\nprintSottSeqCresc(S2, len(S2), T)\n\n"
},
{
"alpha_fraction": 0.603283166885376,
"alphanum_fraction": 0.6214401125907898,
"avg_line_length": 34.58407211303711,
"blob_id": "476b33a97c458c7022ea0f8f027ef4b15b06209c",
"content_id": "c81c4b66a5cd867210cc8d23eb303005e74542d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8080,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 226,
"path": "/eserciziDfs.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import copy\n'''\nEsercizio [archi]\nVogliamo scrivere un algoritmo che esegue una DFS su un grafo diretto e ritorna il numero di archi dell'albero della\nDFS, il numero di archi all'indietro, il numero di archi in avanti e il numero di archi di attraversamento.\n'''\n\n\ndef dfsRecArchi(G, u, VIS, contatori, c):\n c += 1\n VIS[u] = -c\n for adjacent in G[u]:\n if VIS[adjacent] == -1:\n contatori[0] += 1 # n. archi albero\n c = dfsRecArchi(G, adjacent, VIS, contatori, c)\n elif VIS[adjacent] < 0:\n contatori[1] += 1 # n. archi all'indietro\n elif VIS[adjacent] > -VIS[u]:\n contatori[2] += 1 # n. archi in avanti\n else: contatori[3] += 1 #n. archi di attraversamento\n VIS[u] = -VIS[u]\n return c\n\n\ndef calcolaArchi(G, u):\n VIS = [-1 for _ in G]\n contatori = [0, 0, 0, 0]\n dfsRecArchi(G, u, VIS, contatori, 0)\n print(\"n.archi albero: \"+str(contatori[0]))\n print(\"n.archi attraversamento: \" + str(contatori[3]))\n print(\"n. archi all'indietro: \" + str(contatori[1]))\n print(\"n. archi in avanti: \" + str(contatori[2]))\n\n\n'''\nEsercizio [trasposto]\nIl grafo trasposto di un grafo diretto G = (V, E), e un grafo diretto GT = (V, ET) che ha lo stesso insieme dei nodi ma\ntutti gli archi con direzione opposta, vale a dire ET = { (v, u) | (u, v) in E }. Descrivere un algoritmo che dato un grafo\ndiretto G, rappresentato tramite liste di adiacenza degli adiacenti uscenti, ritorna il grafo trasposto GT\nrappresentato nello stesso modo. L'algoritmo deve avere complessita O(n + m).\n'''\n\n\ndef dfsTrasp(G, GT, VIS, node):\n VIS[node] = 0\n for adjacent in G[node]:\n GT[adjacent].append(node)\n if VIS[adjacent] == -1:\n dfsTrasp(G, GT, VIS, adjacent)\n\n\ndef creaGTrasp(G):\n VIS = [-1 for _ in G]\n GT = {x: [] for x in G}\n for node in G:\n if VIS[node] == -1:\n dfsTrasp(G, GT, VIS, node)\n return GT\n\ndef creaGTrasp2(G):\n GT = {x: [] for x in G}\n for node in G:\n for adjacent in G[node]:\n GT[adjacent].append(node)\n return GT\n\n\n'''\nEsercizio [ponte]\nDescrivere un algoritmo che, dato un grafo non diretto G e un arco {u, v} del grafo, determina se G ha un ciclo che\ncontiene {u, v}. Analizzare il tempo di esecuzione dell’algoritmo. E se, nel caso un ciclo esista, vogliamo anche\ntrovare un ciclo che contiene l'arco?\n\nSe un arco {u, v} di un grafo non diretto G non è contenuto in nessun ciclo, allora nel grafo G' ottenuto rimuovendo\nl'arco da G, i nodi u e v non sono connessi. Infatti, se lo fossero vuol dire che ci sarebbe in G' un cammino che li\ncollega e siccome tale cammino non contiene l'arco {u, v}, il cammino insieme a tale arco è un ciclo in G che\ncontiene l'arco, contraddizione. Viceversa, se la rimozione dell'arco {u, v} sconnette i nodi u e v vuol dire che non ci\npoteva essere un ciclo che conteneva l'arco. Quindi abbiamo diamostrato\nIn un grafo non diretto e connesso G, un arco non è contenuto in nessun ciclo se e solo se la rimozione\ndell'arco sconnette il grafo.\nUn arco la cui rimozione sconnette un grafo connesso è chiamato ponte (bridge). Un algoritmo molto semplice per\ndeterminare se un arco è un ponte di un grafo G non diretto e connesso consiste nel fare una visita del grafo G'\nottenuto rimuovendo l'arco. Se G' è sconnesso, l'arco è un ponte, altrimenti non lo è. Se in generale il grafo non è\nconnesso lo stesso ragionamento vale per la componente connessa che contiene l'arco da esaminare (ovvero la\nvisita parte da uno dei due estremi dell'arco). 
Chiaramente, tale algoritmo ha complessità O(n + m).\nNel caso vogliamo anche trovare un ciclo che contiene l'arco {u, v} (se esiste) basterà fare una DFS a partire da u\nfacendo in modo che il primo adiacente scelto sia proprio v. In questo modo la DFS troverà un arco all'indietro che\narriva al nodo u e da qui possiamo ricostruire il ciclo come già sappiamo.\n'''\n\n\ndef dfs(Gp, u, VIS):\n VIS[u] = 0\n for adjacent in Gp[u]:\n if VIS[adjacent] == -1:\n dfs(Gp, adjacent, VIS)\n\n\ndef dfsRetInd(G, arcA, arcB, v, C, VIStime, P):\n C[0] += 1\n VIStime[v] = -C[0]\n for adjacent in G[v]:\n if VIStime[adjacent] == 0:\n P[adjacent] = v\n check = dfsRetInd(G, arcA, arcB, adjacent, C, VIStime, P)\n if check != -1:\n return check\n elif v != arcB and adjacent == arcA and VIStime[adjacent] < 0:\n print(\"trovato v: \"+ str(v))\n return v\n VIStime[v] = -VIStime[v]\n return -1\n\n\n# Per grafi non diretti!\ndef trovaPonte(G, u, v):\n Gp = copy.deepcopy(G)\n try:\n Gp[u].remove(v)\n VIS = [-1 for _ in G]\n dfs(Gp, u, VIS)\n if VIS[v] == -1:\n print(\"l'arco {\" + str(u) + \",\" + str(v) + \"} è un ponte\")\n return 0 # l'arco è un ponte\n else:\n print(\"l'arco {\"+str(u)+\",\"+str(v)+\"} NON è un ponte\")\n return 1 # l'arco non è un ponte (c'è un ciclo nel grafo G, rappresentato dal cammino semplice da u a v in Gp a cui si aggiunge l'arco {u,v})\n except ValueError:\n print(\"arco {\"+str(u)+\",\"+str(v)+\"} non presente in G\")\n return -1 # errore non esiste l'arco {u,v} in G\n\n\ndef checkCycleEdge(G, u, v):\n if trovaPonte(G, u, v) == 1:\n P = [-1 for x in G]\n VIStime = [0 for x in G]\n P[u] = u\n C = [1]\n VIStime[u] = -C[0]\n P[v] = u\n ret = dfsRetInd(G, u, v, v, C, VIStime, P)\n VIStime[u] = -VIStime[u]\n if ret != -1:\n w = ret\n print(str(w), end='')\n while w != u:\n print(\" \" + str(P[w]), end='')\n w = P[w]\n print(\"\")\n return ret # se ret != -1 {c,u} rappresenta l'arco all'indietro che chiude il ciclo\n else:\n print(\"l'arco {\"+str(u)+\",\"+str(v)+\"} non fa parte di un ciclo\")\n return -1 # l'arco {u,v} non fa parte di un ciclo\n\n\ngrfDag = {\n 0: [4, 6],\n 1: [2, 5],\n 2: [3],\n 3: [5],\n 4: [1],\n 5: [],\n 6: [7, 11],\n 7: [8, 9],\n 8: [],\n 9: [1, 10],\n 10: [],\n 11: []\n}\n\ncalcolaArchi(grfDag, 0)\nprint(creaGTrasp(grfDag))\nprint(creaGTrasp2(grfDag))\n\ngrfNonDirCicl = {\n 0: [1, 7],\n 1: [0, 2],\n 2: [1, 3],\n 3: [2, 4, 7],\n 4: [3, 5, 6],\n 5: [4, 6],\n 6: [4, 5],\n 7: [0, 3, 8],\n 8: [7],\n}\n\ngrfDirCicl = {\n 0: [1],\n 1: [2],\n 2: [3, 9],\n 3: [4, 5],\n 4: [6],\n 5: [6],\n 6: [7],\n 7: [8],\n 8: [6],\n 9: []\n}\nprint(\"________________________\")\ntrovaPonte(grfNonDirCicl, 2, 3)\ntrovaPonte(grfNonDirCicl, 7, 8)\ntrovaPonte(grfNonDirCicl, 3, 4)\ntrovaPonte(grfNonDirCicl, 3, 7)\ntrovaPonte(grfNonDirCicl, 4, 0)\ncheckCycleEdge(grfNonDirCicl, 3, 7)\ncheckCycleEdge(grfNonDirCicl, 4, 6)\n#checkCycleEdge(grfDirCicl, 6, 7) # Errore poiché l'algoritmo vale solo per i grafi NON diretti!\n\n'''\nEsercizio [grado due]\nDimostrare che se tutti i nodi di un grafo non diretto G hanno grado almeno due allora c’è almeno un ciclo. Se il\ngrado di ogni nodo è esattamente due, si puo affermare che G è un ciclo?\n\nUn modo molto semplice di dimostrare che se ogni nodo di un grafo non diretto G ha grado almeno due allora il\ngrafo contiene un ciclo e di considerare una DFS a partire da un nodo di G. 
La visita dovrà necessariamente\nincontrare un nodo w che è una foglia dell'albero della DFS e siccome w ha grado almeno due, w deve avere\nalmeno un altro arco oltre a quello che appartiene all'albero. Sappiamo che tale arco, non appartenendo all'albero,\nnon potrà che essere un arco all'indietro e questo dimostra l'esistenza di un ciclo.\nSe ogni nodo di G ha grado esattamente due, non è detto che G sia un ciclo. Potrebbe infatti essere formato da\ndue o più cicli disgiunti. Se invece è anche connesso, allora è necessariamente un ciclo. Perché?\n\nPerché la DFS a partire da uno qualunque dei nodi di G riuscirà a visitare tutti i nodi di G trovando infine \nnel nodo foglia dell'albero DFS un arco all'indietro verso il nodo da cui è partita la DFS\n(poiché anche questo è di grado 2).\n\n'''"
},
{
"alpha_fraction": 0.6736343502998352,
"alphanum_fraction": 0.6895205974578857,
"avg_line_length": 36.7684211730957,
"blob_id": "f097ce3bea1013e4b27afaea913b6b43bceeec5b",
"content_id": "1b82fb044782826f8bf5f5e60209ccb8e8d475cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3605,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 95,
"path": "/ordinareGrafo.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "#import simpleStackQueue\nimport collections\n\n'''\n!!!Se un grafo è aciclico, esiste almeno un nodo che non ha archi entranti.!!!\ndim: Se così non fosse potremmo partire da un nodo v1 e prendere un suo arco entrante che esce da un nodo v2, poi\nprendere un arco entrante in v2 e uscente da un nodo v3 diverso dai primi due perché il grafo è aciclico, anche v3\nha un arco entrante che deve uscire, per le stesse ragioni, da un quarto nodo distinto v4 e procedendo in questo\nmodo si arriverebbe all'n-esimo nodo ma quest'ultimo ha un arco entrante che necessariamente dovrebbe uscire\nda uno dei nodi già considerati chiudendo un ciclo che non può esistere, contraddizione.\n\nGrazie a questa proprietà possiamo costruire un ordine come segue. Come primo nodo scegliamo un nodo v1\nsenza archi entranti (cioè, una lavorazione senza vincoli). Eliminando v1 dal grafo rimaniamo con un grafo aciclico e\nda questo scegliamo un nodo v2 senza archi entranti (cioè, una lavorazione o senza vincoli o che aveva come unico\nvincolo v1). Eliminiamo v2 e otteniamo un grafo ancora aciclico e da questo scegliamo v3 un nodo senza archi\nentranti (cioè, una lavorazione o senza vincoli o che aveva come vincoli solo v1 o v2). Possiamo procedere in questo\nmodo fino all'ultimo nodo.--->Implementazione 1: Grafo dato con liste di adiacenza\n\nOrdine topologico\nEffettuiamo una DFS su un DAG G. Se la DFS da v termina dopo la DFS da w, siamo certi che non ci può essere un\narco da w a v. Infatti, se ci fosse sarebbe un arco all'indietro ma in un DAG non essendoci cicli non ci possono\nessere archi all'indietro. Allora possiamo ottenere un ordinamento topologico di un DAG semplicemente ordinando i\nnodi per tempi di fine visita decrescenti. Quindi, ogniqualvolta la DFS da un nodo v termina, inseriremo v in testa\nalla lista che mantiene l'ordine.\n\n'''\n\n\ndef dfsOrd(dag, node, VIS, L):\n VIS[node] = 0\n for adjacent in dag[node]:\n if VIS[adjacent] == -1:\n dfsOrd(dag, adjacent, VIS, L)\n L.appendleft(node)\n\n# Ordinamento nodi per un grafo diretto aciclico\ndef ordTop(dag):\n L = collections.deque()\n VIS = [-1 for _ in dag]\n for node in dag:\n if VIS[node] == -1:\n dfsOrd(dag, node, VIS, L)\n return L\n\n\ndef calcolaGradoEntranti(dag):\n ge = [0 for _ in dag]\n for node in dag:\n for adjacent in dag[node]:\n ge[adjacent] += 1\n return ge\n\n\ndef ordTop2(dag):\n L = []\n gradoEntranti = calcolaGradoEntranti(dag) # O(n+m)\n S = []\n for node in dag: # O(n)\n if gradoEntranti[node] == 0:\n S.append(node)\n while len(S) >= 1: # Ad ogni passo del while viene preso in considerazione un nodo x senza archi entranti che verrà in seguito \"rimosso\" da G ed i nodi nella sua lista di adiacienze--> O(n+m)\n v = S.pop()\n L.append(v)\n for adjacent in dag[v]:\n gradoEntranti[adjacent] -= 1\n if gradoEntranti[adjacent] == 0:\n S.append(adjacent)\n return L\n\n\ngrfDag = {\n 0: [4, 6],\n 1: [2, 5],\n 2: [3],\n 3: [5],\n 4: [1],\n 5: [],\n 6: [7, 11],\n 7: [8, 9],\n 8: [],\n 9: [1, 10],\n 10: [],\n 11: []\n}\nprint(ordTop(grfDag))\nprint(ordTop2(grfDag))\n\n'''\nordTop2\n\nLa costruzione dell'array dei gradi entranti ha costo O(n + m) perché fa semplicemente una scansione\ndell'intero grafo. L'inizializzazione dello stack dei nodi con grado entrante zero costa O(n). Il WHILE esegue n\niterazioni e complessivamente il numero di iterazioni del FOR interno è pari al numero di tutti gli archi, cioè m.\nQuindi la complessità totale è O(n + m).\n'''\n"
},
{
"alpha_fraction": 0.5248227119445801,
"alphanum_fraction": 0.5652482509613037,
"avg_line_length": 22.131147384643555,
"blob_id": "304832596e99ff02e9e369c555e2ae8d272061a2",
"content_id": "eac0f44d1edb3fbc53896d3678e0c75a0607753c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1417,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 61,
"path": "/componentiConnesse.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "import simpleStackQueue\n\n'''\nPer tenere traccia delle componenti connesse si può usare un array che ad\nogni nodo assegna l'indice della sua componente connessa (gli indici sono determinati dall'ordine con cui sono\ntrovate le componenti)\n'''\n\n\ndef dfsCC(G, node, arrayCc, countComp):\n S = simpleStackQueue.Stack()\n S.push(node)\n arrayCc[node] = countComp\n while S.size() >= 1:\n v = S.top()\n control = 0\n for adjacent in G[v]:\n if arrayCc[adjacent] == -1:\n arrayCc[adjacent] = countComp\n S.push(adjacent)\n control = 1\n if control == 0:\n S.pop()\n\n\ndef CC(G):\n arrayCc = [-1 for _ in G]\n countComp = 0\n for node in G:\n if arrayCc[node] == -1:\n dfsCC(G, node, arrayCc, countComp)\n countComp += 1\n return arrayCc\n\n\ngraph = {\n 0: [4],\n 1: [2, 4],\n 2: [1, 3],\n 3: [2, 4, 5],\n 4: [0, 1, 3],\n 5: [3],\n 6: [7, 8],\n 7: [6],\n 8: [6, 9],\n 9: [8],\n 10: [11, 12, 13],\n 11: [10],\n 12: [10],\n 13: [10]\n}\nprint(CC(graph))\n\n\n'''\nQuindi CC(G) ritorna un array che per ogni nodo di G dà l'indice della sua componente connessa.\nPer grafi non diretti ciò è corretto ed è anche efficiente in quanto la complessità è ancora una volta O(n + m).\n\n!!!Per grafi diretti l'algoritmo non determina in generale le componenti fortemente connesse.!!!\n\n'''"
},
{
"alpha_fraction": 0.6425586342811584,
"alphanum_fraction": 0.6550948023796082,
"avg_line_length": 39.94736862182617,
"blob_id": "548481b3ee7cc09c12b175ced6f563b8cbbf8c3a",
"content_id": "f79b595c29a46d48e796cdea3d25eec9b51082a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3146,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 76,
"path": "/classificazioneArchiDfsECicli.py",
"repo_name": "FlameDavid92/Python-ProgettazioneDiAlgoritmi",
"src_encoding": "UTF-8",
"text": "'''\nConsideriamo un qualsiasi arco diretto (x, y) non appartenente all'albero della DFS. Per gli intervalli di visita\ndi x e y sono possibili solamente i seguenti casi:\n\n- Gli intervalli di x e y sono disgiunti: non può essere t(x) < t(y) perchè l'arco (x, y) avrebbe forzato la visita\n di y durante la visita da x e i due intervalli non sarebbe stati disgiunti.\n Però può tranquillamente essere t(y) < t(x), cioè l'arco (x, y) è tra due nodi che non hanno rapporti di\n discedenza e va da quello più giovane a quello più vecchio. Questo tipo di arco\n (che non può esistere in grafi non diretti) è detto \"arco di attraversamento\" (in inglese cross edge).\n\n- L'intervallo di x è contenuto nell'intervallo di y: l'arco va da un nodo x a un suo antenato y ed è detto arco\n all'indietro (in inglese back edge). Questo tipo di arco esiste anche in grafi non diretti.\n\n- L'intervallo di x contiene l'intervallo di y: l'arco va da un nodo x a un suo discendente y. Questo vuol dire che\n il nodo y è stato visitato durante la DFS da x ma seguendo un cammino diverso dal semplice arco (x, y).\n Questo tipo di arco è detto arco in avanti (in inglese forward edge). Per i grafi non diretti coincide con l'arco\n all'indietro.\n\n\nSe il grafo non è diretto (e connesso):\n la presenza di un qualsiasi arco all'indietro indica l'esistenza di un ciclo.\n E se non ci sono archi all'indietro il grafo è aciclico perchè coincide con l'albero della DFS.\n\nLo stesso vale per grafi diretti, cioè il grafo ha un ciclo se e solo se c'è almeno un arco all'indietro.\n'''\n\n\ndef DFS_CYCLE(G, v, u, P, dir):\n P[v] = -u # Il valore negativo indica che la visita è iniziata ma non è terminata\n for w in G[v]:\n if P[w] == 0:\n z = DFS_CYCLE(G, w, v, P, dir)\n if z != 0: # Un ciclo è già stato trovato\n P[v] = -P[v]\n return z\n elif P[w] < 0 and (w != u or dir == 1): # Trovato ciclo\n P[w] = 0 # Marca il primo nodo del ciclo\n P[v] = u\n return v\n P[v] = u # La visita da u è terminata\n return 0 # senza aver trovato un ciclo\n\n\ndef dfs_cycle_nodeList(G, u):\n P = [0 for _ in G]\n w = DFS_CYCLE(G, u, u, P, 1) # 1 se il grafo è diretto\n L = []\n while w > 0:\n L.append(w)\n w = P[w]\n return L\n\n\ngrf = {\n 0: [1,10],\n 1: [2],\n 2: [3, 4, 7],\n 3: [],\n 4: [5],\n 5: [6],\n 6: [2],\n 7: [8],\n 8: [7, 9],\n 9: [],\n 10: [9]\n}\nprint(dfs_cycle_nodeList(grf, 0))\n\n'''\nSi osservi che l'algoritmo DFS_CYCLE non costa più della DFS. Inoltre, nel caso di un grafo non diretto, può essere\nmolto più efficiente perché termina sempre in O(n). Infatti, se il grafo è aciclico la DFS stessa impiega O(n) \nperchè il grafo è un albero che ha solamente n - 1 archi. Se invece il grafo ha almeno un ciclo, l'algoritmo \ntermina non appena trova un arco all'indietro. Al più saranno visitati tutti gli n - 2 archi dell'albero \ndella DFS prima di incontrare un tale arco (dato che un qualsiasi arco o appartiene all'albero \no è un arco all'indietro). Quindi il costo dell'algoritmo è O(n).\n'''"
}
] | 15 |
WILDERNESSWIRELESS/WILDERNESS-WIRELESS-RESEARCH | https://github.com/WILDERNESSWIRELESS/WILDERNESS-WIRELESS-RESEARCH | 896688303dea0a977f63cdebe19afa5a937ca044 | e3ccc9303bb047b67d52aa29de33657c4c53b99f | 8edf3505e9e5e8ec5178cd0346382e0577b94c97 | refs/heads/master | 2020-03-23T03:25:57.561617 | 2018-07-15T13:51:01 | 2018-07-15T13:51:01 | 141,030,430 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6133333444595337,
"alphanum_fraction": 0.653333306312561,
"avg_line_length": 23.66666603088379,
"blob_id": "148656672d5f18fd6fa187a1ae8e89d7478a8c6b",
"content_id": "c7d4d3bb794d98241224b69bc3aadcc47b547abc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 3,
"path": "/MICROPYTHON/count.py",
"repo_name": "WILDERNESSWIRELESS/WILDERNESS-WIRELESS-RESEARCH",
"src_encoding": "UTF-8",
"text": "print(\"Hello world! I can count to ten:\")\nfor i in range(1,11):\n\tprint(i)\n\n"
},
{
"alpha_fraction": 0.5966851115226746,
"alphanum_fraction": 0.6491712927818298,
"avg_line_length": 23.133333206176758,
"blob_id": "4d2a1cca58e97e167b663a70604b6a2a6db590e3",
"content_id": "1873473f3ccd84ae4400ab5461edfbc1d7584b42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 15,
"path": "/MICROPYTHON/picowebtest.py",
"repo_name": "WILDERNESSWIRELESS/WILDERNESS-WIRELESS-RESEARCH",
"src_encoding": "UTF-8",
"text": "import picoweb\n \napp = picoweb.WebApp(__name__)\n\[email protected](\"/\")\ndef index(req, resp):\n yield from picoweb.start_response(resp, content_type = \"text/html\")\n \n htmlFile = open('index.html', 'r')\n \n for line in htmlFile:\n yield from resp.awrite(line)\n \n#app.run(debug=True, host=\"192.168.1.33\",port=80)\napp.run(debug=True, host=\"127.0.0.1\", port=80)\n"
}
] | 2 |
mongchy/server-bot | https://github.com/mongchy/server-bot | 656c02945ee64c1430ee461a8f0daf121784b8d9 | 67cf86acd26170d70b56be8f09bc13677d74ce67 | 88d167b874c84f1db1d41d36bbb1d593cf4b0024 | refs/heads/master | 2020-09-19T03:56:59.059189 | 2019-11-26T14:11:19 | 2019-11-26T14:11:19 | 224,199,825 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5030864477157593,
"alphanum_fraction": 0.5030864477157593,
"avg_line_length": 25,
"blob_id": "64a6ec211f75ccde7cd18a27548d682874d8af91",
"content_id": "8df93aa20eda401dcbab9f007b3f7a71fdaba01d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 12,
"path": "/serverbot.py",
"repo_name": "mongchy/server-bot",
"src_encoding": "UTF-8",
"text": "import discord\r\nimport os\r\n\r\nclient = discord.Client()\r\n\r\[email protected]\r\nasync def on_message(message):\r\n if message.content.startswith(\"안녕\"):\r\n await message.channel.send(\"그 대는 정말로 안녕이라고 말하였기 때문에 평민 등급을 주지\")\r\n \r\n \r\nclient.run(\"NjA3MTQyNDAxODM2MDU2NTc5.Xdp-Aw.Rt_jPT6TcFjVNK9XSBlkUe_adrg\")\r\n"
}
] | 1 |
kkdh7609/gteamserver | https://github.com/kkdh7609/gteamserver | 1fb84715531d2c336393be0576ca11525649955f | ba489cea716e518d3d36ef5922c50f20cc43f9f2 | ae5091b5bdb87efaf563e339ae110623d4b98f1a | refs/heads/master | 2020-12-13T14:44:25.369194 | 2020-01-17T01:56:21 | 2020-01-17T01:56:21 | 234,449,142 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.684959352016449,
"alphanum_fraction": 0.684959352016449,
"avg_line_length": 34.21428680419922,
"blob_id": "f5990a0a9a5c0315e2bfe36dbd634afa5fff23b4",
"content_id": "5077d96e2ef107c4c667f843901a6bd81a5cc4b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 492,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 14,
"path": "/django/gteam/gteamManage/urls.py",
"repo_name": "kkdh7609/gteamserver",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom . import views\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import routers\nfrom rest_framework_swagger.views import get_swagger_view\n\nurlpatterns = [\n url(r'^user/$', views.users),\n url(r'^manager/$', csrf_exempt(views.manager)),\n url(r'^api/doc/$', get_swagger_view(title='Rest API DOC')),\n url(r'^$', views.home),\n url(r'^stadium/$', views.stadium),\n url(r'^api/push/$', csrf_exempt(views.post_call))\n ]"
},
{
"alpha_fraction": 0.7731958627700806,
"alphanum_fraction": 0.7731958627700806,
"avg_line_length": 18.399999618530273,
"blob_id": "d3a10bc0ee53d6bc7c197bafbfeae226f5ee5c29",
"content_id": "ad449e24fdf296b18fab6fcab7c1a1a5c90368fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 97,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 5,
"path": "/django/gteam/gteamManage/apps.py",
"repo_name": "kkdh7609/gteamserver",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass GteammanageConfig(AppConfig):\n name = 'gteamManage'\n"
},
{
"alpha_fraction": 0.5817591547966003,
"alphanum_fraction": 0.5850058794021606,
"avg_line_length": 26.770492553710938,
"blob_id": "ba6831fa77638dccc3f116456e28928cc1984a9c",
"content_id": "ca702ff7ab4020ba1749a1ee12d6145900461781",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3388,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 122,
"path": "/django/gteam/gteamManage/views.py",
"repo_name": "kkdh7609/gteamserver",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom gteamManage import firebase_util\nfrom django.shortcuts import redirect\n# Create your views here.\ntt = \"123\"\ntempId = \"\"\n\n\nclass Data:\n def __init__(self, num, name, email, role, status):\n self.num = num\n self.name = name\n self.email = email\n self.role = role\n self.status = status\n\n\nclass StadiumData:\n def __init__(self, num, name, location, phone):\n self.num = num\n self.name = name\n self.location= location\n self.phone = phone\n\n\nclass RegData:\n def __init__(self, collectId, num, name, email, role, regNum, keyId, btn, val):\n self.collectId = collectId\n self.num = num\n self.name = name\n self.email = email\n self.role = role\n self.regNum = regNum\n self.keyId = keyId\n self.btn = btn\n self.val = val\n\n\ndef users(request):\n db = firebase_util.setFireStore()\n doc_ref = db.collection(u'user').stream()\n arr = []\n cnt = 1\n for doc in doc_ref:\n temp_doc = doc.to_dict()\n name = temp_doc[\"name\"]\n email = temp_doc[\"email\"]\n if temp_doc[\"isUser\"]:\n role = \"user\"\n else:\n role = \"manager\"\n status = \"Active\"\n arr.append(Data(cnt, name, email, role, status))\n cnt += 1\n\n context = {'arr': arr, 'arrLen': len(arr)}\n\n return render(request, 'gteamManage/user.html', context)\n\n\ndef stadium(request):\n db = firebase_util.setFireStore()\n doc_ref = db.collection(u'stadium').stream()\n arr = []\n cnt = 1\n for doc in doc_ref:\n temp_doc = doc.to_dict()\n name = temp_doc[\"stadiumName\"]\n phone = temp_doc[\"telephone\"]\n location = temp_doc[\"location\"]\n arr.append(StadiumData(cnt, name, location, phone))\n cnt += 1\n\n context = {'arr': arr, 'arrLen': len(arr)}\n\n return render(request, 'gteamManage/stadium.html', context)\n\n\ndef manager(request):\n global tt, tempId\n db = firebase_util.setFireStore()\n doc_ref = db.collection(u'managerReg').stream()\n arr = []\n cnt = 1\n for doc in doc_ref:\n temp_doc = doc.to_dict()\n collectId = doc.id\n name = temp_doc[\"name\"]\n email = temp_doc[\"email\"]\n keyId = temp_doc[\"key\"]\n regNum = temp_doc[\"businessNum\"]\n role = \"manager\"\n btn = f\"Accept{cnt}\"\n val = keyId\n arr.append(RegData(collectId, cnt, name, email, role, regNum, keyId, btn, val))\n cnt += 1\n tt = request.POST.get(f\"Accept\")\n if tt:\n tempId = tt.split(\" \")[0]\n for tempNum in range(0, len(arr)):\n if arr[tempNum].keyId == tempId:\n db.collection(u'user').document(arr[tempNum].keyId).update({\"permission\": True})\n db.collection(u'managerReg').document(arr[tempNum].collectId).delete()\n return redirect('/manager')\n\n context = {'arr': arr, 'arrLen': len(arr)}\n return render(request, 'gteamManage/manager.html', context)\n\n\ndef home(request):\n return render(request, 'gteamManage/home.html')\n\n\ndef post_call(request):\n typeId = int(request.POST.get(\"type\"))\n target = request.POST.get(\"target\")\n try:\n token = firebase_util.push_notification(typeId, target)\n return HttpResponse(token)\n except Exception:\n HttpResponse(Exception)\n"
},
{
"alpha_fraction": 0.549281656742096,
"alphanum_fraction": 0.5546274781227112,
"avg_line_length": 31.182796478271484,
"blob_id": "0288c9917f9a0920923b8c7b8b79538897e5e27d",
"content_id": "c33fcbbc38c06b42f1b8d19e237598542ec8bf31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3141,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 93,
"path": "/django/gteam/gteamManage/firebase_util.py",
"repo_name": "kkdh7609/gteamserver",
"src_encoding": "UTF-8",
"text": "import firebase_admin\nfrom firebase_admin import credentials, firestore, datetime, messaging\nfrom multiprocessing import Process\n\ndef setFireStore():\n if not len(firebase_admin._apps):\n cred = credentials.Certificate(\"service_key.json\")\n firebase_admin.initialize_app(cred)\n db = firestore.client()\n return db\n\ndef push_notification(typeId, target): # type1은 예약접수 시작, 2는 주인에게 알림, 3는 예약완료 알 림\n db = setFireStore()\n if typeId == 1:\n gameId = target\n doc_ref = db.collection(u'game3').document(gameId)\n userList = doc_ref.get().get(u'userList')\n user_ref = db.collection(u'user').stream()\n\n arr = []\n th_arr = []\n for doc in user_ref:\n if(doc.to_dict()['email'] in userList):\n new_user_ref = db.collection(u'token').document(doc.id)\n token = new_user_ref.get().get(u'token')\n arr.append(token)\n\n for token in arr:\n th_arr.append(Process(target=push_message, args=(token, \"예약접수\", f\"{doc_ref.get().get(u'gameName')} 게임의 예약접수가 시작되었습니다.\")))\n for th in th_arr:\n th.start()\n for th in th_arr:\n th.join()\n return \"Success\"\n\n elif typeId == 2:\n stadiumId = target\n doc_ref = db.collection(u'stadium').document(stadiumId)\n ownerId = doc_ref.get().get('ownerId')\n owner_ref = db.collection(u'token').document(ownerId)\n token = owner_ref.get().get('token')\n title = \"예약접수\"\n body = \"예약접수가 들어왔습니다.\"\n push_message(token, title, body)\n return token\n\n else:\n gameId = target\n doc_ref = db.collection(u'game3').document(gameId)\n userList = doc_ref.get().get(u'userList')\n user_ref = db.collection(u'user').stream()\n\n arr = []\n th_arr = []\n for doc in user_ref:\n if (doc.to_dict()['email'] in userList):\n new_user_ref = db.collection(u'token').document(doc.id)\n token = new_user_ref.get().get(u'token')\n arr.append(token)\n\n for token in arr:\n th_arr.append(Process(target=push_message, args=(token, \"예약완료\", f\"{doc_ref.get().get(u'gameName')} 게임의 예약접수가 완료되었습니다.\")))\n for th in th_arr:\n th.start()\n for th in th_arr:\n th.join()\n return \"Success\"\n\n\ndef push_message(token, title, body):\n token = token\n message = messaging.Message(\n android=messaging.AndroidConfig(\n ttl=datetime.timedelta(seconds=3600),\n priority='normal',\n notification=messaging.AndroidNotification(\n title = title,\n body = body,\n icon='',\n color='#f45342',\n sound='default'\n )\n ),\n token = token\n )\n\n try:\n response = messaging.send(message)\n print(\"Successfully sent message:\", response)\n except Exception:\n print(Exception)\n\n return\n"
},
{
"alpha_fraction": 0.7789473533630371,
"alphanum_fraction": 0.7789473533630371,
"avg_line_length": 27.5,
"blob_id": "000bf9b6606c6796de26563328031e4c21ae24ea",
"content_id": "a5239d487bf29007ba6ae02867061eecc12a9420",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 285,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 10,
"path": "/django/gteamproject/gteamproject/views.py",
"repo_name": "kkdh7609/gteamserver",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n\nfrom django.http import HttpResponse\nfrom django.template import Context\nfrom django.template.loader import get_template\n\ndef index_view(req):\n template = get_template(\"index.html\")\n output = template.render()\n return HttpResponse(output)\n"
}
] | 5 |
AlexGokan/Interpreter | https://github.com/AlexGokan/Interpreter | 733282d7b1fab43835e036b8679648410f89cff2 | 6068ffb167e4dbe6d88ac1fc36c156e5b0400fb0 | 9ec137d2e1b1bc8f47bc82d3a2554326dc76d759 | refs/heads/master | 2020-05-23T11:42:39.826017 | 2019-05-17T19:41:02 | 2019-05-17T19:41:02 | 186,742,499 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5779796242713928,
"alphanum_fraction": 0.5799733996391296,
"avg_line_length": 19.70183563232422,
"blob_id": "7a4e179657f9137c1ac5a0e6c66327105bee27ff",
"content_id": "22cfe95284b5c9bffc0857655ad5d602acf5344c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4514,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 218,
"path": "/C/zeta.c",
"repo_name": "AlexGokan/Interpreter",
"src_encoding": "UTF-8",
"text": "\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <math.h>\n#include <string.h>\n#include \"defines.h\"\n#include \"scanner.c\"\n\ntypedef struct ast_var{\n double value;\n char* name;\n struct ast_var* next;\n struct ast_var* prev;\n}ast_var;\n\ntypedef struct var_table{\n struct ast_var* top;\n}var_table;\n\nstruct ast_var* new_ast_var(char* name){\n struct ast_var* n = malloc(1*sizeof(ast_var));\n n->name = name;\n n->value = 0.0;\n n->next = NULL;\n n->prev = NULL;\n}\n\nvoid free_var_table(struct var_table* VT){\n struct ast_var* v = VT->top;\n while(v != NULL){\n struct ast_var* tmp = v->next;\n free(v);\n v = tmp;\n }\n free(VT);\n}\n\nvoid print_all_variables(struct var_table* VT){\n print_seperator();\n struct ast_var* v = VT->top;\n while(v != NULL){\n printf(\"%s : %f\\n\",v->name,v->value);\n v = v->next;\n }\n print_seperator();\n}\n\nstruct ast_var* search_for_variable_in_table(struct var_table* VT, char* name){\n struct ast_var* T = VT->top;\n while(T != NULL){\n if(strcmp(T->name,name) == 0){\n return T;\n }\n T = T->next;\n }\n\n if(T == NULL){\n printf(\"could not find given variable in table\\n\");\n return NULL;\n }\n}\n\nvoid assign(struct var_table* VT, struct Token* T, char* name, double value){\n struct ast_var* v = search_for_variable_in_table(VT,name);\n\n if(v != NULL){\n v->value = value;\n }\n\n if(v == NULL){\n error_message(T->line,\"variable was not declared\");\n }\n}\n\ndouble evaluate(struct Token* head){\n struct Token* T = head;\n double result = 0;\n while(head->type != SEMICOLON){\n if(head->type == NUMBER){\n result = head->num_literal;\n }\n else{\n printf(\"dont know how to handle this yet, will come back to it\");\n break;\n }\n head = head->next;\n }\n return result;\n}\n\nvoid parse_for_assignments(struct Scanner* S, struct var_table* VT){\n struct Token* T = S->tokens;\n\n while(T->type != EOFTOKEN){\n if(T->type == VARIABLE){\n struct ast_var* var_to_search_for = search_for_variable_in_table(VT,T->string_literal);\n if(var_to_search_for == NULL){\n error_message(T->line,\"attempting to assign an undeclared variable\");\n }\n else{\n if(T->next != NULL & T->next->type == EQUAL){\n double value_to_assign = evaluate(T->next->next);\n assign(VT,T,T->string_literal,value_to_assign);\n }\n // else{\n // error_message(T->line, \"variable name must be followed by an assignment operator\");\n // }\n //idk why this error is popping up, even though it is assigning fine\n }\n }\n\n T = T->next;\n }\n}\n\nstruct var_table* parse_for_declarations(struct Scanner* S){\n struct Token* T = S->tokens;\n\n struct var_table* VT = malloc(sizeof(var_table));\n VT->top = NULL;\n\n while(T->type != EOFTOKEN){\n if(T->type == KEYWORD_VAR){\n if(T->next != NULL & T->next->type == VARIABLE){\n if(T->next->next != NULL & T->next->next->type == SEMICOLON){\n printf(\"declared a variable %s\\n\",T->next->string_literal);\n struct ast_var* v = new_ast_var(T->next->string_literal);\n struct ast_var* VT_head = VT->top;\n\n v->next = VT_head;\n if(VT_head != NULL){\n VT_head->prev = v;\n }\n VT->top = v;\n }\n else{\n error_message(T->line,\"missing semicolon\");\n }\n }\n else{\n error_message(T->line,\"keyword \\'var\\' must be followed by a variable name\");\n }\n }\n\n T = T->next;\n }\n\n return VT;\n\n}\n\n\nvoid parse_tokens(struct Scanner* S){\n struct Token* T = S->tokens;\n struct Token* curr = T;\n\n struct var_table* VT = parse_for_declarations(S);\n\n print_all_variables(VT);\n\n parse_for_assignments(S,VT);\n\n 
print_all_variables(VT);\n\n free_var_table(VT);\n\n\n}\n\n\nvoid run_file(char* filename){\n FILE* fp = fopen(filename,\"r\");\n\n fseek(fp,0,SEEK_END);\n int file_size_bytes = ftell(fp);\n rewind(fp);\n\n char* source = malloc(file_size_bytes * sizeof(char));\n\n for(int i=0; i<file_size_bytes; i++){\n char c = fgetc(fp);\n source[i] = c;\n }\n\n struct Scanner* S = new_scanner(source);\n S->num_chars_in_input = file_size_bytes;\n\n\n\n\n struct Token* token_list = scan_tokens(S);\n // token_list = S->tokens;\n\n print_all_tokens(S);\n\n\n parse_tokens(S);\n\n\n free_tokens(S);\n free(source);\n free(S);\n fclose(fp);\n}\n\nvoid main(int argc, char* argv[]){\n if(argc != 2){\n printf(\"enter ONE input file\");\n return;\n }\n\n char* filename = argv[1];\n\n run_file(filename);\n\n return;\n\n}\n"
},
{
"alpha_fraction": 0.6876404285430908,
"alphanum_fraction": 0.7910112142562866,
"avg_line_length": 19.674419403076172,
"blob_id": "30d512182601576d0feb673518253eb4b0ee6486",
"content_id": "c0f54c2fbfc13c1b07a1abcdbfea28eb4cdefa86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 890,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 43,
"path": "/C/defines.h",
"repo_name": "AlexGokan/Interpreter",
"src_encoding": "UTF-8",
"text": "\n#define LPAREN 1\n#define RPAREN 2\n#define LBRACKET 3\n#define RBRACKET 4\n#define COMMA 5\n#define DOT 6\n#define MINUS 7\n#define PLUS 8\n#define SEMICOLON 9\n#define STAR 10\n#define EXLAMATION 11\n#define EQUAL 12\n#define LESSTHAN 13\n#define GREATERTHAN 14\n#define NOTEQUAL 15\n#define EQUALEQUAL 16\n#define LESSEQUAL 17\n#define GREATEREQUAL 18\n#define SLASH 19\n#define SLASHSLASH 20\n#define NUMBER 21\n#define STRING 22\n#define EOFTOKEN 23\n#define LCURLY 24\n#define RCURLY 25\n\n#define VARIABLE 100\n#define KEYWORD_AND 101\n#define KEYWORD_CLASS 102\n#define KEYWORD_ELSE 103\n#define KEYWORD_FALSE 104\n#define KEYWORD_FOR 105\n#define KEYWORD_FUN 106\n#define KEYWORD_IF 107\n#define KEYWORD_NIL 108\n#define KEYWORD_OR 109\n#define KEYWORD_PRINT 110\n#define KEYWORD_RETURN 111\n#define KEYWORD_SUPER 112\n#define KEYWORD_THIS 113\n#define KEYWORD_TRUE 114\n#define KEYWORD_VAR 115\n#define KEYWORD_WHILE 116\n"
},
{
"alpha_fraction": 0.5048471093177795,
"alphanum_fraction": 0.5184829831123352,
"avg_line_length": 19.313852310180664,
"blob_id": "08c2392a731eef9eabf1051972f9f35e07948062",
"content_id": "959252a530b48a2fa9acd16b84890c99b6f9168c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 9387,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 462,
"path": "/C/scanner.c",
"repo_name": "AlexGokan/Interpreter",
"src_encoding": "UTF-8",
"text": "\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <math.h>\n#include <string.h>\n#include \"defines.h\"\n\nvoid print_seperator(){\n printf(\"--------------------------------------------------------------------\\n\");\n}\n\ntypedef struct Token{\n int type;\n char* lexeme;\n int line;\n\n char* string_literal;\n int string_length;\n double num_literal;\n struct Token* next;//to make a linked list of tokens\n struct Token* prev;\n //oh god what have I done\n}Token;\n\ntypedef struct Scanner{\n int start;\n int current;\n int line;\n\n char* source;\n struct Token* tokens;\n struct Token* tail;\n int num_chars_in_input;\n}Scanner;\n\nvoid print_all_tokens(struct Scanner* S){\n print_seperator();\n struct Token* t = S->tokens;\n while(t != NULL){\n printf(\"Token of type: %d \",t->type);\n if(t->type == NUMBER){\n printf(\"%f\",t->num_literal);\n }\n if(t->type == EOFTOKEN){\n printf(\" (EOF)\");\n }\n if(t->type == EQUAL){\n printf(\" = \");\n }\n if(t->type == STRING){\n for(int i=0; i<t->string_length; i++){\n printf(\"%c\",(t->string_literal)[i]);\n }\n }\n if(t->type == VARIABLE){\n printf(\"(var) \");\n }\n if((t->type <=116) & (t->type >=100)){\n for(int i=0; i<t->string_length; i++){\n printf(\"%c\",(t->string_literal)[i]);\n }\n }\n\n printf(\"\\n\");\n t = t->next;\n }\n printf(\"--------------------------------------------------------------------\\n\");\n}\n\nbool is_alphabetic(char c){\n if((c>='a' & c<='z') | (c >='A' & c<='Z') | (c=='_')){\n return true;\n }\n return false;\n}\n\nbool is_digit(char c){\n if(c>=48 & c<=57){\n return true;\n }\n return false;\n}\n\nbool is_alphanumeric(char c){\n return (is_digit(c) | is_alphabetic(c));\n}\n\nstruct Token* new_token(int type, char* lexeme, int line, char* string_literal, int string_length, double num_literal){\n struct Token* T = malloc(1*sizeof(Token));\n T->type = type;\n T->lexeme = lexeme;\n T->line = line;\n T->string_literal = string_literal;\n T->string_length = string_length;\n T->num_literal = num_literal;\n T->next = NULL;\n T->prev = NULL;\n return T;\n}\n\nstruct Scanner* new_scanner(char* source){\n struct Scanner* S = calloc(1,sizeof(Scanner));\n S->source = source;\n S->start = 0;\n S->current = 0;\n S->line = 1;\n return S;\n}\n\nvoid error_message(int line, char* message){\n printf(\"ERROR : line %d : %s\\n\",line,message);\n}\n\n\nbool is_at_end(struct Scanner* S){\n return(S->current >= S->num_chars_in_input);\n}\n\nvoid free_tokens(struct Scanner* S){\n struct Token* T = S->tokens;\n while(T != NULL){\n struct Token* tmp = T->next;\n free(T->lexeme);\n free(T->string_literal);\n free(T);\n T = tmp;\n }\n}\n\nvoid tokens_append(struct Scanner* S,struct Token* t){\n if(S->tail != NULL){\n S->tail->next = t;\n t->prev = S->tail;\n S->tail = t;\n }\n else{\n S->tokens = t;\n S->tail = t;\n }\n\n}\n\nvoid add_token(struct Scanner* S, int type, char* string_literal, int string_length, double num_literal){\n int len = (S->current) - (S->start);\n // printf(\"len: %d\\n\",len);\n if(len < 0){\n printf(\"invalid length\\n\");\n return;\n }\n char* text = malloc(len*sizeof(char));\n int idx = 0;\n for(int i=S->start; i<S->current; i++){\n text[idx] = (S->source)[i];\n idx++;\n }\n struct Token* nt = new_token(type,text,S->line,string_literal,string_length,num_literal);\n tokens_append(S,nt);\n}\n\n\n\nchar advance(struct Scanner* S){\n (S->current)++;\n return (S->source)[(S->current)-1];\n}\n\nbool match(struct Scanner* S, char expected){\n if(is_at_end(S)){\n return false;\n }\n 
if((S->source)[S->current] != expected){\n return false;\n }\n (S->current)++;\n return true;\n}\n\nchar peek(struct Scanner* S){\n if(is_at_end(S)){\n return '\\0';\n }\n return (S->source)[S->current];\n}\n\nchar peek_next(struct Scanner* S){\n if((S->current)+1 >= S->num_chars_in_input){\n return 0;\n }\n return (S->source)[S->current+1];\n}\n\nvoid scan_number(struct Scanner* S){\n\n while(is_digit(peek(S))){\n advance(S);\n }\n\n if((peek(S) == 46) & (is_digit(peek_next(S)))){\n advance(S);\n while(is_digit(peek(S))){\n advance(S);\n }\n }\n //from start to current contains the chars for the value\n int num_digits = (S->current) - (S->start);\n // printf(\"%d digits\\n\",num_digits);\n int i=S->start;\n for(i=S->start; i<S->current; i++){\n if((S->source)[i] == 46){\n //hit a decimal point\n break;\n }\n }\n\n //upper from start to i\n //lower from i+1 to current\n double output = 0;\n int power = 0;\n int d;\n for(d=i-1; d>(S->start)-1; d--){\n int digit = ((S->source)[d])-48;\n // printf(\"encountered %d\\n\",digit);\n output += (digit * pow(10,power));\n power++;\n }\n // printf(\"upper\\n\");\n\n// printf(\"d: %d start %d current %d i %d\\n\",d,S->start,S->current,i);\n\n\n\n power = -1;\n for(int d=i+1; d<(S->current); d++){\n int digit = ((S->source)[d])-48;\n output += (digit * pow(10,power));\n power--;\n }\n\n // printf(\"lower\\n\");\n // printf(\"%lf\\n\",output);\n\n add_token(S,NUMBER,NULL,0,output);\n\n}\n\nvoid scan_string(struct Scanner* S){\n while(peek(S) != 34 & (is_at_end(S) == false)){\n if(peek(S) == 10){\n (S->line)++;\n }\n advance(S);\n }\n\n if(is_at_end(S)){\n error_message(S->line,\"unterminated string\");\n return;\n }\n\n advance(S);\n\n int len = ((S->current)-1) - ((S->start)+1);\n char* text = malloc(len*sizeof(char));\n int idx = 0;\n for(int i=(S->start)+1; i<(S->current)-1; i++){\n text[idx] = (S->source)[i];\n idx++;\n }\n\n // for(int i=0; i<len; i++){\n // printf(\"%c\",text[i]);\n // }\n // printf(\"\\n\");\n\n add_token(S,STRING,text,len,0);\n\n}\n\nvoid scan_identifier(struct Scanner* S){\n while(is_alphanumeric(peek(S))){\n advance(S);\n }\n int len = (S->current) - (S->start);\n char* text = malloc((len+1)*sizeof(char));\n int idx = 0;\n for(int i=S->start; i<S->current; i++){\n text[idx] = (S->source)[i];\n idx++;\n }\n text[len] = '\\0';//to make it a string!\n\n // printf(\"text 0: %d\\n\",text[0]);\n\n int type;\n if((text != NULL) & (text[0] != '\\0')){\n if(strcmp(text,\"and\") == 0){\n type = KEYWORD_AND;\n }\n else if(strcmp(text,\"class\") == 0){\n type = KEYWORD_CLASS;\n }\n else if(strcmp(text,\"else\") == 0){\n type = KEYWORD_ELSE;\n }\n else if(strcmp(text,\"false\") == 0){\n type = KEYWORD_FALSE;\n }\n else if(strcmp(text,\"for\") == 0){\n type = KEYWORD_FOR;\n }\n else if(strcmp(text,\"fun\") == 0){\n type = KEYWORD_FUN;\n }\n else if(strcmp(text,\"if\") == 0){\n type = KEYWORD_IF;\n }\n else if(strcmp(text,\"nil\") == 0){\n type = KEYWORD_NIL;\n }\n else if(strcmp(text,\"or\") == 0){\n type = KEYWORD_OR;\n }\n else if(strcmp(text,\"print\") == 0){\n type = KEYWORD_PRINT;\n }\n else if(strcmp(text,\"return\") == 0){\n type = KEYWORD_RETURN;\n }\n else if(strcmp(text,\"super\") == 0){\n type = KEYWORD_SUPER;\n }\n else if(strcmp(text,\"this\") == 0){\n type = KEYWORD_THIS;\n }\n else if(strcmp(text,\"true\") == 0){\n type = KEYWORD_TRUE;\n }\n else if(strcmp(text,\"var\") == 0){\n type = KEYWORD_VAR;\n }\n else if(strcmp(text,\"while\") == 0){\n type = KEYWORD_WHILE;\n }\n else{\n type = VARIABLE;\n }\n\n\n 
add_token(S,type,text,len,0);\n\n }\n\n}\n\nvoid scan_token(struct Scanner* S){\n char c = advance(S);\n // printf(\"%d\\n\",c);\n switch(c){\n case '(':\n add_token(S,LPAREN,NULL,0,0);\n break;\n case ')':\n add_token(S,RPAREN,NULL,0,0);\n break;\n case '}':\n add_token(S,RCURLY,NULL,0,0);\n break;\n case '{':\n add_token(S,LCURLY,NULL,0,0);\n break;\n case '[':\n add_token(S,LBRACKET,NULL,0,0);\n break;\n case ']':\n add_token(S,RBRACKET,NULL,0,0);\n case ',':\n add_token(S,COMMA,NULL,0,0);\n break;\n case '.':\n add_token(S,DOT,NULL,0,0);\n break;\n case '-':\n add_token(S,MINUS,NULL,0,0);\n break;\n case '+':\n add_token(S,PLUS,NULL,0,0);\n break;\n case ';':\n add_token(S,SEMICOLON,NULL,0,0);\n break;\n case '*':\n add_token(S,STAR,NULL,0,0);\n break;\n case '!':\n if(match(S,'=')){\n add_token(S,NOTEQUAL,NULL,0,0);\n }\n else{\n add_token(S,EXLAMATION,NULL,0,0);\n }\n break;\n case '=':\n if(match(S,'=')){\n add_token(S,EQUALEQUAL,NULL,0,0);\n }\n else{\n add_token(S,EQUAL,NULL,0,0);\n }\n break;\n case '<':\n if(match(S,'=')){\n add_token(S,LESSEQUAL,NULL,0,0);\n }\n else{\n add_token(S,LESSTHAN,NULL,0,0);\n }\n break;\n case '>':\n if(match(S,'=')){\n add_token(S,GREATEREQUAL,NULL,0,0);\n }\n else{\n add_token(S,GREATERTHAN,NULL,0,0);\n }\n break;\n case 32:\n break;\n case 9:\n break;\n case 13:\n break;\n case '\\n':\n (S->line)++;\n break;\n default:\n if(c=='\\\"'){\n scan_string(S);\n }\n else if(is_digit(c)){\n scan_number(S);\n }\n else if(is_alphabetic(c)){\n scan_identifier(S);\n }\n else{\n error_message(S->line,\"unexpected character\");\n }\n break;\n\n\n }\n\n\n}\n\nstruct Token* scan_tokens(struct Scanner* S){\n printf(\"scanning tokens\\n\");\n while(!is_at_end(S)){\n S->start = S->current;\n scan_token(S);\n }\n tokens_append(S,new_token(EOFTOKEN,0,0,NULL,0,0));\n}\n"
},
{
"alpha_fraction": 0.4654516577720642,
"alphanum_fraction": 0.48335975408554077,
"avg_line_length": 26.55021858215332,
"blob_id": "5961dceb2f311584a70485263a48b1061563db02",
"content_id": "2d31b7c02b8d0046d889917f147ab0aa1c6671d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6310,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 229,
"path": "/scanner.py",
"repo_name": "AlexGokan/Interpreter",
"src_encoding": "UTF-8",
"text": "\nclass Scanner:\n def __init__(self,byte_arr):\n self.start = 0\n self.current = 0\n self.line = 1\n\n self.byte_arr = byte_arr#called 'source' in book\n self.tokens = []\n\n self.keywords = [b'and',b'class',b'else',b'false',b'for',b'fun',b'if',b'nil',b'or',b'print',b'return',b'super',b'this',b'true',b'var',b'while']\n\n\n def print_byte_arr(self):\n for i in range(0,len(self.byte_arr)):\n print(self.byte_arr[i])\n\n def is_at_end(self):\n return (self.current >= len(self.byte_arr))\n\n def scan_tokens(self):\n print(\"scanning tokens\")\n while(self.is_at_end() == False):\n self.start = self.current\n self.scan_token()\n\n\n (self.tokens).append(Token(\"EOF\",\"\",None,None))#I think?\n #in book it is (EOF,\"\",\"NULL\",\"line\")\n return(self.tokens)\n #must return some list of token objects\n\n def scan_token(self):\n c = self.advance()\n if(c==40):\n self.add_token('(',None)\n elif(c==41):\n self.add_token(')',None)\n elif(c==123):\n self.add_token('{',None)\n elif(c==125):\n self.add_token('}',None)\n elif(c==44):\n self.add_token(',',None)\n elif(c==46):\n self.add_token('.',None)\n elif(c==45):\n self.add_token('-',None)\n elif(c==43):\n self.add_token('+',None)\n elif(c==59):\n self.add_token(';',None)\n elif(c==42):\n self.add_token('*',None)\n elif(c==33):\n if(self.match(61)):\n self.add_token('!=',None)\n else:\n self.add_token('!',None)\n elif(c==61):\n if(self.match(61)):\n self.add_token('==',None)\n else:\n self.add_token('=',None)\n elif(c==60):\n if(self.match(61)):\n self.add_token('<=',None)\n else:\n self.add_token('<',None)\n elif(c==62):\n if(self.match(61)):\n self.add_token('>=',None)\n else:\n self.add_token('>',None)\n elif(c==47):#comment\n if(self.match(47)):\n while(self.peek() != '\\n' and (self.is_at_end() == False)):\n self.advance()\n else:\n self.add_token('/',None)\n elif(c==32):#space\n pass\n elif(c==9):#tab\n pass\n elif(c==13):#carriage return\n pass\n elif(c==10):#newline\n self.line += 1\n elif(c==34):#double quotes\n self.scan_string()\n elif(self.is_digit(c)):\n self.scan_number()\n elif(self.is_alphabetic(c)):\n self.scan_indentifier()\n else:\n Z.error_message(self.line,\"unexpected character\")\n\n def scan_indentifier(self):\n while(self.is_alphanumeric(self.peek())):\n self.advance()\n\n text = self.byte_arr[self.start:self.current:1]\n text = bytes(text)\n print(text)\n if(text in self.keywords):\n print(\"keyword detected\")\n type = text\n else:\n type = 'identifier'\n\n self.add_token(type,None)\n\n def is_alphabetic(self,c):\n if((c >= 97 and c <= 122) or (c >= 65 and c<=90) or (c == 95)):\n return True\n else:\n return False\n\n def is_alphanumeric(self,c):\n return (self.is_alphabetic(c) or self.is_digit(c))\n\n def scan_number(self):\n while(self.is_digit(self.peek())):\n self.advance()\n\n if(self.peek() == 46 and self.is_digit(self.peek_next())):\n self.advance()\n\n while(self.is_digit(self.peek())):\n self.advance()\n\n value = self.byte_arr[self.start:self.current:1]\n value = self.array_to_double(value)\n self.add_token('number',value)\n\n\n def array_to_double(self,v):\n num_digits = len(v)\n # print(v)\n for i in range(0,num_digits):\n if(v[i]==46):\n break\n upper = v[0:i:1]\n lower = v[i+1:num_digits:1]\n # print(upper)\n # print(lower)\n\n output = 0\n power = 0\n for d in reversed(upper):\n d = d-48\n output += (d*(10**power))\n power += 1\n\n power = -1\n for d in lower:\n d = d-48\n output += (d*(10**power))\n power -= 1\n\n # print(output)\n return output\n\n\n def is_digit(self,c):\n return ((c >= 48) and (c 
<= 57))\n\n def advance(self):\n self.current = self.current+1\n return (self.byte_arr)[self.current-1]\n\n def match(self,expected):\n if(self.is_at_end()):\n return False\n if((self.byte_arr)[self.current] != expected):\n return False\n\n self.current += 1\n return True\n\n def peek(self):\n if(self.is_at_end()):\n return '\\0'\n return (self.byte_arr)[self.current]\n\n def peek_next(self):\n if(self.current+1 >= len(self.byte_arr)):\n return 0\n return self.byte_arr[self.current+1]\n\n def scan_string(self):\n while(self.peek() != 34 and (self.is_at_end() == False)):\n if(self.peek() == 10):\n self.line += 1\n self.advance()\n\n if(self.is_at_end()):\n Z.error_message(self.line,\"unterminated string\")\n return\n\n self.advance()\n\n value = self.byte_arr[self.start+1:self.current-1:1]\n value_bytes = bytes(value)\n print(value_bytes)\n # for i in range(0,len(value)):\n # integer_val = value[i]\n # byte_val = byte(integer_val)\n # value_bytes[i] = byte_val\n\n # b''.join(value_bytes).decode('utf-8')\n\n self.add_token('string',value_bytes)\n # def add_token(type):\n # add_token(type,None)\n\n def add_token(self,type,literal):\n # print(\"adding token\")\n text = (self.byte_arr)[self.start:self.current:1]\n (self.tokens).append(Token(type,text,literal,self.line))\n\n\n\n\nclass Token:\n def __init__(self,type,lexeme,literal,line):\n self.type = type\n self.lexeme = lexeme\n self.literal = literal\n self.line = line\n"
},
{
"alpha_fraction": 0.5626413226127625,
"alphanum_fraction": 0.5658073425292969,
"avg_line_length": 17.12295150756836,
"blob_id": "f7f67d43cea9f9ebca4776ae7f388e02692884c7",
"content_id": "be77b14652b1cf3069437b358da1f735eda2edfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2211,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 122,
"path": "/zeta.py",
"repo_name": "AlexGokan/Interpreter",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nfrom array import array\nfrom enum import Enum\nfrom abc import ABC, abstractmethod\nimport scanner#scanner.py\n\n\nclass Zeta:\n def __init__(self):\n self.has_error = False\n\n def error_message(self,line,message):\n print(\"ERROR: line\",line,\":\",message)\n self.has_error = True\n return\n\n\nZ = Zeta()\n\n\n\ndef main():\n\n if(len(sys.argv) > 2):\n print(\"Usage: zeta [script]\")\n exit()\n elif(len(sys.argv) == 2):\n filename = sys.argv[1]\n if(filename[-2:] != \".z\"):\n print(\"please provide a .z file\")\n exit()\n run_file(filename)\n else:\n print(\"please provide ONE input file\")\n exit()\n\n return\n\n\ndef run_file(filename):\n file_size_bytes = os.path.getsize(filename)\n f = open(filename,\"rb\")\n # b = array('b')\n b = []\n\n for i in range(0,file_size_bytes):\n #print(f.read(1))\n c = f.read(1)\n b.append(int.from_bytes(c,\"big\"))\n\n\n\n f.close\n\n s = Scanner(b)\n token_list = s.scan_tokens()\n\n # for t in self.token_list:\n # print(t.type)\n\n print_all_tokens(token_list)\n\n\n\n run(b)\n\n if(Z.has_error):\n exit()\n\n return\n\ndef print_all_tokens(token_list):\n for t in token_list:\n print(t.type,end=' ')\n if(t.type == 'number' or t.type=='string' or t.type=='identifier'):\n print(t.literal,end='')\n print('')\n\n\n\n\ndef run(byte_arr):\n pass\n return\n\n\nclass Expression(ABC):\n def __init__(self):\n pass\n\nclass Binary(Expression, left, operator, right):\n def __init__(self):\n self.left = left\n self.operator = operator\n self.right = right\n\nclass Unary(Expression,operator,right):\n def __init__(self):\n self.operator = operator\n self.right = tight\n\nclass Grouping(Expression,expression):\n def __init__(self):\n self.expression = expression\n\nclass Literal(Expression,value):\n def __init__(self):\n self.value = value\n\nclass Operator(Expression,value):\n def __init__(self):\n self.value = value\n\nclass tree_printer:\n def string_print(Expression expr):\n return expr.accept(self)\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n"
}
] | 5 |
lauranewland/gsdc-webapp-flask | https://github.com/lauranewland/gsdc-webapp-flask | d5ca26c9d8ec9430f4f64b914b400c66e6cb17c0 | 464acf930265f9dad238e56fb32d9e511d891d9a | 63461ffac36ec84648c251f16c1701e12a7b669d | refs/heads/master | 2023-01-28T21:07:02.638726 | 2020-12-08T20:24:31 | 2020-12-08T20:24:31 | 311,819,476 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4823399484157562,
"alphanum_fraction": 0.5224429965019226,
"avg_line_length": 54.48979568481445,
"blob_id": "89d472b73757a196e206bd82128e3c61509f5008",
"content_id": "dcd5f7dadadb092cf338612d7e4715b0d7c5133e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2718,
"license_type": "no_license",
"max_line_length": 628,
"num_lines": 49,
"path": "/templates/login_landing.html",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n{% block title %}Create User{% endblock %}\n{% block body %}\n\n <h2>Welcome {{ name }}!!</h2>\n\n\n <div class=\"container4 custom-control-inline\">\n <div class=\"container4 custom-control-inline\">\n <h4>2019 Club Officers</h4>\n <ul>\n <li>President: Mario Speedwagon - [email protected]</li>\n <li>Vice-President: Sue Vaneer - [email protected]</li>\n <li>Secretary: Mylee Yu - [email protected]</li>\n <li>Treasurer: Laura Meyer - [email protected] </li>\n <li>Board Member: Laura Newland - [email protected]</li>\n <li>Board Member: Bob Frapples - [email protected]</li>\n <li>Board Member: Walter Melon - [email protected]</li>\n </ul>\n <br>\n <h4>Club Docs</h4>\n <ul>\n <li> <a href=\"#\">Listing of Past Board and Membership Minutes</a></li>\n <li><a href=\"#\">Constitution & Bylaws</a></li>\n <li><a href=\"#\">Standing Rules</a></li>\n <li><a href=\"#\">Expense Reimbursement Form</a></li>\n <li><a href=\"#\">Membership Application</a></li>\n <li><a href=\"#\">Submit A Litter Announcement</a></li>\n </ul>\n </div>\n <div class=\"container5 custom-control-inline\">\n <h4>December Events</h4>\n <ul>\n <li>December 1st: Board Meeting</li>\n <li>December 4th: Special Training Block-Obedience</li>\n <li>December 4th: Membership Meeting</li>\n <li>December 11th: Special Training Block-Obedience</li>\n <li>December 18th: Graduation!-Obedience</li>\n <li>December 25th: No Training</li>\n </ul>\n </div>\n\n <div class=\"container6 custom-control-inline\">\n <div class=\"calendar row-col-6\">\n <iframe src=\"https://calendar.google.com/calendar/embed?height=600&wkst=1&bgcolor=%23ffffff&ctz=America%2FChicago&src=YzZxcnNzNmI1ZTRxOGxiMTE0b3JmNjY5MG9AZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&src=MTc5NHZ2MTMzMmhjMGUwNGNmMTNpM3UzZGdAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&src=Y2I5dWxtY3E1YXVzZGp0dGN0bWx1Y2VtMXNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ&src=ZW4udXNhI2hvbGlkYXlAZ3JvdXAudi5jYWxlbmRhci5nb29nbGUuY29t&color=%234285F4&color=%23EF6C00&color=%23B39DDB&color=%23E4C441\" style=\"border:solid 1px #777\" width=\"800\" height=\"600\" frameborder=\"0\" scrolling=\"no\"></iframe>\n </div>\n </div>\n </div>\n{% endblock %}"
},
{
"alpha_fraction": 0.6524520516395569,
"alphanum_fraction": 0.6652451753616333,
"avg_line_length": 34.17499923706055,
"blob_id": "55ea03397d1b1a1a92d24ac52389d0161e6c9577",
"content_id": "c695fea5c43610e357e31b7c25663d0b5761d440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2814,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 80,
"path": "/model.py",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "from flask_sqlalchemy import SQLAlchemy\nfrom flask_login import UserMixin\ndb = SQLAlchemy()\n\n\ndef connect_to_db(flask_app):\n flask_app.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///./gsdc.db\"\n\n db.app = flask_app\n db.init_app(flask_app)\n\n # Creates database tables\n db.create_all()\n print('Connected to the db!')\n\n\nclass Users(db.Model, UserMixin):\n \"\"\"Data Model for a User\"\"\"\n\n # Creates a table of users\n __tablename__ = 'users'\n\n # Defines the Schema for the users table\n user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n fname = db.Column(db.String(50), nullable=False)\n lname = db.Column(db.String(50), nullable=False)\n email = db.Column(db.String(100), nullable=False, unique=True)\n address = db.Column(db.String(100), nullable=True)\n city = db.Column(db.String(50), nullable=True)\n zip_code = db.Column(db.String(10), nullable=True)\n phone = db.Column(db.String(15), nullable=True)\n pref_communication = db.Column(db.String(50), nullable=True)\n print_permissions = db.Column(db.String(5), nullable=True)\n password = db.Column(db.String(50), nullable=True)\n member_type = db.Column(db.String(100), nullable=True)\n member_standing = db.Column(db.String(25), default='Good')\n other_orgs = db.Column(db.Text)\n num_of_gsd = db.Column(db.Integer)\n num_breedings = db.Column(db.Integer)\n\n def get_id(self):\n return self.user_id\n\n # app_date = db.Column(Date)\n # # co_app_fname = db.Column(db.String(50))\n # # co_app_lname = db.Column(db.String(50))\n # # co_app_email = db.Column(db.String(100))\n\n def __repr__(self):\n return f'<user_id={self.user_id}, fname={self.fname}, lname={self.lname}>'\n\n\nclass Interest(db.Model):\n \"\"\"Data Model for User Interest\"\"\"\n\n # Creates a table of user interests\n __tablename__ = 'interests'\n\n # Defines the Schema for the users interest table\n interest_id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))\n obedience = db.Column(db.Boolean)\n rally = db.Column(db.Boolean)\n conformation = db.Column(db.Boolean)\n agility = db.Column(db.Boolean)\n herding = db.Column(db.Boolean)\n scentwork = db.Column(db.Boolean)\n fun_match = db.Column(db.Boolean)\n shep_o_gram = db.Column(db.Boolean)\n training = db.Column(db.Boolean)\n hospitality = db.Column(db.Boolean)\n fundraising = db.Column(db.Boolean)\n gsd_fun_day = db.Column(db.Boolean)\n demo_mn_fair = db.Column(db.Boolean)\n annual_banquet = db.Column(db.Boolean)\n breeding = db.Column(db.Boolean)\n other = db.Column(db.String(100))\n\n def __repr__(self):\n return f'<interest_id={self.interest_id}, obedience={self.obedience}, training={self.training}>'\n"
},
{
"alpha_fraction": 0.6648920774459839,
"alphanum_fraction": 0.6657553911209106,
"avg_line_length": 32.095237731933594,
"blob_id": "4e03412cfa68c362f9c063683b0999fc835d8fac",
"content_id": "15b238dc1c294e90d1bf675185c2c5fba31cdb4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6950,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 210,
"path": "/server.py",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "from flask import (Flask, render_template, request, flash, session, redirect, jsonify)\nfrom werkzeug.security import check_password_hash\nfrom model import connect_to_db, Users\nfrom jinja2 import StrictUndefined\nimport crud\nfrom flask_login import LoginManager, login_user, login_required, current_user, logout_user\n\n# Creates an instance of Flask\napp = Flask(__name__)\napp.secret_key = \"dev\"\napp.jinja_env.undefined = StrictUndefined\n\n# Creates an instance of Flask LoginManager\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'app.login'\nlogin_manager.init_app(app)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return Users.query.get(int(user_id))\n\n\[email protected]('/')\ndef homepage():\n \"\"\"Renders Homepage\"\"\"\n\n return render_template('homepage.html')\n\n\[email protected]('/users', methods=['POST'])\ndef register_user():\n \"\"\"Create a new user.\n\n .. note::\n Checks if the user email is already in the database.\n If yes, a flash message will note the email already exists\n if not, the user will be created\n \"\"\"\n\n fname = request.form.get('fname')\n lname = request.form.get('lname')\n email = request.form.get('email')\n address = request.form.get('address')\n city = request.form.get('city')\n zip_code = request.form.get('zip_code')\n phone = request.form.get('phone')\n pref_communication = request.form.get('pref_communication')\n print_permissions = request.form.get('print_permissions')\n member_type = request.form.get('member_type')\n password = request.form.get('password')\n other_orgs = request.form.get('other_orgs')\n num_of_gsd = request.form.get('num_of_gsd')\n num_breedings = request.form.get('num_breedings')\n\n obedience = bool(request.form.get('obedience'))\n rally = bool(request.form.get('rally'))\n conformation = bool(request.form.get('conformation'))\n agility = bool(request.form.get('agility'))\n herding = bool(request.form.get('herding'))\n scentwork = bool(request.form.get('scentwork'))\n fun_match = bool(request.form.get('fun_match'))\n shep_o_gram = bool(request.form.get('shep_o_gram'))\n training = bool(request.form.get('training'))\n hospitality = bool(request.form.get('hospitality'))\n fundraising = bool(request.form.get('fundraising'))\n gsd_fun_day = bool(request.form.get('gsd_fun_day'))\n demo_mn_fair = bool(request.form.get('demo_mn_fair'))\n annual_banquet = bool(request.form.get('annual_banquet'))\n breeding = bool(request.form.get('breeding'))\n other = request.form.get('other')\n\n # Queries database on the email address and stores all data in user\n user = crud.get_user_by_email(email)\n\n # Checks if a user account has been found in the database\n if user:\n # If so, flash a message that the email already exists\n flash('Email Already Exists.')\n return redirect('/signup')\n\n # Otherwise add a new user and their interest to the database\n else:\n new_user = crud.create_user(fname, lname, email, address, city, zip_code, phone, pref_communication,\n print_permissions, member_type, password, other_orgs, num_of_gsd, num_breedings)\n\n crud.create_user_interest(new_user.user_id, obedience, rally, conformation, agility, herding, scentwork,\n fun_match, shep_o_gram,training, hospitality, fundraising, gsd_fun_day, demo_mn_fair,\n annual_banquet, breeding, other)\n\n flash('Membership Application Submitted.')\n\n return redirect('/')\n\n\[email protected]('/signup')\ndef signup_page():\n \"\"\"Renders Membership Signup Page\"\"\"\n\n return render_template('membership_signup.html')\n\n\[email 
protected]('/user')\ndef all_users():\n user = crud.get_all_users()\n return render_template('all_users.html', user=user)\n\n\[email protected]('/search', methods=[\"GET\", \"POST\"])\ndef search_database():\n \"\"\"Takes in a request from Search.html and returns results\"\"\"\n\n # Takes in the search input\n user_input = request.form.get('meminput')\n print(user_input)\n\n users = crud.get_user_interest(user_input)\n print(users)\n\n if len(users) != 0:\n return render_template('search.html', name=current_user.fname, users=users)\n else:\n users = crud.get_user(user_input)\n print(users)\n return render_template('search.html', name=current_user.fname, users=users)\n\n\[email protected]('/login')\ndef login():\n\n return render_template('login.html')\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login_post():\n \"\"\"Takes in Users input and checks password & email matches the user in the database\n If a match, a user login session is created and the user is routed to the login_landing page\"\"\"\n\n # Takes in the users input\n email = request.form.get('useremail')\n print(email)\n password = request.form.get('upassword')\n print(password)\n remember = True if request.form.get('remember') else False\n\n # Sets the users variable to an empty list to be a place holder for the login_landing page\n users = []\n try:\n # Queries database on the email address and stores all data in user\n user = crud.get_user_by_email(email)\n print(user)\n # Checks if password and email the user input matches the database\n if check_password_hash(user.password, password):\n # Creates Session for the logged in user\n login_user(user, remember=remember)\n flash('Successful Login')\n\n # If users password does not match flash message\n else:\n flash('Password Incorrect')\n return redirect('/login')\n\n # If users email does not match flash message\n except AttributeError:\n flash('Email not found')\n return redirect('/login')\n\n # Renders the login_landing page and passes the logged in users first name\n return render_template('login_landing.html', name=current_user.fname, users=users)\n\n\[email protected]('/login_landing')\n@login_required\ndef login_landing():\n \"\"\"Renders Login Landing Page\"\"\"\n return render_template('login_landing.html', name=current_user.fname)\n\n\[email protected]('/isLoggedIn')\ndef logged_in():\n \"\"\"AJAX backed for log\"\"\"\n return jsonify(current_user.is_authenticated)\n\n\[email protected]('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect('/login')\n\n\n# @app.route('/interest', methods=[\"GET\", \"POST\"])\n# def search_user_interest():\n# \"\"\"Takes in a request from Search.html and returns results\"\"\"\n#\n# # Takes in the search input\n# user_input = request.form.get('memberInput')\n# print(user_input)\n#\n# # Queries the users input against the database\n# intresults = crud.get_user_interest(user_input)\n# print(intresults)\n#\n# # Passes the query results back to Search.html\n# return render_template('interest.html', intresults=intresults)\n\n\nif __name__ == '__main__':\n connect_to_db(app)\n app.run(host='0.0.0.0', debug=True)\n"
},
{
"alpha_fraction": 0.5886363387107849,
"alphanum_fraction": 0.5886363387107849,
"avg_line_length": 22.052631378173828,
"blob_id": "90f983f77db4e5ac7ddbc9ceb62b079b79bd3887",
"content_id": "565f9a69ce60008dd4436e9d828710072523fa62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 19,
"path": "/static/membership.js",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "'use strict';\n\n//Dark-Mode\nconst btn = document.querySelector(\".btn-toggle\");\n\nbtn.addEventListener(\"click\", function () {\n\n document.body.classList.toggle(\"dark-theme\");\n});\n\n// Display search form when button is clicked\n//$(document).ready(function(){\n// $(\"#visibility\").css(\"display\", \"none\");\n//\n// $(\"#search-btn\").click(function() {\n// alert(\"Yep\")\n// $(\"#visibility\").css(\"display\", \"block\");\n// });\n//});\n\n\n"
},
{
"alpha_fraction": 0.610919713973999,
"alphanum_fraction": 0.6117148399353027,
"avg_line_length": 36.72999954223633,
"blob_id": "e10eba92b3f4ca2632187d07e9a8db07026f283b",
"content_id": "fc7b9a7a217d56da0da60b0cdfd2bcef9d1f6c21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3773,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 100,
"path": "/crud.py",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "import sqlalchemy\nfrom flask import flash\n\nfrom model import Users, Interest, db, connect_to_db\nfrom werkzeug.security import generate_password_hash\n\n\ndef create_user(fname, lname, email, address, city, zip_code, phone, pref_communication, print_permissions,\n member_type, password, other_orgs, num_of_gsd, num_breedings):\n user = Users(fname=fname, lname=lname, email=email, address=address, city=city, zip_code=zip_code, phone=phone,\n pref_communication=pref_communication, print_permissions=print_permissions, member_type=member_type,\n password=generate_password_hash(password, method='sha256'), other_orgs=other_orgs,\n num_of_gsd=num_of_gsd, num_breedings=num_breedings)\n\n # Adds user interest to the database session\n db.session.add(user)\n\n # Commits user interest to the database\n db.session.commit()\n\n # Refreshes the database instances\n db.session.refresh(user)\n\n return user\n\n\ndef get_user_by_email(email):\n \"\"\"Queries a user by email\"\"\"\n\n return Users.query.filter(Users.email == email).first()\n\n\ndef get_all_users():\n \"\"\"Queries and returns all users\"\"\"\n return Users.query.all()\n\n\ndef get_user(user_input):\n \"\"\"Queries and returns a user\"\"\"\n return Users.query.filter((Users.fname == user_input) | (Users.lname == user_input)\n | (Users.email == user_input)\n | (Users.city == user_input)\n | (Users.phone == user_input)\n | (Users.pref_communication == user_input)\n | (Users.print_permissions == user_input)\n | (Users.member_type == user_input)\n | (Users.other_orgs == user_input)\n | (Users.num_of_gsd == user_input)\n | (Users.num_breedings == user_input)).all()\n\n\ndef create_user_interest(user_id, obedience, rally, conformation, agility, herding, scentwork, fun_match, shep_o_gram,\n training, hospitality, fundraising, gsd_fun_day, demo_mn_fair,\n annual_banquet, breeding, other):\n \"\"\"Creates a user interest\"\"\"\n\n interest = Interest(user_id=user_id, obedience=obedience, rally=rally, conformation=conformation, agility=agility,\n herding=herding, scentwork=scentwork, fun_match=fun_match, shep_o_gram=shep_o_gram,\n training=training, hospitality=hospitality, fundraising=fundraising, gsd_fun_day=gsd_fun_day,\n demo_mn_fair=demo_mn_fair, annual_banquet=annual_banquet, breeding=breeding, other=other)\n\n # Adds user interest to the database session\n db.session.add(interest)\n\n # Commits user interest to the database\n db.session.commit()\n\n # Refreshes the database instances\n db.session.refresh(interest)\n\n return interest\n\n\ndef get_user_interest(user_input):\n \"\"\"Queries an interest and returns members associated with it\"\"\"\n\n try:\n if user_input is None:\n \"\"\"User_input is going to be nothing when the page first renders \n this statement returns an empty list\"\"\"\n return []\n else:\n query = (\"SELECT * FROM users WHERE user_id IN \"\n f\"(SELECT user_id FROM interests WHERE {user_input} = true)\")\n\n # Executes the Query\n db_cursor = db.session.execute(query)\n\n return db_cursor.fetchall()\n\n # an Operational error is given when a column is not found, if that is the case return an empty list\n except sqlalchemy.exc.OperationalError:\n\n return []\n\n\nif __name__ == '__main__':\n from server import app\n\n connect_to_db(app)\n"
},
{
"alpha_fraction": 0.7727272510528564,
"alphanum_fraction": 0.7782931327819824,
"avg_line_length": 24.0930233001709,
"blob_id": "a89436b3107506b0b4ec9f050240fcd8dfeb125f",
"content_id": "a118f9d513933e246c5fa608effe68f5af748140",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1078,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 43,
"path": "/README.md",
"repo_name": "lauranewland/gsdc-webapp-flask",
"src_encoding": "UTF-8",
"text": "## Membership WebApp for the German Shepherd Dog Club\n\nThis webapp was created for the German Shepherd Dog Club of MSP & St. Paul to host their membership directory to reduce \ndiscrepancies when there is a change in Membership Chair. Another feature that I implemented is the ability to filter \nand search the membership directory so club members can easily find volunteers for Club hosted events.\n\n## Webapp Demo\n[](https://youtu.be/9ZXC5LPHSfc)\n\n## Technologies Used\nFront End\n- HTML\n- Javascript\n- JQuery\n- Ajax\n- Jinja\n\nStyling\n- Bootstrap\n- CSS\n\nBack End\n- Python\n- Flask\n- SQLAlchemy\n- SQLite\n\n## Key Features\n1.Account Creation and Membership Signup\n\n\n\n2.Login with Flask-login Libraries \n\n\n\n3.Search Membership based on a member's interest within the club\n\n\n\n4.Filter Membership Directory \n\n"
}
] | 6 |
joycejiawei/python-challenge | https://github.com/joycejiawei/python-challenge | 9f02456e118d7e7c480642286d491d44f640c920 | fa7ca3d0980c1d4e7aa22f3b3289c110df2afadb | 33389e1f59de6bd444d24011a781e17ffbb9c9e5 | refs/heads/master | 2020-05-20T09:45:40.343430 | 2019-05-11T18:16:36 | 2019-05-11T18:16:36 | 185,510,251 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6129985451698303,
"alphanum_fraction": 0.6302314400672913,
"avg_line_length": 33.05172348022461,
"blob_id": "1518a91cdd8112c86424235c17ce66f5e624cc0f",
"content_id": "957d5a14be4d2ca4a73fdbacbc4403f6f2bd0a7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2031,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 58,
"path": "/python-challenge/PyBank/main.py",
"repo_name": "joycejiawei/python-challenge",
"src_encoding": "UTF-8",
"text": "import csv\r\n\r\nfile_to_input = \"budget_data.csv\"\r\nfile_to_output = \"financial_analysis.txt\"\r\n\r\ntotal_months = 0\r\ntotal_profits = 0\r\nprev_profits = 0\r\nmonth_of_change = []\r\nprofits_change_list = []\r\ngreatest_increase = [\"\", 0]\r\ngreatest_decrease = [\"\", 9999999999999999999]\r\ntotal_profits = 0\r\n\r\nwith open(file_to_input) as profits_data:\r\n reader = csv.DictReader(profits_data)\r\n\r\n for row in reader:\r\n\r\n # The total number of months included in the dataset\r\n total_months = total_months + 1\r\n\r\n # The net total amount of \"Profit/Losses\" over the entire period\r\n total_profits = total_profits + int(row[\"Profit/Losses\"])\r\n\r\n # Track the profit/losses change\r\n profits_change = int(row[\"Profit/Losses\"]) - prev_profits\r\n prev_profits = int(row[\"Profit/Losses\"])\r\n profits_change_list = profits_change_list + [profits_change]\r\n month_of_change = month_of_change + [row[\"Date\"]]\r\n\r\n # The greatest increase in profits (date and amount) over the entire period\r\n if (profits_change > greatest_increase[1]):\r\n greatest_increase[0] = row[\"Date\"]\r\n greatest_increase[1] = profits_change\r\n\r\n # The greatest decrease in losses (date and amount) over the entire period\r\n if (profits_change < greatest_decrease[1]):\r\n greatest_decrease[0] = row[\"Date\"]\r\n greatest_decrease[1] = profits_change\r\n\r\n\r\n# The average of the changes in \"Profit/Losses\" over the entire period\r\nprofits_avg = sum(profits_change_list) / len(profits_change_list)\r\n\r\noutput = (\r\n f\"\\nFinancial Analysis\\n\"\r\n f\"----------------------------\\n\"\r\n f\"Total Months: {total_months}\\n\"\r\n f\"Total: ${total_proftis}\\n\"\r\n f\"Average Change: ${profits_avg}\\n\"\r\n f\"Greatest Increase in Profits: {greatest_increase[0]} (${greatest_increase[1]})\\n\"\r\n f\"Greatest Decrease in Profits: {greatest_decrease[0]} (${greatest_decrease[1]})\\n\")\r\n\r\nprint(output)\r\n\r\nwith open(file_to_output, \"w\") as txt_file:\r\n txt_file.write(output)"
}
] | 1 |
BingFong/GA_3 | https://github.com/BingFong/GA_3 | cc4a0c334bf440e3d54c63728c8cc886deea7e4e | 3fac9abc9c3fb3555481e8791c06fb31d95edb0a | 18a0be950eb652d812483ad0db548eb4fcf25709 | refs/heads/master | 2020-05-19T13:25:54.843370 | 2019-05-13T02:06:20 | 2019-05-13T02:06:20 | 185,039,593 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4605637490749359,
"alphanum_fraction": 0.49715542793273926,
"avg_line_length": 31.34482765197754,
"blob_id": "5be59583c788769afeda35f91ba92f7a46c036ca",
"content_id": "0d7393dfab515dcfd9e2c799720a57396c4c0dac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7778,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 232,
"path": "/GA_3_v2.py",
"repo_name": "BingFong/GA_3",
"src_encoding": "UTF-8",
"text": "def mutation():\r\n global x,PS,mutation_num\r\n mutation_count =0\r\n for i in range(PS,PS+taguchi_num):\r\n probability = np.random.uniform()\r\n if (probability <= mutationRate):\r\n mutation_num +=1\r\n chosen_num = np.random.randint(0,8) #chose gene\r\n chosen_cro = np.random.randint(0,PS+taguchi_num) #chose cromosome\r\n x[PS+taguchi_num+mutation_count] = x[i]\r\n x[PS+taguchi_num+mutation_count,chosen_num] = (x[i,chosen_num] + x[chosen_cro,chosen_num])*0.5\r\n \r\n mutation_count+=1\r\n \r\ndef taguchi_method(x_tmp):\r\n global fit_cro,taguchi_fitSum,taguchi_xNum,taguchi_num\r\n x_result = np.zeros([int(taguchi_num),gene])\r\n fit_result = np.zeros([int(taguchi_num)])\r\n \r\n for i in range(0, taguchi_xNum, 2):\r\n x_cro = np.zeros([2,gene])\r\n x_taguchi = np.zeros([16,gene])\r\n fit_cro = np.zeros([16,2])\r\n x_cro[:] = x_tmp[i:i+2]\r\n \r\n for j in range(16): #16 experiments\r\n for k in range(gene):\r\n x_taguchi[j,k] = x_cro[int(taguchi_chart[j,k])-1, k]\r\n fit_tmp = cal_fitness(x_taguchi, 16) #fitness of 16 experiments\r\n fit_cro[:16,0] = fit_tmp[:16]\r\n fit_cro[:16,1] = np.log(1/(fit_cro[:16,0]*fit_cro[:16,0]))\r\n \r\n #compaire\r\n taguchi_fitSum[2] = -10000\r\n x_new = np.zeros([1,gene])\r\n \r\n for m in range(8): #levels for 8 gene\r\n for n in range(16): #indices of levels\r\n if(taguchi_chart[n,m] == 1):\r\n taguchi_fitSum[0,m] += fit_cro[n,1]\r\n else:\r\n taguchi_fitSum[1,m] += fit_cro[n,1]\r\n if (taguchi_fitSum[0,m] > taguchi_fitSum[1,m]):\r\n taguchi_fitSum[2,m] = 0\r\n else: \r\n taguchi_fitSum[2,m] = 1\r\n x_new[0,m] = x_cro[int(taguchi_fitSum[2,m]),m]\r\n\r\n fit_new = cal_fitness(x_new, 1)\r\n if(fit_new < np.min(fit_tmp)):\r\n x_result[i//2] = x_new[0]\r\n fit_result[i//2] = fit_new\r\n else:\r\n index = np.argmin(fit_tmp)\r\n x_result[i//2] = x_taguchi[index]\r\n fit_result[i//2] = fit_tmp[index]\r\n \r\n return x_result,fit_result\r\n\r\ndef crossover():\r\n global x,fit,x_min,taguchi_num,taguchi_xNum\r\n\r\n reproduction_probability = chosen_probability()\r\n x_tmp = np.zeros([taguchi_xNum, gene])\r\n \r\n #select cromosomes for crossover\r\n x_tmp[0] = x_min\r\n for i in range(2,PS,2): \r\n x_tmp[i+1] = x[np.random.randint(PS)]\r\n probability = np.random.uniform()\r\n for j in range(PS): #reproduction of probability\r\n if (probability <= reproduction_probability[j]):\r\n x_tmp[i] = x[j]\r\n break\r\n \r\n #get taguchi result\r\n x_result,fit_result = taguchi_method(x_tmp) \r\n \r\n #saving crossover result\r\n x[PS:PS+len(x_result)] = x_result\r\n fit[PS:PS+len(fit_result)] = fit_result\r\n \r\ndef chosen_probability():\r\n global fit,PS\r\n \r\n fit_PS = fit[:PS]\r\n reciprocal = np.reciprocal(fit_PS) #對每一fit取倒數\r\n\r\n reciprocalSum = np.sum(reciprocal, axis=0) #累加倒數\r\n chosen_probability = reciprocal/reciprocalSum #計算選擇機率\r\n reproduction_probability = chosen_probability.cumsum(axis=0)#累加選擇機率\r\n return reproduction_probability\r\n\r\ndef reproduction():\r\n global funcall,x,fit\r\n funcall += 1\r\n \r\n reproduction_probability = chosen_probability()\r\n x_tmp = np.copy(x)\r\n fit_tmp = np.copy(fit)\r\n \r\n for i in range(1, PS):\r\n probability = np.random.uniform()\r\n for j in range(PS): #reproduction of probability\r\n if (probability <= reproduction_probability[j]):\r\n x[i] = x_tmp[j]\r\n fit[i] = fit_tmp[j]\r\n break\r\n \r\n# shuffle\r\n dic = np.random.permutation(PS) #dictionary\r\n x[:PS] = x[dic]\r\n fit[:PS] = fit[dic]\r\n\r\ndef sorting():\r\n global fit,x\r\n sort = np.argsort(fit) #indices list\r\n fit = 
np.sort(fit)\r\n x_tmp = np.zeros([exPS, gene])\r\n\r\n for i in range(exPS):\r\n x_tmp[i] = x[sort[i]]\r\n \r\n x = np.copy(x_tmp)\r\n \r\ndef cal_fitness(x, num):\r\n global w\r\n fit_tmp = np.zeros([num])\r\n for i in range(num):\r\n unfeasibility = 0 #unfeasibility\r\n c = np.zeros([6]) #memorize calculation of constraint\r\n c[0] = 1 - 0.0025 * (x[i,3] + x[i,5])\r\n c[1] = 1 - 0.0025 * (x[i,4] + x[i,6] - x[i,3])\r\n c[2] = 1 - 0.01 * (x[i,7] - x[i,4]) \r\n c[3] = x[i,0] * x[i,5] - 833.33252 * x[i,3] -100 * x[i,0] + 83333.333\r\n c[4] = x[i,1] * x[i,6] - 1250 * x[i,4] - x[i,1] * x[i,3] + 1250 * x[i,3]\r\n c[5] = x[i,2] * x[i,7] - 1250000 - x[i,2] * x[i,4] + 2500 * x[i,4]\r\n for j in range(6):\r\n if c[j] < 0:\r\n unfeasibility += c[j]\r\n unfeasibility = abs(unfeasibility)\r\n p = w * unfeasibility #penalty\r\n fit_tmp[i] = x[i,0] + x[i,1] + x[i,2] + p\r\n \r\n return fit_tmp\r\n\r\ndef generate_Chromosomes(num):\r\n global gene\r\n x_com = np.zeros([num,gene])\r\n for i in range(num):\r\n x_com[i,0] = np.random.uniform(100,10000)\r\n x_com[i,1:3] = np.random.uniform(1000,10000,2)\r\n x_com[i,3:8] = np.random.uniform(10,1000,5)\r\n return x_com\r\n\r\ndef initialization():\r\n x[:PS] = generate_Chromosomes(PS)\r\n\r\n'''global variables'''\r\nimport numpy as np\r\nfrom numpy import genfromtxt\r\ntaguchi_chart = genfromtxt('123.csv', delimiter=',') \r\nPS = 200 #population size\r\nexPS = PS*4 #extra population\r\ngene = 8 #gene number\r\nw = 10000 #penalty\r\ntaguchi_xNum = PS*2\r\ntaguchi_num = int(taguchi_xNum//2)\r\nmutationRate = 0.8\r\nmutation_num = 0\r\niteration = 1000\r\nkeepRate = 0.3\r\nfuncall = 0 #number of function call\r\nglobal_min = 1000000.0\r\n\r\nx = np.full((exPS, gene), 10000.0) #cromosome\r\nfit = np.full((exPS),10000000.0) #fitness\r\nx_min = np.zeros([gene]) #best solution\r\nfit_cro = np.zeros([16,2]) #crossober\r\ntaguchi_fitSum = np.zeros([3,8]) #taguchi method\r\n\r\n''' main '''\r\ninitialization()\r\nfit[:PS] = cal_fitness(x, PS)\r\nsorting()\r\n\r\nwhile(iteration > 0):\r\n if (iteration<900):\r\n keepRate = 0.02\r\n \r\n reproduction()\r\n crossover()\r\n mutation()\r\n fit = cal_fitness(x, exPS)\r\n sorting()\r\n \r\n if(iteration==1):\r\n tmp_min = np.min(fit)\r\n iteration -= 1\r\n print('iter = ',iteration,tmp_min)\r\n \r\n if(tmp_min<global_min): #record best solution\r\n global_min = tmp_min\r\n x_min = x[0]\r\n else:\r\n x[2] = x_min\r\n else:\r\n #generate random cromosome untill 200 cromosomes\r\n x[int(PS*keepRate):PS] = generate_Chromosomes(PS-int(PS*keepRate))\r\n fit[:PS] = cal_fitness(x, PS) \r\n \r\n # shuffle\r\n dic = np.random.permutation(PS) #dictionary\r\n x[:PS] = x[dic]\r\n fit[:PS] = fit[dic]\r\n \r\n x[PS:] = 10000.0\r\n fit[PS:] = 10000000.0\r\n \r\n tmp_min = np.min(fit)\r\n iteration -= 1\r\n print('iter = ',iteration,tmp_min)\r\n \r\n if(tmp_min<global_min): #record best solution\r\n global_min = tmp_min\r\n x_min = x[0]\r\n else:\r\n x[2] = x_min\r\n \r\n\r\nprint(global_min)\r\n#mutation method ofr taguchi"
}
] | 1 |
Demention-inc/comment-app | https://github.com/Demention-inc/comment-app | 5b0223fea8b14d5cf2cda0a3892b450c524b9505 | 2a3ed31bb6c9ddaa356d6201a3c947121094cdd5 | 0ead96469fb8367ee36730f21a602720c12a8edc | refs/heads/main | 2023-08-24T04:42:13.694977 | 2021-10-08T01:37:51 | 2021-10-08T01:37:51 | 413,270,188 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6462915539741516,
"alphanum_fraction": 0.6560102105140686,
"avg_line_length": 36.92232894897461,
"blob_id": "7e071bd1687449da4c8e38e45b7b9a291b0a7c67",
"content_id": "bb6ea9d545c8cfa5198d14f9b0065efacede467a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4042,
"license_type": "no_license",
"max_line_length": 214,
"num_lines": 103,
"path": "/src/codes/initial_livechat_check/main.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "import platform\nimport json\nimport sys\nimport os\nfrom retry import retry\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n\nclass ContinuationURLNotFound(Exception):\n pass\n\nclass LiveChatReplayDisabled(Exception):\n pass\n\nclass RestrictedFromYoutube(Exception):\n pass\n\n\n\ndef get_ytInitialData(target_url, session):\n headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'}\n html = session.get(target_url, headers=headers)\n soup = BeautifulSoup(html.text, 'html.parser')\n for script in soup.find_all('script'):\n script_text = str(script)\n if 'ytInitialData' in script_text:\n for line in script_text.splitlines():\n if 'ytInitialData' in line:\n if 'var ytInitialData =' in line:\n st = line.strip().find('var ytInitialData =') + 19\n return(json.loads(line.strip()[st:-10]))\n if 'window[\"ytInitialData\"] =' in line:\n return(json.loads(line.strip()[len('window[\"ytInitialData\"] = '):-1]))\n# return(json.loads(line.strip()[len('window[\"ytInitialData\"] = '):-1]))\n\n if 'Sorry for the interruption. We have been receiving a large volume of requests from your network.' in str(soup):\n print(\"restricted from Youtube (Rate limit)\")\n raise RestrictedFromYoutube\n\n return(None)\n\ndef check_livechat_replay_disable(ytInitialData):\n conversationBar = ytInitialData['contents'].get('twoColumnWatchNextResults',{}).get('conversationBar', {})\n if conversationBar:\n conversationBarRenderer = conversationBar.get('conversationBarRenderer', {})\n if conversationBarRenderer:\n text = conversationBarRenderer.get('availabilityMessage',{}).get('messageRenderer',{}).get('text',{}).get('runs',[{}])[0].get('text')\n print(text)\n if text == 'この動画ではチャットのリプレイを利用できません。':\n return(True)\n else:\n return(True)\n\n return(False)\n\n@retry(ContinuationURLNotFound, tries=2, delay=1)\ndef get_initial_continuation(target_url):\n print(target_url)\n session = requests.session()\n try:\n ytInitialData = get_ytInitialData(target_url, session)\n except RestrictedFromYoutube:\n return(None)\n\n\n if not ytInitialData:\n print(\"Cannot get ytInitialData\")\n raise ContinuationURLNotFound\n\n if check_livechat_replay_disable(ytInitialData):\n print(\"LiveChat Replay is disable\")\n raise LiveChatReplayDisabled\n\n continue_dict = {}\n try:\n continuations = ytInitialData['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['header']['liveChatHeaderRenderer']['viewSelector']['sortFilterSubMenuRenderer']['subMenuItems']\n for continuation in continuations:\n continue_dict[continuation['title']] = continuation['continuation']['reloadContinuationData']['continuation']\n except KeyError:\n print(\"Cannot find continuation\")\n\n continue_url = None\n if not continue_url:\n if continue_dict.get('上位のチャットのリプレイ'):\n continue_url = continue_dict.get('上位のチャットのリプレイ')\n if continue_dict.get('Top chat replay'):\n continue_url = continue_dict.get('Top chat replay')\n\n if not continue_url:\n if continue_dict.get('チャットのリプレイ'):\n continue_url = continue_dict.get('チャットのリプレイ')\n if continue_dict.get('Live chat replay'):\n continue_url = continue_dict.get('Live chat replay')\n\n if not continue_url:\n continue_url = ytInitialData[\"contents\"][\"twoColumnWatchNextResults\"].get(\"conversationBar\", {}).get(\"liveChatRenderer\",{}).get(\"continuations\",[{}])[0].get(\"reloadContinuationData\", {}).get(\"continuation\")\n\n if not continue_url:\n raise ContinuationURLNotFound\n\n 
return(continue_url)\n\n\n\n\n"
},
{
"alpha_fraction": 0.6067588329315186,
"alphanum_fraction": 0.7096773982048035,
"avg_line_length": 42.46666717529297,
"blob_id": "743d4b6a59fffc7cbf5ff9608c97bf9bf109ed80",
"content_id": "52eb2c92a26dd49304adae613802d11e51bb18db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 651,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 15,
"path": "/src/secret/secret.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "key={\n \"type\": \"service_account\",\n \"project_id\": \"chat-crawl-708ee\",\n \"private_key_id\": \"739530d01adba460f1ffedfc64b1bbac8711cc4a\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCp3vaCgfzgB3pn\\nAGJBV6ilFNHmNYqkW3gv8UG8NJAhR4m0SjSygo+eOmw79CMhXzsFrdiDipBgJqXW\\nwkMZ1VA5ORmVQXwOr+LbKPqxoX1v0qyiBFxUOlSMC/7RBQHL9L2oJGh5SoOnsnIT\\nxPfPf6Xk2lgbsgbIyAxJIXHBoo2OJ4d4bAJnT6Vp4xU1dn/VVx/nlpAxXBJ1cRCg\\nvErcxilFOqfA6cVCBFi+M+SmYtBzBRuMa+5JaG7Rj96RLp6+1r460hHUWMHcNxm4\\nWCZvAfhysimMHKen/MwPFYbZZ15OOvsjyFv3YxlTzIC/iirgoVcEVCYeMXb2uE6B\\nDqDv416nAgMBAAECggEABmJuExdN86Du/Z+65v148K9uI4AKqcUS/iuhTrC5DrnQ\\nwqPGru46S0ujKsOxGFLVyzpZTg8EqyB4u4V0noFxG516SSz7Zn/qKlY0Hz3/EsIG\\ndqvBnpwh9luIZGm9mLamSYNeIS+Rmtlu8TBGJ23YRILdAeQjM3kAATWjwA/bJdG7\\nKhjV/6bpSYy6WenDI21/oBRo+Tl8GJQyqk+HGHU5BLrqNtv2bQktV8LwFlSp72eW\\nmLF3m4+s9CBOjI4E9xtrLgZEj9XNp4/pMOO3BsCo55/4eOlN5OjYq3tB1800wX0f\\nuUSSWCJuTJp7mEVG9CRSPiEAqOhnI/8Or3GFL2sSrQKBgQDZdHeL6xoXjiLqIYbD\\nsol60UaiRmENCwYQj3mQrVBUyuPqidb/URHtg2GO3k9fOkxcbtcQhIteC2RzG9fB\\nmIOFAAaoZmAqQYTsJ/rUAYv+UAYvl3hiZHh45i2T9WLJyHh32M2M5I+zduMMawux\\nlDtfR29zTFdTivlln8pOjf+3TQKBgQDH+0JiTnREAIGPW1wui2VYJw8NrBbS98BR\\n3EUyXbYUS+xRvTIPv/egxy+uWuMsV7my+nI1ZbysDPjAnBk7BWiEV926uk4lYWfg\\nHwmLeM39kfpQKVuo/4nF4hWgo+v+lNekCuAare6QxAIm8bh/AXhOZ4XiGbbL9LTg\\n9rmCHEw7wwKBgQCmDhhUUp5EKphi7mM5AheWIEWuGcx251okP0v3vUHA3oaZTdVO\\ndxwXoeJX9U/rSluRolq9FFpQ4KY+eJ6UCeE1KUXsRvFck1fr9sPGLcA33eg7Y5Hc\\noRiWSj+34wUjTeEgtACTfN53j2vmyb/ORYHpSmXDSCLPeyrj/dfBkRIbaQKBgCuc\\na7msikZaJebxcYTk+VDkE7BTmxae2N4BP0XgkdiTRhH9RHGS15nEVm2bfHcrGmAh\\npc5nRWHv4j9+yzHH2CckUkJqZqhYOaLyoc48pg9qSkA5BLgyIgV5bGX/2XNHWk93\\n369xxtjjJeUASc/IrkfK47OPEAIid1+n7oSlTyqpAoGBAKzyIIkECp0ohhqEXl1v\\n0neCyE7SttY+BpvEveONSfkVkLz0jCp87DQHx+YyFmPgvawAxJvWFMnJHYtkhe1q\\nbdqmH62uvJNqiRnlBrWKcXqwdU1kNutm8dFNxijmb2pAQI/2s7r9hiFtthNoxYta\\nS/QJ/O2K6TUXzCjaZ561nTWi\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"[email protected]\",\n \"client_id\": \"110918171689088650018\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://oauth2.googleapis.com/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-ptw8q%40chat-crawl-708ee.iam.gserviceaccount.com\"\n}\n\nbucket_name='chat-crawl-708ee.appspot.com'\nproject_id = \"youtube-chat-crawl\""
},
{
"alpha_fraction": 0.6929879784584045,
"alphanum_fraction": 0.6980416774749756,
"avg_line_length": 35.651161193847656,
"blob_id": "7579cfb161df988429b7284bdf7aa5065bf499b6",
"content_id": "422b5946d5cabf604669bbb1c09f77ea05b5a109",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1637,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 43,
"path": "/src/codes/continuation_livechat_crawler/run.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nfrom continuation_livechat_crawler.main import get_chat_replay_from_continuation, RestrictedFromYoutube\nfrom initial_livechat_check.main import get_initial_continuation, ContinuationURLNotFound, LiveChatReplayDisabled, RestrictedFromYoutube\nimport firebase_admin\nfrom firebase_admin import credentials, initialize_app, storage\nfrom secret.secret import key,bucket_name\nimport pandas as pd\nfrom tqdm import tqdm\n\nif __name__ == '__main__':\n target_url=sys.argv[1]\n video_id = target_url.split(\"https://www.youtube.com/watch?v=\")[1]\n continuation = get_initial_continuation(target_url)\n comment_data, continuation = get_chat_replay_from_continuation(video_id, continuation, 3000, True)\n\n dmplist = []\n cols = ['user','timestampUsec','time authorbadge','text purchaseAmount','type','video_id','Chat_No']\n dfn = pd.DataFrame(index=[], columns=cols)\n\n for line in tqdm(comment_data):\n df = pd.DataFrame(line,index=['1',])\n dfn=pd.concat([df,dfn],axis=0)\n\n csv=dfn.to_csv(index=False)\n output_file_name = video_id + '.csv'\n print('DONE!')\n\n #firebase storageにアップロードする\n file_name=output_file_name\n\n cred = credentials.Certificate(key)\n if not firebase_admin._apps:\n # 初期済みでない場合は初期化処理を行う\n initialize_app(cred, {'storageBucket': bucket_name})\n\n bucket = storage.bucket()\n blob = bucket.blob(file_name)\n blob.upload_from_string(csv)\n\n print('DONE!')\n\n \n\n"
},
{
"alpha_fraction": 0.6027466654777527,
"alphanum_fraction": 0.6159715056419373,
"avg_line_length": 24.205127716064453,
"blob_id": "dbe8bac4e2a472b59829deea7974da9f4b769a31",
"content_id": "6c77a0b06bb6b8d0e144cfcae0c1c9d0d9d5d8ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2092,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 78,
"path": "/src/codes/highlight.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport os\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport plotly.graph_objs as go\nfrom functions import *\nfrom pytube import YouTube\nimport streamlit as st\n\n\nimport warnings\nwarnings.simplefilter('ignore')\n\n\nurl = st.text_input(label='URLを入力してね')\nurl=url.strip()\n\nif len(url) < 5:\n st.warning('URLを入力して')\n # 条件を満たないときは処理を停止する\n st.stop()\nelse:\n\n # コメントスクレイピング\n # subprocess.run(['videochatget',url]) \n name=url.split(\"https://www.youtube.com/watch?v=\")[1]\n # shutil.move(name+\".txt\",\"../../txt/\"+name+\".txt\")\n\n # csvファイル作成\n # subprocess.run(['ruby','get_data.rb',name])\n\n\n\n df=process_csv('../../csv/'+name+'.csv')\n ts=df['num']\n outlier=make_outlier(ts)\n\n df=df.reset_index()\n print(df)\n\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=df['timestamp'], y=df[\"num\"], name='コメント数'))\n fig.add_trace(go.Scatter(x=outlier['timestamp'], y=outlier[\"num\"],name=\"盛り上がり\",mode = 'markers'))\n\n\n st.dataframe(outlier)\n st.plotly_chart(fig)\n\n\n #動画ダウンロード\n yt = YouTube(url)\n st.subheader(yt.title )\n stream = yt.streams.filter(file_extension='mp4').first()\n next_title=name\n\n # stream.download(\"../../mp4/\")\n # os.rename(\"../../mp4/\" + yt.title + \".mp4\",\"../../mp4/\" + next_title + \".mp4\")\n\n file_path = \"../../mp4/\"+next_title+'.mp4'\n for i,t in enumerate(outlier['timestamp']):\n time=t\n short_time=60\n sec=int(time[0:1])*60*60+int(time[3:5])*60+int(time[6:8])\n start = str(sec-(short_time/2))\n save_path ='../../short/'+next_title+str(i)+'.mp4'\n short_time=str(short_time)\n subprocess.run(['ffmpeg','-i',file_path,'-ss',start,'-t',short_time,save_path])\n\n video_file = open(save_path, 'rb')\n video_bytes = video_file.read()\n st.video(video_bytes)\n"
},
{
"alpha_fraction": 0.6873015761375427,
"alphanum_fraction": 0.6904761791229248,
"avg_line_length": 30.81012725830078,
"blob_id": "558a69b5fc29d3e2be16c074cba6d7b46a8c1780",
"content_id": "ccd7ee3d88fe773741464c70c2e5b97d19aa45a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2848,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 79,
"path": "/src/codes/main.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport subprocess\nfrom io import BytesIO\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))#親ディレクトリをパスに追加\nfrom datetime import datetime as dt\nimport plotly.graph_objs as go\nfrom functions import *\nfrom pytube import YouTube\nfrom google.cloud import storage as gcs\nfrom google.oauth2 import service_account\nimport json\nimport streamlit as st\nimport firebase_admin\nfrom firebase_admin import credentials, initialize_app, storage\nfrom secret.secret import key,bucket_name,project_id\n\nimport warnings\nwarnings.simplefilter('ignore')\n\n\nurl = st.text_input(label='URLを入力してね')\nurl=url.strip()\n\nif url.split(\"=\")[0] != 'https://www.youtube.com/watch?v':\n\n st.warning('youtubeのURLを入力してください')\n # 条件を満たないときは処理を停止する\n st.stop()\nelse:\n video_id=url.split(\"https://www.youtube.com/watch?v=\")[1]\n # url='https://www.youtube.com/watch?v=oPDqWGXwoPA'\n # video_id='oPDqWGXwoPA'\n csv_name=video_id+'.csv'\n # firebaseにチャットデータのcsvファイルがなければ取得してアップロード\n credential = service_account.Credentials.from_service_account_info(key)\n client = gcs.Client(project_id, credentials=credential)\n bucket = client.get_bucket(bucket_name)\n box=[]\n for file in client.list_blobs(bucket_name):\n box.append(file.name)\n if csv_name in box:\n pass\n else:\n subprocess.run(['python','continuation_livechat_crawler/run.py',url])\n\n # firebaseからcsvファイルを取ってくる\n cred = credentials.Certificate(key)\n\n if not firebase_admin._apps:\n # 初期済みでない場合は初期化処理を行う\n initialize_app(cred, {'storageBucket': bucket_name})\n bucket = storage.bucket()\n blob = bucket.blob(csv_name)\n\n #何秒ごとに集計するか\n number=30\n #移動平均の時間\n # 平均から標準偏差の threshold 倍以上外れているデータを外れ値としてプロットする\n ewm_span=60\n threshold=2\n # df = pd.read_csv(BytesIO(blob.download_as_string()),index_col=False)\n if 'db' not in st.session_state:\n st.session_state['db'] = pd.read_csv(BytesIO(blob.download_as_string()),index_col=False)\n df_new=st.session_state['db'].rename(columns={'time':'timestamp'})\n df_new=df_new[['timestamp']]\n df_new=process_df(df_new,number)\n ts=df_new['num']\n outlier=make_outlier(ts,ewm_span, threshold)\n print(df_new)\n print(outlier)\n\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=df_new.index, y=df_new[\"num\"], name='コメント数'))\n fig.add_trace(go.Scatter(x=outlier['timestamp'], y=outlier[\"num\"],name=\"盛り上がり\",mode = 'markers'))\n\n st.write()\n st.dataframe(outlier)\n st.plotly_chart(fig)\n\n\n \n"
},
{
"alpha_fraction": 0.7306122183799744,
"alphanum_fraction": 0.7428571581840515,
"avg_line_length": 10.619047164916992,
"blob_id": "61e9488ab23edfd1655d217e2d904527d883937b",
"content_id": "ca0e969f2a9df6fca1f2e72434c91e2b316de8c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 21,
"path": "/README.md",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "# comment-app\nyoutubeライブからコメントを取得\n\n\n環境設定\n\n`Python 3.9.1`\n\n\n使い方\n\n`streamlit run src/codes/main.py`\n\nで実行\n\n# If you run it locally\n\nRun `run.py` in the `continuation_livechat_crawler` with `URL`.\n\nkey:firebeseの秘密鍵のjsonファイル\nbucket_name: storageの名前\n\n"
},
{
"alpha_fraction": 0.5721217393875122,
"alphanum_fraction": 0.5924128890037537,
"avg_line_length": 38.73684310913086,
"blob_id": "5a6baa937a472a6af8ac9779a9d5e873761b9138",
"content_id": "66fe503096acc908cf923ca87c1fcb5d7c9bb440",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2339,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 57,
"path": "/src/codes/functions.py",
"repo_name": "Demention-inc/comment-app",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np\nimport pandas as pd\nimport os\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime as dt\nfrom datetime import timedelta\n\ndef process_csv(csv):\n df=pd.read_csv(csv).drop('id',axis=1)\n df['num']=1\n for n in range(len(df['timestamp'])):\n if df['timestamp'][n][0]=='-':\n df= df.drop(index=n)\n df=df.reset_index(drop=True)\n for n in range(len(df['timestamp'])):\n if len(df['timestamp'][n])==4:\n df['timestamp'][n]='0:'+'0'+df['timestamp'][n]\n elif len(df['timestamp'][n])==5:\n df['timestamp'][n]='0:'+df['timestamp'][n]\n elif len(df['timestamp'][n])==8:\n df['timestamp'][n]='0:'+df['timestamp'][n][0:5]\n df1=df.groupby('timestamp',as_index=False).sum()\n df1['timestamp']= pd.to_datetime(df1['timestamp']) #datetime型にする\n df1=df1.groupby(pd.Grouper(key='timestamp', freq='30s')).sum().reset_index()\n df1['timestamp'] = df1['timestamp'].dt.strftime('%H:%M:%S')\n df=df1.set_index('timestamp')\n return df\n\ndef process_df(df,number=1):\n df['num']=1\n for n in range(len(df['timestamp'])):\n if df['timestamp'][n][0]=='-':\n df= df.drop(index=n)\n df=df.reset_index(drop=True)\n for n in range(len(df['timestamp'])):\n if len(df['timestamp'][n])==4:\n df['timestamp'][n]='0:'+'0'+df['timestamp'][n]\n elif len(df['timestamp'][n])==5:\n df['timestamp'][n]='0:'+df['timestamp'][n]\n elif len(df['timestamp'][n])==8:\n df['timestamp'][n]='0:'+df['timestamp'][n][0:5]\n df1=df.groupby('timestamp',as_index=False).sum()\n df1['timestamp']= pd.to_datetime(df1['timestamp']) #datetime型にする\n #何秒ごとに集計するか\n df1=df1.groupby(pd.Grouper(key='timestamp', freq=str(number)+'s')).sum().reset_index()\n df1['timestamp'] = df1['timestamp'].dt.strftime('%H:%M:%S')\n df=df1.set_index('timestamp')\n return df\n\ndef make_outlier(ts, ewm_span=30, threshold=1.5):\n assert type(ts) == pd.Series\n ewm_mean = ts.ewm(span=ewm_span).mean() # 指数加重移動平均\n ewm_std = ts.ewm(span=ewm_span).std() # 指数加重移動標準偏差\n outlier = ts[(ts - ewm_mean)> ewm_std * threshold]\n outlier=outlier.reset_index().sort_values('num',ascending=False)\n return outlier\n\n"
}
] | 7 |
faelern/snake | https://github.com/faelern/snake | 3fa46da1e7176c49a32bdecb32f9b4d596a015b8 | 2cc4f5f6c192d401e23d569ab751ae3640f8e15e | 4e0982cd52e321dae3284ca658386c0399be001b | refs/heads/master | 2023-04-29T07:25:30.185885 | 2021-05-18T15:54:04 | 2021-05-18T15:54:04 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5596330165863037,
"alphanum_fraction": 0.5871559381484985,
"avg_line_length": 8.083333015441895,
"blob_id": "16e4debdc5ef626586b1396d1151c96e6c5bd71d",
"content_id": "8469ba2a5405e884a2c65687d15ad423f40a3ca3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 218,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 24,
"path": "/main.py",
"repo_name": "faelern/snake",
"src_encoding": "UTF-8",
"text": "import pgzrun, sys\n\nWIDTH = 500\nHEIGHT = 500\n\n\ndef generate_snake():\n\n\n\ndef draw():\n screen.fill('black')\n\n\ndef update():\n pass\n\n\ndef on_key_down(key):\n if key == keys.ESCAPE:\n sys.exit()\n\n\npgzrun.go()\n"
}
] | 1 |
lelekaz/Hangman | https://github.com/lelekaz/Hangman | 74218f23096c2e1ed88277a0ae1de581ec2c4574 | f9631a90863cc22474c62c81949c9da3cc066605 | a1e9f9041e98f0f0142e48d6ae42a8285d48002f | refs/heads/master | 2022-12-21T22:14:32.490570 | 2020-09-16T01:45:43 | 2020-09-16T01:45:43 | 293,402,381 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4186713397502899,
"alphanum_fraction": 0.4244065582752228,
"avg_line_length": 32.92972946166992,
"blob_id": "2e66ee8e354d408c17897dc6853d1280f53cf309",
"content_id": "ae889fd357138e8bda464891dba546b4dba34621",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6277,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 185,
"path": "/Hangman.py",
"repo_name": "lelekaz/Hangman",
"src_encoding": "UTF-8",
"text": "#This is the method that drives the whole program\ndef Main():\n print(\"Welcome to Hangman!\")\n correct_phrase = Get_Phrase()\n guess_phrase = Setup_Guess_Phrase(correct_phrase)\n\n #This is to put enough blank print statements to prevent Player 2\n #from automatically seeing the hangman phrase\n for x in range(50):\n print()\n\n Play_Hangman(correct_phrase, guess_phrase)\n\n#This proceeds to get the hangman phrase from Player 1\ndef Get_Phrase():\n correct_phrase = \"\"\n while len(correct_phrase) == 0:\n correct_phrase = input(\"Player 1, enter a word/phrase for Player 2 to guess: \")\n return correct_phrase.upper()\n\n#This sets up the underscored hangman phrase that Player 2 sees and tries to guess from\ndef Setup_Guess_Phrase(cp):\n guess_phrase = \"\"\n for x in range(len(cp)):\n if cp[x].isalpha():\n guess_phrase += \"_\"\n else:\n guess_phrase += cp[x]\n return guess_phrase\n\n#This runs the part of the game where Player 2 guesses the letters and hangman phrase\ndef Play_Hangman(cp, gp):\n correct_phrase = cp\n guess_phrase = gp\n num_bad_guesses = 0\n\n running = True\n while running:\n print(\"Hangman Phrase: \" + guess_phrase)\n Print_Hangman_Body(num_bad_guesses)\n\n if correct_phrase == guess_phrase:\n print(\"Congratulations, you guessed the phrase! Player 2 wins!\")\n running = False\n continue\n if num_bad_guesses == 6:\n print(\"Sorry, you lost the game. Player 1 wins!\")\n running = False\n continue\n\n print()\n print(\"Main Menu:\")\n print(\"0: Guess a Letter.\")\n print(\"1: Guess the hangman phrase.\")\n print(\"2: Quit.\")\n\n user_choice = input()\n\n if user_choice == \"0\":\n guess_temp = Guessing_Letter(correct_phrase, guess_phrase)\n if guess_phrase == guess_temp:\n num_bad_guesses += 1 #If Player 2 guessed a wrong letter, another body part is drawn in the illustration\n guess_phrase = guess_temp\n elif user_choice == \"1\":\n got_it_right = Guessing_Phrase(correct_phrase)\n if got_it_right == True:\n running = False #If Player 2 guessed the phrase correctly, the game ends and Player 2 wins\n else:\n num_bad_guesses += 1 #If Player 2 guessed the phrase incorrectly, another body part is drawn in the illustration\n elif user_choice == \"2\":\n running = False\n else:\n print(\"Input a valid response.\")\n\n#This lets Player 2 guess a letter and edits the guess_phrase (underscored phrase) accordingly\ndef Guessing_Letter(cp, gp):\n letter = \"\"\n while len(letter) != 1:\n letter = input(\"Type in the letter you want to guess: \")\n\n guess_phrase_list = list(gp)\n for x in range(len(cp)):\n if cp[x] == letter.upper():\n guess_phrase_list[x] = letter.upper()\n\n guess_phrase = \"\".join(guess_phrase_list)\n\n return guess_phrase\n\n#This lets Player 2 guess the hangman phrase, and returns a boolean value of whether they guessed correctly or not\ndef Guessing_Phrase(cp):\n guess = input(\"Type in your guess for the hangman phrase: \")\n if guess.upper() == cp:\n print(\"Congratulations, you guessed the phrase! 
Player 2 wins!\")\n return True\n else:\n print(\"That's incorrect; try again.\")\n return False\n\n#This is the printing of the Hangman illustration; it shows the different stages of the hangman body as the number\n#nbg gets larger\ndef Print_Hangman_Body(nbg):\n print()\n print(\" -------\")\n print(\" | |\")\n print(\" | |\")\n\n if nbg == 0:\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n elif nbg == 1:\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" |\")\n elif nbg == 2:\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" | |\")\n print(\" | |\")\n print(\" | |\")\n print(\" | |\")\n print(\" |\")\n print(\" |\")\n elif nbg == 3:\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" | |\")\n print(\" \\ | |\")\n print(\" \\| |\")\n print(\" | |\")\n print(\" |\")\n print(\" |\")\n elif nbg == 4:\n print(\" | |\")\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" | |\")\n print(\" \\ | / |\")\n print(\" \\|/ |\")\n print(\" | |\")\n print(\" |\")\n print(\" |\")\n elif nbg == 5:\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" | |\")\n print(\" \\ | / |\")\n print(\" \\|/ |\")\n print(\" | |\")\n print(\" / |\")\n print(\" / |\")\n elif nbg == 6:\n print(\" ___ |\")\n print(\" / \\ |\")\n print(\" \\___/ |\")\n print(\" | |\")\n print(\" \\ | / |\")\n print(\" \\|/ |\")\n print(\" | |\")\n print(\" / \\ |\")\n print(\" / \\ |\")\n\n print(\" |\")\n print(\" |\")\n print(\" |\")\n print(\" _______________\")\n\nMain()\n"
},
{
"alpha_fraction": 0.7991631627082825,
"alphanum_fraction": 0.7991631627082825,
"avg_line_length": 78.66666412353516,
"blob_id": "763c37745fe83708a780b9bc0ecbf06e7629a3d4",
"content_id": "8c3d412dc3dcecbfcba8d837cca2361e5cdeb5cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 3,
"path": "/README.md",
"repo_name": "lelekaz/Hangman",
"src_encoding": "UTF-8",
"text": "# Hangman\n\nThis mini project was assigned in sophomore year in high school. It's a very simple and easy Hangman code that I reorganized for the sake of brushing up on the Python Language. Any suggestions in how to improve are appreciated!\n"
}
] | 2 |
rfreiberger/TPCode | https://github.com/rfreiberger/TPCode | 1125f16860669f0c213e96c4d8ca3add402899c5 | 26ae0dfd228f2ef2ef129945417f7652998a7c01 | 11c64db328591186fea51112fb0a5e960a7f399a | refs/heads/master | 2021-09-03T06:16:45.348504 | 2018-01-06T09:30:59 | 2018-01-06T09:30:59 | 116,470,551 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6356107592582703,
"alphanum_fraction": 0.6687370538711548,
"avg_line_length": 17.538461685180664,
"blob_id": "8e2168f129d7de8d0f0a35a62067c82eef924746",
"content_id": "f23cb3fd27867453c5c62329bf7d2f18248aa46a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 483,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 26,
"path": "/bubblesort.py",
"repo_name": "rfreiberger/TPCode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport random\n\nnewlist = [random.randint(0,101) for r in range(50)]\n\ndef bubblesort(mylist):\n\n\tprint(\"Starting sort on following random list: \")\n\tprint(newlist)\n\n\tfor outloop in range(0, len(mylist) -1, 1):\n\n\t\tfor inloop in range(0, len(mylist) -1, 1):\n\n\t\t\tif mylist[inloop] > mylist[inloop + 1]:\n\n\t\t\t\ttemp = mylist[inloop]\n\t\t\t\tmylist[inloop] = mylist[inloop + 1]\n\t\t\t\tmylist[inloop + 1] = temp\n\n\tprint(\"Completed sort\")\n\tprint(newlist)\n\n\nbubblesort(newlist)\n\n"
},
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 13,
"blob_id": "6d930cf20d12ac41f4608bf01d1b295d91a01aa7",
"content_id": "db55c8cbb9d44cff0c1948ec31e6c729029d93c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/README.md",
"repo_name": "rfreiberger/TPCode",
"src_encoding": "UTF-8",
"text": "# TPCode\nTwo coding puzzles\n"
},
{
"alpha_fraction": 0.5119825601577759,
"alphanum_fraction": 0.5403050184249878,
"avg_line_length": 16.69230842590332,
"blob_id": "8576d928f6a09e4758d7408c993b35a62027346c",
"content_id": "867096b567c51431b8b9c897c826168b69f13f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 459,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 26,
"path": "/listcount.py",
"repo_name": "rfreiberger/TPCode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\na = [1, 6, 2, 3, 5, 1, 4, 1, 2, 4]\n\ndef countitems(mylist):\n\n print(\"Starting count\")\n\n countdict = {}\n\n for items in mylist:\n \n if items in countdict:\n\n countdict[items] = (countdict[items] + 1)\n\n else:\n\n countdict[items] = 1\n\n for name in sorted(countdict):\n print(\"Number: \" + str(name) + \" Count: \" + str(countdict[name]))\n \n print(\"Completed count\")\n\ncountitems(a)"
}
] | 3 |
razlib/espl | https://github.com/razlib/espl | d7cd85faeb839361cd8c6dd2faf483b79d4280d1 | 83602f9da8e0e83913de8b6b223554871a2e11be | 708c8371eda624866b578e5b175ea48b858011c3 | refs/heads/master | 2021-01-01T06:45:30.264477 | 2013-02-04T10:14:21 | 2013-02-04T10:14:21 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5507487654685974,
"alphanum_fraction": 0.5640599131584167,
"avg_line_length": 21.148147583007812,
"blob_id": "af4099bec436609583de285a8b6433ba333ab442",
"content_id": "09428b6aa08db31ad18de37e082770c3f4d68c64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 601,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 27,
"path": "/lab9/grades.py",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "import sys\n\n\nfileName = sys.argv[1]\nfp = open(fileName)\nfp.readline() #skip first line\nsum = 0 \ncount = 0 #number of students\ngradesDic = {}\nidDic = {}\nfor line in fp:\n print line\n arr = line.split(',')\n grade = int(arr[2])\n ID = arr[0]\n sum += grade\n count+= 1\n if grade in gradesDic:\n gradesDic[grade] += 1\n idDic[grade].append(ID) \n else:\n gradesDic[grade] = 1\n idDic[grade] = [ID]\nprint \"*********************************\"\nprint \"Average: \" + str(float(sum) / float(count))\nfor g in gradesDic.keys():\n print str(g) + \": \" + str(gradesDic[g]) + \" ID's: \" + str(idDic[g])\n \n"
},
{
"alpha_fraction": 0.5199999809265137,
"alphanum_fraction": 0.5433333516120911,
"avg_line_length": 11.82608699798584,
"blob_id": "6d1ea2787584ad9501cf06189f836fa4b06c7d39",
"content_id": "f8fcf0ebf29e579ed68e98b3cf83a17ce08bbe54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 300,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 23,
"path": "/lab6/bsplit/Makefile",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": ".PHONY: all clean\n.SUFFIXES: .c .o .h .asm\nCFLAGS=-g -m32 -O0 -Wall\nLDFLAGS=$(CFLAGS)\n\n%.o: %.c\n\t$(CC) $(CFLAGS) -m32 -g -c -o $@ $<\n\n%.o: %.asm\n\tnasm -g -f elf -o $@ $<\n\nall: bsplit\n\nbsplit: bsplit.o\n\tgcc -m32 -o $@ $^\n\n#bsplit.o: bsplit.asm\n\nclean:\n\trm -f *.o \n\ndistclean: clean\n\trm -f bsplit\n \n"
},
{
"alpha_fraction": 0.51347416639328,
"alphanum_fraction": 0.534595787525177,
"avg_line_length": 17.30666732788086,
"blob_id": "f4637405cf48578495b568c68326a0f476779a4f",
"content_id": "72a34260cbf8eaeb74b277c0225e6ab2b74df13c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1373,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 75,
"path": "/lab6/bsplit.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include \"xsum.h\"\n\nint main(int argc, char **argv)\n{\n \n \n int chunkSize = 10;\n char *fileName;;\n char buffer[1024];\n int count = 0;\n int fileCounter = 1; //helps to set the files names\n char tmpbuf[15];\t\n char newName[100];\n unsigned int mychceksum = 0;\n //unsigned int bigChecksum = 0;\n\n\n fileName = argv[1];\n \n \n FILE *bigFile = fopen(fileName, \"rb\"); \n \n /*if (xFlag)\n {\n bigChecksum = checksum(bigFile, 0);\n printf(\"Checksum: %d\\n\", bigChecksum);\n rewind(bigFile);\n }*/\n \n while (1)\n {\n \n\t\t \n //read chunk from the original file\n count = fread(buffer, 1, chunkSize, bigFile); \n \n //set new name\n sprintf(tmpbuf, \"%d\", fileCounter);\n snprintf(newName, 100, \"%s%s%s\",fileName,\".\", tmpbuf);\n \n //create a new file\n FILE *smallFile = fopen(newName, \"w+\");\n \n\t \n //save 4 bytes for checksum\n fwrite(&mychceksum,4,1,smallFile);\n \n //write the chunk to the file \n fwrite(buffer,count,1, smallFile);\n \n fileCounter++;\t\t\t\n \n //calc checksum\n //mychceksum = checksum(smallFile, 1); \n rewind(smallFile);\n //fwrite(&mychceksum,4,1, smallFile);\n\n \n if (count != chunkSize)\n {\n\n\tbreak;\n } \n \n fclose(smallFile);\n }\n \n\n \n fclose(bigFile);\n\n \n\n}\n"
},
{
"alpha_fraction": 0.5526315569877625,
"alphanum_fraction": 0.5625,
"avg_line_length": 12.260869979858398,
"blob_id": "7768c666d899d21923de765876134778eec7417a",
"content_id": "6209e07c12659ae99f70c712ccace32097586409",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 23,
"path": "/lab5/class/Makefile",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": ".PHONY: all clean\n.SUFFIXES: .c .o .h .asm\nCFLAGS=-g -m32 -O0 -Wall\nLDFLAGS=$(CFLAGS)\n\n\n%.o: %.asm\n\tnasm -g -f elf -o $@ $<\n\n%.o: %.c\n\tgcc $(CFLAGS) -c $<\n\nall: scmp scmpasm\n\nscmp: scmp.o cmpstrlex.o\n\tgcc $(LDFLAGS) -o $@ $^\n\nscmpasm: scmp.o cmpstrasmlex.o\n\tgcc $(LDFLAGS) -o $@ $^\n\n\nclean:\n\trm -f *.o *~"
},
{
"alpha_fraction": 0.6164383292198181,
"alphanum_fraction": 0.6273972392082214,
"avg_line_length": 17.049999237060547,
"blob_id": "4a64ef51ac4238e1c24460902f2fd1c770b56366",
"content_id": "328e18bcf7b3fba2cd0a2e7ec97640717eb6f195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 20,
"path": "/lab2/Class/testfgn.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n#include \"fgetname.h\"\n\nint main(int argc, char **argv) {\n\tFILE *stream = fopen(\"fgetname.c\", \"r\");\n\tchar name[64];\n\tif(!stream) {\n\t\tfprintf(stderr, \"run the test in the source directory\\n\");\n\t\treturn 1;\n\t}\n\n\twhile(fgetname(name, sizeof(name), stream))\n\t\tprintf(\"%s \", name);\n\n\tprintf(\"\\n\");\n\tfclose(stream);\n\n\treturn 0;\n}\n\n\t\n\n"
},
{
"alpha_fraction": 0.5673549771308899,
"alphanum_fraction": 0.5796459913253784,
"avg_line_length": 27.20833396911621,
"blob_id": "940107bd066e829673744d41df4bd3d3ee5b6c57",
"content_id": "0065b36300501be3a58c5af597fbc1eeb939ed00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2034,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 72,
"path": "/lab10/pysh.py~",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os, os.path, sys, getpass, shlex, subprocess\n\nhome=os.getenv('HOME')\n\ndef getArgs(argv):\n newArgv = []\n for arg in argv:\n if arg[0] == \"$\" and arg[1:] in os.environ:\n\tnewArgv.append(os.environ[arg[1:]])\n else:\n\tnewArgv.append(arg)\n return newArgv\n\ndef runProccess(argv):\n if '>' in argv:\n f = open(argv[argv.index('>')+1],'w')\n subprocess.call(argv[0:argv.index('>')], stdout = f, shell=False)\n elif '<' in argv:\n f = open(argv[argv.index('<')+1])\n\tsubprocess.call(argv[0:argv.index('<')], stdin = f, shell=False)\n elif '|' in argv:\n p1 = subprocess.Popen(argv[0:argv.index('|')], stdout=subprocess.PIPE)\n p2 = subprocess.call(argv[argv.index('|')+1:], stdin=p1.stdout)\n p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.\n \n else:\n\n subprocess.call(getArgs(argv), shell=False)\n \ndef getcwd():\n \"like os.getcwd(), but collapses the home directory prefix into ~\"\n cwd = os.getcwd()\n if cwd.startswith(home):\n cwd = '~'+cwd[len(home):]\n return cwd\n\nwhile True:\n print \"%s@%s>\" % (getpass.getuser(), getcwd()),\n\n argv = None\n try:\n argv = shlex.split(raw_input())\n except EOFError:\n print\n break # end of file from user, e.g. Ctrl-D\n\n if not argv:\n continue # empty command\n\n if argv[0] in ['exit', 'quit', 'bye']:\n break\n elif len(argv) > 2 and argv[1] == \"=\":\n if argv[0] not in os.environ:\n\tos.environ[argv[0]] = argv[2] \n elif argv[0]=='cd':\n try:\n os.chdir(os.path.expanduser(getArgs(argv)[1])) # expand ~ to home directory\n except IndexError: # no arguments, chdir to home dir\n os.chdir(home)\n except OSError, s:\n print >>sys.stderr, \"chdir: %s\" % s\n else:\n\trunProccess(argv)\n #print \"TODO: execute %s\" % ' '.join(argv)\n\n# goodbye message\ngoodbye = \"have a good sunny day, see you back soon\"\nprint \"*\"+\"-\"*(len(goodbye)+2)+\"*\"\nprint \"| %s |\" % goodbye\nprint \"*\"+\"-\"*(len(goodbye)+2)+\"*\" \n\n\n"
},
{
"alpha_fraction": 0.5365013480186462,
"alphanum_fraction": 0.5468319654464722,
"avg_line_length": 20.597015380859375,
"blob_id": "f93c8c76b20faa386d51604d988d1088f073761a",
"content_id": "d3f37efbba4f03a33ad87f2399b979b8dae11d9f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1452,
"license_type": "no_license",
"max_line_length": 279,
"num_lines": 67,
"path": "/lab2/Class/canalyze.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n#include \"fgetname.h\"\n#include \"namelist.h\"\n\nchar* KEY_WORDS[] = {\"auto\", \"else\", \"long\", \"switch\", \"break\", \"enum\", \"typedef\", \"case\", \"return\", \"union\", \"char\", \"float\", \"short\", \"unsigned\", \"const\", \"for\", \"signed\", \"void\", \"countinue\",\"goto\",\"sizeof\",\"default\", \"if\", \"static\", \"while\", \"do\", \"int\", \"struct\", \"double\"};\n\nnamelist NAME_LIST;\n\nint cstring_cmp(const void *a, const void *b) \n{\n struct namestat *ia = ( struct namestat *)a;\n struct namestat *ib = ( struct namestat *)b;\n return strcmp(ia->name, ib->name);\n}\n\nvoid printFileWords(char *fileName)\n{\n FILE *file = fopen(fileName, \"r\");\n NAME_LIST = make_namelist();\n char buffer[128];\n while (fgetname(buffer, 128, file) != NULL)\n {\n if (!isKeyWord(buffer))\n {\n\t add_name(NAME_LIST, buffer); \n }\n }\n \n \t//sort\n\tqsort(NAME_LIST->names, NAME_LIST->size, sizeof(struct namestat), cstring_cmp);\n\t\n \n int i;\n for (i = 0; i < NAME_LIST->size; i++)\n {\n\tprintf(\"NAME: %s \", NAME_LIST->names[i].name);\n\tprintf(\"COUNT: %d\\n\", NAME_LIST->names[i].count);\n }\n fclose(file);\n \n \n}\n\nint isKeyWord(char *word)\n{\n int i;\n for (i = 0; i < 29; i++)\n {\n\tif (strcmp(word, KEY_WORDS[i]) == 0)\n\t{\n\t return 1;\n\t}\n }\n \n return 0;\n}\n\nint main(int argc, char **argv)\n{\n int i;\n for (i = 1; i < argc; i++)\n {\n printFileWords(argv[i]);\n }\n return 0;\n}\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6888889074325562,
"alphanum_fraction": 0.7111111283302307,
"avg_line_length": 41,
"blob_id": "847663dde5e5a12739f5c7cdad1547181947a2e7",
"content_id": "e08320aeac8302475ea46a0217387bb1f3ee52d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 1,
"path": "/lab7/class/bsplit/xsum.h",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "\nint checksum(FILE *file, int skip4bytes);\n\n\n"
},
{
"alpha_fraction": 0.5184502005577087,
"alphanum_fraction": 0.5369003415107727,
"avg_line_length": 16.933332443237305,
"blob_id": "609cdabcf933ef59fad6f5c2545a798fc46753e1",
"content_id": "958cb864ecc14eb4e8c76f18366da2b7a9c676bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 542,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 30,
"path": "/lab7/class/bsplit/xsum.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include \"xsum.h\"\n\nint checksum(FILE *file, int skip4bytes)\n{\n int count;\n unsigned int checksum = 0;\n unsigned int word;\n \n rewind(file);\n if (skip4bytes)\n {\n //start calc the checksum from the fifth byte\n //because the first 4 are saved for the checksum\n fseek(file,4, SEEK_SET); \n }\n \n while (1)\n {\n word = 0;\n count = fread(&word, 1, 4, file);\n checksum = checksum ^ word;\n if (count != 4)\n {\n\tbreak;\n } \n }\n \n return checksum;\n}\n\n\n \n"
},
{
"alpha_fraction": 0.4828178584575653,
"alphanum_fraction": 0.506872832775116,
"avg_line_length": 16.636363983154297,
"blob_id": "1c48f743f11bd6106a67c4962ff8827ca6024847",
"content_id": "adf0d35309d997a3e721a1c5e5f082e7c58466ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 33,
"path": "/lab2/practice/fortune.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n\nint main (int argc, char** argv)\n{\n int count = 0;\n int i = 0;\n int randomLineNumber;\n FILE *file;\n char buffer[128];\n char *fileName = argv[1];\n file = fopen(fileName, \"r\");\n \n while (fgets(buffer, 128, file) != NULL)\n {\n count++;\n }\n \n fclose(file);\n \n \n srand(time(NULL));\n randomLineNumber = rand() % count;\n file = fopen(fileName, \"r\"); \n \n for (i = 0; i < randomLineNumber; i++)\n {\n\tfgets(buffer, 128, file);\n }\n \n fclose(file);\n printf(\"%d: %s\",i, buffer);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.7475961446762085,
"alphanum_fraction": 0.7475961446762085,
"avg_line_length": 15.5600004196167,
"blob_id": "92c470666f462da4e1c1eb511d211a3f3875078b",
"content_id": "49debd6886194efcf38f3efe89e269b560891b14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 25,
"path": "/lab2/Class/Makefile",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "all: testnl testfgn canalyze\n\ntest: testnl testfgn\n\t./testnl\n\t./testfgn\n\ntestnl: testnl.o namelist.o\ntestfgn: testfgn.o fgetname.o\ncanalyze: canalyze.o fgetname.o namelist.o\n\ntestnl.o: testnl.c namelist.h\nnamelist.o: namelist.c namelist.h\n\ncanalyze.o: canalyze.c fgetname.h\n\ntestfgn.o: testfgn.c fgetname.h\nfgetname.o: fgetname.c fgetname.h\n\n\n\nclean:\n\trm -f testnl testfgn *.o *~\n\ndistclean: clean\n\trm -f canalyze\n\t\n"
},
{
"alpha_fraction": 0.5080240964889526,
"alphanum_fraction": 0.5280842781066895,
"avg_line_length": 16.339130401611328,
"blob_id": "c87ea64d03faa290c8b56e21f2e7fb41c49cf32b",
"content_id": "41535ddf77887d31da50fa64c63a028c86e664ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1994,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 115,
"path": "/lab3/class/bsplit/bsplit.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include \"xsum.h\"\n\nint main(int argc, char **argv)\n{\n \n \n const char *optString = \"hb:x\";\n int hFlag = 0;\n int bFlag = 0;\n int xFlag = 0;\n int bSIZE = 0;\n extern char *optarg;\n \n int chunkSize = 1024;\n char *fileName;;\n char buffer[1024];\n int count = 0;\n int fileCounter = 1; //helps to set the files names\n char tmpbuf[15];\t\n char newName[100];\n unsigned int mychceksum = 0;\n unsigned int bigChecksum = 0;\n\n //getopt\n int opt = 0;\n while (opt != -1)\n {\n opt = getopt(argc, argv, optString);\n \n switch (opt)\n {\n\tcase 'h':\n\t hFlag = 1;\n\t break;\n\tcase 'b':\n\t bFlag = 1;\n\t bSIZE = atoi(optarg);\n\t break;\n\tcase 'x':\n\t xFlag = 1;\n\t break;\n };\n }\n \n \n if (hFlag)\n {\n printf(\"-b SIZE put at most SIZE bytes per output file\\n-x print the checksum of FILE on the standard output\\n\");\n\n return;\n }\n \n if (bFlag)\n {\n chunkSize = bSIZE;\n }\n fileName = argv[argc - 1];\n \n \n \n FILE *bigFile = fopen(fileName, \"rb\"); \n \n if (xFlag)\n {\n bigChecksum = checksum(bigFile, 0);\n printf(\"Checksum: %d\\n\", bigChecksum);\n rewind(bigFile);\n }\n \n while (1)\n {\n \n\t\t \n //read chunk from the original file\n count = fread(buffer, 1, chunkSize, bigFile); \n \n //set new name\n sprintf(tmpbuf, \"%d\", fileCounter);\n snprintf(newName, 100, \"%s%s%s\",fileName,\".\", tmpbuf);\n \n //create a new file\n FILE *smallFile = fopen(newName, \"w+\");\n \n\t \n //save 4 bytes for checksum\n fwrite(&mychceksum,4,1,smallFile);\n \n //write the chunk to the file \n fwrite(buffer,count,1, smallFile);\n \n fileCounter++;\t\t\t\n \n //calc checksum\n mychceksum = checksum(smallFile, 1); \n rewind(smallFile);\n fwrite(&mychceksum,4,1, smallFile);\n\n \n if (count != chunkSize)\n {\n\n\tbreak;\n } \n \n fclose(smallFile);\n }\n \n\n \n fclose(bigFile);\n\n \n\n}\n"
},
{
"alpha_fraction": 0.5316455960273743,
"alphanum_fraction": 0.5358649492263794,
"avg_line_length": 10.850000381469727,
"blob_id": "d91b5507e858c0439f740182c1ee228ed8e10ef7",
"content_id": "a8df7453bc8b75c9caeee6e95d56ea027454f869",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 20,
"path": "/lab7/class/bsplit/Makefile",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "%.o: %.c\n\t$(CC) $(CFLAGS) -c -o $@ $<\n\n%.man: %.1\n\tnroff -man $< > $@\n\nall: bsplit\n\nbsplit: bsplit.o xsum.o\n\t$(CC) $(LDFLAGS) -o $@ $^\n\n\nbsplit.o: bsplit.c xsum.h\nxsum.o: xsum.c xsum.h\n\nclean:\n\trm -f *.o \n\ndistclean: clean\n\trm -f bsplit\n"
},
{
"alpha_fraction": 0.6177214980125427,
"alphanum_fraction": 0.6354430317878723,
"avg_line_length": 14.15384578704834,
"blob_id": "7be53a187569c527fd584b5cd517ad463c4044e1",
"content_id": "98118ddb6a4987bc7a702ac208b034ef47d596ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 395,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 26,
"path": "/lab7/class/bkup.sh",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": " #!/bin/sh\n\nfileName=$1\nnumberOfCopies=$2\n\n\nif [ ! -r $fileName ]; then \n echo \"$fileName doesn't exist.\"\n exit 1\nfi\n\nwhile [ \"$numberOfCopies\" != 0 ]; do\n\n if [ -r $fileName.$numberOfCopies ]; then \n\techo $numberOfCopies\n\tcp $fileName.$numberOfCopies $fileName.$(( $numberOfCopies + 1 ))\n fi\n \n numberOfCopies=$(( $numberOfCopies - 1 ))\ndone\n\ncp $fileName $fileName.1\n \n \n\n echo \"END\"\n"
},
{
"alpha_fraction": 0.6042972207069397,
"alphanum_fraction": 0.6087735295295715,
"avg_line_length": 18.20689582824707,
"blob_id": "f019ee28263764be3e9ec3b65ac87905a7a423b4",
"content_id": "cc12e316cac62b3e468a7de4a578b1582722c2ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 58,
"path": "/lab9/hangman.py",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "from random import choice\n\nwordsList = [\"hello\",\"tolpin\"]\n\ndef myPrint(word, lettersFound):\n s = \"\"\n for ch in word:\n if (ch in lettersFound):\n s+= ch\n else:\n s+= \"_\"\n s+= \" \"\n print s\n \n \n\ndef startNewGame():\n\n numberOfGuesses = 0\n maxGuesses = 5\n lettersFound = []\n lettersGuessed = []\n winFlag = False\n word = choice(wordsList)\n while 1:\n myPrint(word, lettersFound)\n print \"Guesses left:\", numberOfGuesses\n print \"Letters Guessed:\", list(set(lettersGuessed))\n letter = raw_input(\"guess a letter...\")\n if letter not in lettersGuessed:\n lettersGuessed.append(letter)\n if letter not in word:\n\tnumberOfGuesses += 1\n if letter in word:\n\tlettersFound.append(letter)\n \n if len(set(lettersFound)) == len(set(word)):\n\twinFlag = True\n\tbreak\n elif (numberOfGuesses > maxGuesses):\n\tbreak\n \n if (winFlag):\n print \"You Win!\"\n else:\n print \"Game Over... the word was \" + word\n c = raw_input(\"Another game...? (y/n)\")\n if c == \"y\":\n return True\n else:\n return False\n \n\n \nwhile 1:\n res = startNewGame()\n if res == False:\n break;\n\n\n\n"
},
{
"alpha_fraction": 0.4649006724357605,
"alphanum_fraction": 0.4834437072277069,
"avg_line_length": 14.079999923706055,
"blob_id": "512e9f26187666b7842f1c9746302432cd990f4f",
"content_id": "379ac7717bc299536023ca462217fa97e33129e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 755,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 50,
"path": "/lab3/practice/xsum.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": " \n#include <stdio.h>\n\nint main(int argc, char ** argv)\n{\n \n /*int c;\n int hflag = 0;\n int xflag = 0;\n while ((c = getopt (argc, argv, \"hx\")) != -1)\n {\n switch (c)\n {\n case 'h':\n\thflag = 1;\n\tprintf(\"H\\n\");\n\tbreak;\n case 'x':\n\tprintf(\"X\\n\");\n\txflag = 1;\n\tbreak;\n default:\n\tprintf(\"Invalid Argument.\\n\");\n\tbreak;\n }\n }*/\n \n\n int i = 0;\n FILE *file;\n unsigned int checksum = 0;\n unsigned int word;\n char *fileName = argv[1];\n int count;\n file = fopen(fileName, \"r\");\n while (1)\n {\n word = 0;\n count = fread(&word, 1, 4, file);\n checksum ^= word;\n if (count != 4)\n {\n\tprintf(\"Count %d\\n\",count);\n\tbreak;\n }\n \n }\n \n printf(\"CHECKSUM: %d\\n\", checksum);\n return 0; \n}"
},
{
"alpha_fraction": 0.535611629486084,
"alphanum_fraction": 0.553497314453125,
"avg_line_length": 17.754491806030273,
"blob_id": "05851e67c03d38e5d10bc716a3163335396f23d8",
"content_id": "abc6e5bd5d489b9ae11f3257e1f39698ed44825d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3131,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 167,
"path": "/lab7/class/bsplit/bsplit.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include \"xsum.h\"\n#include <sys/types.h>\n\nint main(int argc, char **argv)\n{\n \n \n const char *optString = \"hb:x\";\n int hFlag = 0;\n int bFlag = 0;\n int xFlag = 0;\n int bSIZE = 0;\n extern char *optarg;\n \n int chunkSize = 1024;\n char *fileName;;\n char buffer[1024];\n int count = 0;\n int fileCounter = 1; //helps to set the files names\n char tmpbuf[15];\t\n char newName[100];\n unsigned int mychceksum = 0;\n unsigned int bigChecksum = 0;\n int location = 0;\n int numberOfChunks = 0;\n int fileSize = 0;\n\n //getopt\n int opt = 0;\n while (opt != -1)\n {\n opt = getopt(argc, argv, optString);\n \n switch (opt)\n {\n\tcase 'h':\n\t hFlag = 1;\n\t break;\n\tcase 'b':\n\t bFlag = 1;\n\t bSIZE = atoi(optarg);\n\t break;\n\tcase 'x':\n\t xFlag = 1;\n\t break;\n };\n }\n \n \n if (hFlag)\n {\n printf(\"-b SIZE put at most SIZE bytes per output file\\n-x print the checksum of FILE on the standard output\\n\");\n\n return;\n }\n \n if (bFlag)\n {\n chunkSize = bSIZE;\n }\n fileName = argv[argc - 1];\n \n \n \n FILE *bigFile = fopen(fileName, \"rb\"); \n \n if (xFlag)\n {\n bigChecksum = checksum(bigFile, 0);\n printf(\"Checksum: %d\\n\", bigChecksum);\n rewind(bigFile);\n }\n \n \n //calc number of chunks\n fseek(bigFile, 0L, SEEK_END);\n fileSize = ftell(bigFile);\n numberOfChunks = (fileSize + chunkSize - 1) / chunkSize;\n printf(\"number Of Chunks: %d\\n\",numberOfChunks);\n \n int i = 0;\n pid_t childPID;\n int childStatus;\n pid_t tpid;\n \n for (i = 0; i < numberOfChunks; i++, location += chunkSize)\n {\n\n\n\n childPID = fork();\n \n if (childPID >= 0)\n {\n\t\n\tif (childPID == 0)\n\t{\n\t \n\t printf(\"This is done by the child proccess\\n\");\n\t //set new name\n\t sprintf(tmpbuf, \"%d\", fileCounter);\n\t snprintf(newName, 100, \"%s%s%s\",fileName,\".\", tmpbuf);\n \n\t writeChunk(bigFile, newName, location, chunkSize);\t\t\t\n\t \n\t printf(\"\\nchild end\\n\");\n\t exit(1);\n\n\t}\n\telse\n\t{\t \n\t //printf(\"This is done by the parent proccess\\n\");\n\t}\n }\n else\n {\n\tprintf(\"Fork has fail\\n\");\n }\n \n fileCounter++; \n \n }\n \n do \n {\n tpid = wait(&childStatus);\n } while (tpid > 0);\n \n printf(\"End\\n\");\n \n \n fclose(bigFile);\n\n \n}\n\nint writeChunk(FILE *originalFile, char *newFileName , int location, int size) \n{\n \n printf(\"writeChunk - location: %d size: %d name: %s\\n\", location, size, newFileName);\n unsigned int mychceksum = 0;\n char buffer[1024];\n int count = 0;\n \n \n fseek(originalFile, location, SEEK_SET);\n\t\n //read chunk from the original file\n count = fread(buffer, 1, size, originalFile); \n \n //create a new file\n FILE *smallFile = fopen(newFileName, \"w+\");\n \n //save 4 bytes for checksum\n fwrite(&mychceksum,4,1,smallFile);\n \n //write the chunk to the file \n fwrite(buffer,count,1, smallFile);\n \n //calc checksum\n mychceksum = checksum(smallFile, 1); \n rewind(smallFile);\n fwrite(&mychceksum,4,1, smallFile);\n \n fclose(smallFile);\n}"
},
{
"alpha_fraction": 0.5527272820472717,
"alphanum_fraction": 0.5690909028053284,
"avg_line_length": 17.233333587646484,
"blob_id": "a76c55deefe82ef836141e77726277e208bcab3d",
"content_id": "66f85b4b283c5946a3c2ca1ada29a40565881279",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 30,
"path": "/lab2/Class/testnl.c",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n#include <string.h>\n#include <assert.h>\n#include \"namelist.h\"\n\nint main(int argc, char **argv) {\n\tnamelist nl = make_namelist();\n\tint checks = 0;\n\tint i;\n\n\tadd_name(nl, \"foo\");\n\tadd_name(nl, \"bar\");\n\tadd_name(nl, \"foo\");\n\n\tfor(i = 0; i!=nl->size; ++i) {\n\t\tif(!strcmp(\"foo\", nl->names[i].name)) {\n\t\t\tassert(nl->names[i].count==2);\n\t\t\tchecks|=1;\n\t\t} else if(!strcmp(\"bar\", nl->names[i].name)) {\n\t\t\tassert(nl->names[i].count==1);\n\t\t\tchecks|=2;\n\t\t} else {\n\t\t\tchecks|=4;\n\t\t}\n\t}\n\tassert(checks==3);\n\t\n\treturn 0;\n}\n\t\t\n"
},
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6578947305679321,
"avg_line_length": 35,
"blob_id": "14eb1ea507a04e585d741e0c9cdddecd01eeeb73",
"content_id": "5d44ef361c8a100e87e0b58e2438d658c2542491",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 1,
"path": "/lab5/class/part1/cmpstr.h",
"repo_name": "razlib/espl",
"src_encoding": "UTF-8",
"text": "\n int cmpstr(char *str1, char *str2);\n"
}
] | 19 |
renatoRPv13/si2 | https://github.com/renatoRPv13/si2 | d0b10a21f09760b1054ad01c5e3eb67797ef771b | 409fcea2dc92711590f6edb89b71530ccd38208c | e999a808c4f0300515c2f4d278eba5d9f4c1fd73 | refs/heads/master | 2023-01-08T11:45:04.356806 | 2020-11-11T23:05:34 | 2020-11-11T23:05:34 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6370370388031006,
"alphanum_fraction": 0.6814814805984497,
"avg_line_length": 15.125,
"blob_id": "838ccdb9a9472620049420a5088df73911e537d7",
"content_id": "a78bfa6d610dd26ffbb03bf8dadbb05957881b80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 8,
"path": "/lecture/httpclient-lib.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "import requests\r\n\r\nurl = \"http://www.google.com.br:80\"\r\n\r\nr1 = requests.get(url)\r\n\r\nprint(r1.status_code, r1.reason)\r\nprint(r1.headers)"
},
{
"alpha_fraction": 0.7913669347763062,
"alphanum_fraction": 0.798561155796051,
"avg_line_length": 45.33333206176758,
"blob_id": "a7f5b51bbbce5789beeeb3b2f30807c69598150e",
"content_id": "45d108270b0764a8da72538921df85483d7c9549",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 3,
"path": "/README.md",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "# About this repo\n\nThis repo stores material used on the course Information Systems on Internet (Sistemas de Informação na Internet - SI²)\n"
},
{
"alpha_fraction": 0.731203019618988,
"alphanum_fraction": 0.7706766724586487,
"avg_line_length": 27,
"blob_id": "4fbf19fb6ec3c46e4f71c07df3f23698b4fa0031",
"content_id": "c46dc092ed59aeced03ee2a07ee06f6d8734c39a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 532,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 19,
"path": "/lecture/TCPClient.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "from socket import socket, AF_INET, SOCK_STREAM\n\n#serverName = '127.0.0.1'\nserverName = '127.0.0.1'\nserverPort = 12000\nclientSocket = socket(AF_INET, SOCK_STREAM)\n\n#Conecta ao servidor\nclientSocket.connect((serverName,serverPort))\n\n#Recebe mensagem do usuario e envia ao servidor\nmessage = input('Digite uma frase: ')\nclientSocket.send(message.encode('ascii'))\n\n#Aguarda mensagem de retorno e a imprime\nmodifiedMessage, addr = clientSocket.recvfrom(2048)\nprint(\"Retorno do Servidor:\",modifiedMessage.decode())\n\nclientSocket.close()\n"
},
{
"alpha_fraction": 0.5630785226821899,
"alphanum_fraction": 0.5725998282432556,
"avg_line_length": 31.324787139892578,
"blob_id": "3d5b4973e021dd792303747351726c81ade678e5",
"content_id": "39c4a7d220330ad65219e57bf244b98d08d98b7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3781,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 117,
"path": "/practices/05-mqtt/fire.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "import threading\nimport random as rnd\nimport paho.mqtt.client as mqtt\nimport sys\n\nclass Room():\n\n client = None\n name = None\n fire = False\n temp = None\n smoke = None\n sprinkler = False\n\n thF = None #Thread for controlling Fire\n thT = None #Thread for controlling Temperature Sensor\n thS = None #Thread for controlling Smoke Sensor\n\n def __init__(self, name, hostname, port, username, password):\n self.name = name\n\n t = rnd.normalvariate(5,2)\n self.thF = threading.Timer(t,self.setFire)\n self.thF.start()\n \n t = rnd.normalvariate(5,2)\n self.thT = threading.Timer(t,self.tempSensor)\n self.thT.start()\n\n t = rnd.normalvariate(5,2)\n self.thS = threading.Timer(t,self.smokeSensor)\n self.thS.start()\n\n self.client = mqtt.Client(client_id=self.name)\n self.client.username_pw_set(username, password)\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n\n self.client.connect(hostname, port, 60)\n\n self.client.loop_start()\n\n def cancel(self):\n self.thF.cancel()\n self.thT.cancel()\n self.thS.cancel()\n self.client.loop_stop()\n self.client.disconnect()\n\n def on_connect(self, client, userdata, flags, rc):\n client.subscribe(\"firealarm/\"+self.name+\"/sprinkler\")\n\n def on_message(self, client, userdata, msg):\n if (msg.topic == \"firealarm/\"+self.name+\"/sprinkler\"):\n if (msg.payload.decode(\"ascii\") == \"on\"):\n self.setSprinkler(True)\n elif (msg.payload.decode(\"ascii\") == \"off\"):\n self.setSprinkler(False)\n\n def setFire(self):\n p = rnd.random()\n if ((not self.fire) and (not self.sprinkler) and (p < 0.10)):\n self.fire = True\n print(self.name+\" is on Fire\")\n elif (self.fire) and (self.sprinkler) and (p < 0.4):\n self.fire = False\n print(self.name+\" fire wents out\")\n t = rnd.normalvariate(5,2)\n self.thF = threading.Timer(t,self.setFire)\n self.thF.start()\n\n def setSprinkler(self, status):\n self.sprinkler = status\n\n def tempSensor(self):\n if (not self.fire):\n self.temp = rnd.normalvariate(25,1)\n else:\n self.temp = rnd.normalvariate(57,4)\n self.client.publish(\"firealarm/\"+self.name+\"/temp\",str(self.temp))\n t = rnd.normalvariate(5,2)\n self.thT = threading.Timer(t,self.tempSensor)\n self.thT.start()\n\n def smokeSensor(self):\n if (not self.fire):\n self.smoke = False\n else:\n self.smoke = True\n self.client.publish(\"firealarm/\"+self.name+\"/smoke\",str(self.smoke))\n t = rnd.normalvariate(5,2)\n self.thS = threading.Timer(t,self.smokeSensor)\n self.thS.start()\n\nif __name__ == \"__main__\":\n if (len(sys.argv) == 6):\n N = int(sys.argv[1])\n if (N < 1):\n print(\"Invalid number of rooms\")\n exit(0)\n R = [None]*N\n print(\"Starting rooms...\")\n for i in range(0,N):\n R[i] = Room(name=\"Room\"+str(i+1),hostname=sys.argv[2],port=int(sys.argv[3]),username=sys.argv[4],password=sys.argv[5])\n print(\"Rooms are running, press any key to stop simulation\")\n a = input()\n print(\"Stopping simulation...\")\n for r in R:\n r.cancel()\n print(\"Done\")\n else:\n print(\"Usage: python3 fire.py <N> <host> <port> <usr> <pswd>\")\n print(\"N: the number of rooms to simulate\")\n print(\"host: hostname of the MQTT broker\")\n print(\"port: TCP port of the MQTT broker, don't use TLS/SSL port\")\n print(\"usr: MQTT username\")\n print(\"pswd: MQTT password\")"
},
{
"alpha_fraction": 0.7042889595031738,
"alphanum_fraction": 0.7200902700424194,
"avg_line_length": 25.117647171020508,
"blob_id": "5822eb41a1cf56bb9b563bc4e63709dbb8c20629",
"content_id": "5ec9e20d964b53e8e43cadc7d1c45ba85bb5c06d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 443,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 17,
"path": "/practices/05-mqtt/sub2.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "#!pip install paho-mqtt\nimport paho.mqtt.client as mqtt\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(\"casa1/sala/temp\")\n\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n\nclient = mqtt.Client(client_id=\"test\")\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"mqtt.eclipse.org\", 1883, 60)\n\nclient.loop_forever()"
},
{
"alpha_fraction": 0.7196261882781982,
"alphanum_fraction": 0.7570093274116516,
"avg_line_length": 35,
"blob_id": "88051086197706944a2553123b35245fb05c409e",
"content_id": "9cad9ddb4e549ad523eea8fe297d098653443b95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 3,
"path": "/practices/05-mqtt/pub.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "import paho.mqtt.publish as publish\n\npublish.single(\"casa1/sala/temp\", \"25.4\", hostname=\"mqtt.eclipse.org\")"
},
{
"alpha_fraction": 0.596875011920929,
"alphanum_fraction": 0.621874988079071,
"avg_line_length": 16.823530197143555,
"blob_id": "b794acf4f3c8f879a0cd068126f924323fb2de56",
"content_id": "76e9a45ea63619f15d01b7008306150ac351582d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 320,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 17,
"path": "/lecture/httpclient.py",
"repo_name": "renatoRPv13/si2",
"src_encoding": "UTF-8",
"text": "import socket\r\n\r\nsite = \"www.google.com.br\"\r\n\r\nmysock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\nmysock.connect((site,80))\r\n\r\nmsg = \"GETE / HTTP/1.0\\n\"\r\nmsg = msg + \"Host: \"+site+\"\\n\"\r\nmsg = msg + \"\\n\"\r\n\r\nmysock.send(msg.encode(\"ascii\"))\r\n\r\ndata = mysock.recv(1000)\r\nprint(data.decode())\r\n \r\nmysock.close()\r\n"
}
] | 7 |
edisondev/citycompare | https://github.com/edisondev/citycompare | 7fb22555627593578af6c85af28f497b25d756ca | 12fd0b47213f9899bf0f5a50b6beb959a471052d | cf5434dac19ba99a20c6189c9127086fdc1cf9c2 | refs/heads/master | 2021-05-07T04:51:58.173164 | 2018-01-06T06:01:55 | 2018-01-06T06:01:55 | 111,494,615 | 0 | 0 | null | 2017-11-21T03:34:00 | 2017-11-19T02:59:39 | 2017-11-19T19:07:33 | null | [
{
"alpha_fraction": 0.570218026638031,
"alphanum_fraction": 0.5865898728370667,
"avg_line_length": 38.596092224121094,
"blob_id": "889bf3a4ca1c537e9c365f88301ce0ee0b6b2c6b",
"content_id": "3f2ece1c0d00439eac950808334ddb8d5197c7b6",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12155,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 307,
"path": "/citycompare/classify_text.py",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 26 18:18:33 2017\n\n@author: Nick\n\"\"\"\nimport six.moves.cPickle as pickle\nfrom nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\nimport time\nimport pandas as pd\nimport numpy as np\nfrom scipy import spatial\nimport os\n\nfrom matplotlib import pyplot as plt\n\n\nNUM_TOPICS=200\nPATH='C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare'\n\ndef initiate_pipline():\n tokenizer = RegexpTokenizer(r'\\w+') #Words Separator\n en_stop = get_stop_words('en') # create English stop words list\n p_stemmer = PorterStemmer() # Create p_stemmer of class PorterStemmer\n \n pipeline = {'tokenizer':tokenizer,\n 'stop_words': en_stop,\n 'stemmer': p_stemmer}\n return pipeline\n \ndef return_tokens(pipeline, text):\n # clean and tokenize document string\n #TODO: remove numbers from string\n #TODO: remove unicode characters from string\n \n raw = text.lower()\n tokens = pipeline['tokenizer'].tokenize(raw)\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in pipeline['stop_words']]\n # stem tokens\n #stemmed_tokens = [pipeline['stemmer'].stem(i) for i in stopped_tokens]\n #return stemmed_tokens\n return stopped_tokens\n\ndef generate_word2vec_corpus():\n filename=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\glove6B\\glove.6B.200d.txt'\n model=gensim.models.KeyedVectors.load_word2vec_format(filename, binary=False)\n return model\n\ndef get_word2vec_vector_txt(model,pipeline, text):\n tokens=return_tokens(pipeline, text)\n doc_vector=np.zeros([1,model.vector_size], dtype=np.float32)\n for kToken in tokens:\n try:\n doc_vector=doc_vector+model[kToken]\n except:\n k=1\n #do nothing\n return doc_vector\n \ndef get_word2vec_vector_tkns(model,pipeline, tokens):\n doc_vector=np.zeros([1,model.vector_size], dtype=np.float32)\n for kToken in tokens:\n try:\n doc_vector=doc_vector+model[kToken]\n except:\n #do nothing\n return doc_vector\n \ndef compare_2_vectors(model, pipeline, text1, text2):\n tokens1=return_tokens(pipeline, text1)\n tokens2=return_tokens(pipeline, text2)\n if (len(tokens1)<10) | (len(tokens2)<10):\n return 0.0\n \n if len(tokens1)>len(tokens2):\n tokens1=tokens1[0:len(tokens2)]\n else:\n tokens2=tokens2[0:len(tokens1)]\n \n dv1=get_word2vec_vector_tkns(model,pipeline, tokens1)\n dv2=get_word2vec_vector_tkns(model,pipeline, tokens2)\n return (1-spatial.distance.cosine(dv1, dv2))\n \n#compare_2_vectors(model, \n# pipeline, \n# cityDataset['description'][0],\n# cityDataset['description'][7])\n\n\n#Generate LDA corpus\ndef generate_LDA_corpus():\n print(\"Generate Corpus\")\n pipeline=initiate_pipline() \n trainDocuments=[]\n t=time.time()\n \n #Load the random wikipedia articles:\n# wiki_list=pickle.load(open(r\"C:\\Users\\h192456\\Desktop\\backup for me\\wiki_listv2.pkl\",'rb')) \n# # loop through document list \n# for i in wiki_list: \n# # add tokens to list\n# trainDocuments.append(return_tokens(pipeline, i ))\n# \n# del wiki_list\n \n #Load city Database descriptions\n path=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\GlobalDatabase.csv'\n cityDataset=pd.read_csv(path, \n sep=';',\n encoding='utf-8')\n for i in cityDataset['description']: \n # add tokens to list\n trainDocuments.append(return_tokens(pipeline, i ))\n \n #Create an LDA topic model\n pipeline['dictionary'] = 
corpora.Dictionary(trainDocuments)\n pipeline['corpus'] = [pipeline['dictionary'].doc2bow(text) for text in trainDocuments] \n ldamodel = gensim.models.ldamodel.LdaModel(pipeline['corpus'], \n num_topics=NUM_TOPICS, \n id2word = pipeline['dictionary'],\n update_every=1,\n chunksize=1000,\n passes=5,\n random_state=1234)\n print(time.time()-t)\n print(len(trainDocuments))\n pickle.dump(pipeline,\n open(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\pipeline2.pkl', 'wb'), \n protocol=2)\n return ldamodel,pipeline\n\ndef classify_text(lda,pipeline,newDocument):\n #Turn string into individual tokens:\n stemmedToken=return_tokens(pipeline,newDocument)\n \n #Turn tokens to bag-of-words:\n bagOfWords=pipeline['dictionary'].doc2bow(stemmedToken)\n topicList = lda[bagOfWords]\n return(topicList)\n \n#Return maximum index and maximum values\ndef find_max(sparseArray):\n maxIndex=0\n for index in range(1,len(sparseArray)):\n if sparseArray[index][1]>sparseArray[maxIndex][1]:\n maxIndex=index\n return sparseArray[maxIndex][0], sparseArray[maxIndex][1]\n\n#returns numpy array from sparse array\ndef sparse2array(sparseArray):\n outArray=np.zeros([NUM_TOPICS])\n for index in range(0, len(sparseArray)):\n outArray[sparseArray[index][0]]=sparseArray[index][1]\n return outArray\n\ndef parseTopicMatrix(topicMatrix, cityDataset, threshold=250):\n column_names=['City1',\n 'url1',\n 'index2',\n 'Description1',\n 'Description2',\n 'City2',\n 'index2',\n 'url2']\n df = pd.DataFrame(columns=column_names)\n for kMainTopic in range(0,topicMatrix.shape[0]):\n for kSideTopic in range(0,topicMatrix.shape[1]):\n if topicMatrix[kMainTopic][kSideTopic]>threshold:\n if cityDataset['city'][kMainTopic] !=cityDataset['city'][kSideTopic]:\n print(topicMatrix[kMainTopic][kSideTopic],kMainTopic,kSideTopic)\n data=pd.DataFrame([[cityDataset['city'][kMainTopic],\n cityDataset['download_link'][kMainTopic],\n kMainTopic,\n cityDataset['description'][kMainTopic].encode('ascii','ignore').decode('unicode_escape'),\n cityDataset['description'][kSideTopic].encode('ascii','ignore').decode('unicode_escape'),\n cityDataset['city'][kSideTopic],\n kSideTopic,\n cityDataset['download_link'][kSideTopic]]],\n columns=column_names)\n #print(data)\n df=df.append(data,ignore_index=True)\n \n df.to_html(os.path.join(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare',\n r'matchedDatasets_200.html'))\n return df\n\n\ndef print_specific_lines(k,num=0):\n print(k['City1'][num])\n print(k['Description1'][num])\n print(\" \")\n print(k['City2'][num])\n print(k['Description2'][num])\n \n \ndef load_pickle_file():\n lda=pickle.load(open(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\lda2.pkl','rb'))\n pipeline=pickle.load(open(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\pipeline.pkl','rb'))\n topicMatrix=np.genfromtxt(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\topicMatrix.csv', \n delimiter=',',\n dtype=np.uint8)\n path=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\GlobalDatabase.csv'\n cityDataset=pd.read_csv(path, \n sep=';',\n encoding='utf-8')\n \n return lda, pipeline , topicMatrix , cityDataset \n\n# example to test the city_data\nif __name__=='__main__':\n #Load files\n pd.set_option('display.max_colwidth', -1) \n lda=pickle.load(open(r'C:\\Users\\Nick\\Dropbox\\Work\\Data 
Science\\14 - City Compare\\citycompare\\citycompare\\lda2.pkl','rb'))\n pipeline=pickle.load(open(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\pipeline.pkl','rb'))\n \n path=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\GlobalDatabase.csv'\n cityDataset=pd.read_csv(path, \n sep=';',\n encoding='utf-8')\n \n #LDA\n #pp=initiate_pipline()\n topicMatrix=np.zeros([len(cityDataset),len(cityDataset)],\n dtype=np.uint8)\n \n t=time.time()\n for kMainIndex in range(0,len(cityDataset)):\n mainTopics=classify_text(lda, \n pipeline,\n cityDataset['description'][kMainIndex])\n mainTopicsVector=sparse2array(mainTopics)\n print(kMainIndex, time.time()-t)\n np.savetxt(r\"topicMatrix.csv\",\n topicMatrix, \n fmt='%3d',\n delimiter=',')\n for kSideIndex in range (0,len(cityDataset)):\n if kSideIndex == kMainIndex:\n continue\n sideTopics=classify_text(lda, \n pipeline,\n cityDataset['description'][kSideIndex])\n sideTopicsVector=sparse2array(sideTopics)\n \n #calcualte cosine similarity\n spatial.distance.cosine(mainTopicsVector, \n sideTopicsVector)\n #cosSim=np.dot(mainTopicsVector,sideTopicsVector)/(np.linalg.norm(mainTopicsVector)*np.linalg.norm(sideTopicsVector))\n cosSim=1-spatial.distance.cosine(mainTopicsVector, sideTopicsVector)\n topicMatrix[kMainIndex][kSideIndex]=np.uint8(255*cosSim)\n \n print(time.time()-t)\n \n plt.imshow(topicMatrix, interpolation='nearest')\n plt.show()\n \n \n #Word2Vec\n #Word2Vec\n #model=generate_word2vec_corpus()\n t=time.time()\n for kMainIndex in range(0,len(cityDataset)):\n if (kMainIndex%50)==0:\n print(kMainIndex, time.time()-t)\n# np.savetxt(r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\topicMatrix_wv_200.csv',\n# topicMatrix, \n# fmt='%3d',\n# delimiter=',')\n for kSideIndex in range (0,len(cityDataset)):\n if kSideIndex == kMainIndex:\n continue\n cosSim=compare_2_vectors(model,\n pipeline, \n cityDataset['description'][kMainIndex], \n cityDataset['description'][kSideIndex])\n #calcualte cosine similarity\n topicMatrix[kMainIndex][kSideIndex]=np.uint8(255*cosSim)\n# if cosSim>0.97:\n# #print(kMainIndex)\n# print(cityDataset['description'][kMainIndex])\n# print(kSideIndex)\n# print(cityDataset['description'][kSideIndex])\n# print(cosSim)\n# print(\"\\n\")\n \n print(time.time()-t)\n \n plt.imshow(topicMatrix, interpolation='nearest')\n plt.show()\n \n \n #Laod the word2vec file\n path=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\citycompare\\citycompare\\topicMatrix_wv_200.csv'\n topicMatrix=np.loadtxt(path, \n dtype=np.uint8,\n delimiter=',')\n df_small=parseTopicMatrix(topicMatrix,cityDataset,242)\n \n #Get topic of 2 files\n print('Test')\n #Dev Notes:\n #Need to take the same number of words from both datasets (truncate to shortest one?)"
},
{
"alpha_fraction": 0.4779350459575653,
"alphanum_fraction": 0.49097973108291626,
"avg_line_length": 37.22340393066406,
"blob_id": "0249c51354554fede2d29847d1467a57b26ea334",
"content_id": "ca8e042f7f98f6a7349b2bfcb7bc9d346c10a793",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3603,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 94,
"path": "/citycompare/mine_database.py",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 23 10:02:09 2017\n\n@author: Nick\n\"\"\"\nimport csv\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nimport dateutil.parser as dparser\n\n\n#Laods the data\ndef load_link(linkToPage, databaseType):\n print(databaseType)\n\n#Use the following to find title without |Open Calagary Data\ndef longestSubstringFinder(string1, string2):\n answer = \"\"\n len1, len2 = len(string1), len(string2)\n for i in range(len1):\n match = \"\"\n for j in range(len2):\n if (i + j < len1 and string1[i + j] == string2[j]):\n match += string2[j]\n else:\n if (len(match) > len(answer)): answer = match\n match = \"\"\n return answer\n\n \n# example to test the city_data\nif __name__=='__main__':\n print('Test')\n pickleFilePath=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\GlobalDatabase.pkl'\n csvOutFilePath=r'C:\\Users\\Nick\\Dropbox\\Work\\Data Science\\14 - City Compare\\GlobalDatabase.csv'\n csvFilePath='C:\\Users\\Nick\\Desktop\\city_database.csv'\n column_names=['database_title',\n 'city',\n 'views',\n 'downloads',\n 'last_updated',\n 'download_link',\n 'description']\n df = pd.DataFrame(columns=column_names)\n idx=0\n with open(csvFilePath) as csvfile:\n fileReader = csv.reader(csvfile, delimiter=';', quotechar='|')\n for row in fileReader:\n print ', '.join(row) \n try: \n req = urllib2.Request(row[1], headers={'User-Agent' : \"Magic Browser\"}) \n con = urllib2.urlopen( req )\n soup=BeautifulSoup(con.read(), \"html.parser\")\n \n #Get Parameters\n description=soup.find('meta', {'name':'description'})['content']\n description=description.encode('utf-8').strip()\n viewCount=re.findall('\"viewCount\":\\d+',str(soup))\n viewCount=int(filter(str.isdigit,viewCount[0]))\n title=str(soup.find('title').text)\n lastUpdated=str(re.findall('[Uu]pdatedAt\":.+?(?=\")', str(soup))[0])\n lastUpdated=dparser.parse(lastUpdated,fuzzy=True)\n downloadCount=re.findall('\"downloadCount\":\\d+',str(soup))\n downloadCount=int(filter(str.isdigit,downloadCount[0]))\n \n #Add to dataframe\n data=pd.DataFrame([[longestSubstringFinder(title,description),\n row[0],\n viewCount,\n downloadCount,\n pd.to_datetime(lastUpdated),\n row[1],\n description.replace('\\n', ' ').replace('\\r', '') ]],\n columns=column_names)\n except:\n print(\"Could not read\")\n \n if len(df)==0:\n df=data\n else:\n df=df.append(data, ignore_index=True)\n idx=idx+1\n \n #if idx>5:\n # break\n \n if idx % 10==0:\n df.to_pickle(pickleFilePath) # where to save it, usually as a .pkl\n df.to_csv(csvOutFilePath,sep=';')\n \n df.to_pickle(pickleFilePath) # where to save it, usually as a .pkl\n\n \n "
},
{
"alpha_fraction": 0.5461254715919495,
"alphanum_fraction": 0.5940959453582764,
"avg_line_length": 14.411765098571777,
"blob_id": "82fa9217a6ab4e76f97195eac49bca806554c0d3",
"content_id": "c2c7d1e3f6b11e2cd1cf4ac57f4b61beffba3d76",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 271,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 17,
"path": "/citycompare/get_db_entry.py",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 23 10:02:09 2017\n\n@author: Nick\n\"\"\"\nimport csv\n\n\n#Laods the data\ndef load_link(linkToPage, databaseType):\n print(databaseType)\n \n \n# example to test the city_data\nif __name__=='__main__':\n print('Test')\n \n "
},
{
"alpha_fraction": 0.48571428656578064,
"alphanum_fraction": 0.6095238327980042,
"avg_line_length": 12.125,
"blob_id": "1b65d2231cd6abd0297683d3a58a5beb79c4d6fb",
"content_id": "ddb0ec1e6e0797ffc6a7a99870c4fffc89d8be19",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 105,
"license_type": "permissive",
"max_line_length": 35,
"num_lines": 8,
"path": "/citycompare/03_generate_LDA_corpus.py",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 26 18:18:33 2017\n\n@author: Nick\n\"\"\"\n\n#Generate LDA corpus\n"
},
{
"alpha_fraction": 0.751893937587738,
"alphanum_fraction": 0.7935606241226196,
"avg_line_length": 36.71428680419922,
"blob_id": "2b99e36128515b4a5c5c7edf6c2733a9115f1186",
"content_id": "9e92114d6c6759c84bf1c49b16dd9ef3b64ca5d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 528,
"license_type": "permissive",
"max_line_length": 329,
"num_lines": 14,
"path": "/README.md",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# citycompare\ncity compare compares cities\n\n# to run local\n- pip install -r requirements.txt\n- python run.py\n- browse to localhost:5000\n\n# to deploy\npush to master\n\n#Development progress:\n20171203 \nAttempted to train 100 topic LDA on 20,000+ wikiepdia articles and then apply it to the city database descriptions. After classification the accuracy was not the greatest even with 98% accuracy requirement. This was most likley because the topics are too broad. Next step is to attempt to classify on the city descriptions only.\n"
},
{
"alpha_fraction": 0.5680863857269287,
"alphanum_fraction": 0.5848830342292786,
"avg_line_length": 31.076923370361328,
"blob_id": "5c63b4e426c089c16fc98a2c97fdeac4f9372709",
"content_id": "233ad54cfd87988aa5448490982eb7e83213b6ca",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1667,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 52,
"path": "/citycompare/mine_websites.py",
"repo_name": "edisondev/citycompare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 20 20:48:45 2017\n\n@author: Nick\n\"\"\"\n\nfrom sodapy import Socrata\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport csv\nimport time\nimport pandas as pd\n\n\ndatasets=[\"calgary\", \"edmonton\", \"novascotia\",\"winnipeg\", \"regina\", \"sfgov\"]\next=[\"ca\",\"ca\",\"ca\",\"ca\",\"ca\",\"org\"]\n#client = Socrata(\"data.calgary.ca\", None)\n#client.get('atsy-3a4w')\ndatasets=[\"sfgov\"]\nfor idx,city in enumerate(datasets):\n iSite=25\n keep_reading =True\n #city=\"calgary\"\n #for city in datasets:\n print(city)\n while keep_reading==True: \n #main_page = 'https://data.'+city+'.ca/browse?limitTo=datasets&page='+str(iSite)\n main_page = 'https://data.'+city+'.'+ext[idx]+'/browse?limitTo=datasets&page='+str(iSite)\n req = urllib2.Request(main_page, headers={'User-Agent' : \"Magic Browser\"}) \n con = urllib2.urlopen( req )\n soup=BeautifulSoup(con.read(), 'html.parser')\n page_links=soup.find_all('meta', {'itemprop':'sameAs'})\n \n if(len(page_links==0)): #check that the \n print(\"Found end\")\n keep_reading=False\n break \n \n with open(r'C:\\Users\\Nick\\Desktop\\city_database.csv', 'ab') as csvfile:\n citywriter = csv.writer(csvfile, delimiter=' ',\n quotechar=' ', quoting=csv.QUOTE_MINIMAL) \n for i in range(0,len(page_links)):\n citywriter.writerow([city+' ; '+ str(page_links[i]['content']) ] ) \n iSite=iSite+1\n if iSite>50:\n keep_reading=False\n time.sleep(1) \n print(iSite)\n\n #page_links[0]['content']"
}
] | 6 |
leigharobinson/chap5_PythonDictionaries | https://github.com/leigharobinson/chap5_PythonDictionaries | 4c5df5e792fda24fec8c8768a570a5d4fa34b19b | aff03cd179666371b9620faf27d69cd7b0ac466a | 9dca8f4e4c9d786f25ad1a80d1ba9ebe5c9e3123 | refs/heads/master | 2022-11-17T01:17:02.671827 | 2020-07-17T03:52:01 | 2020-07-17T03:52:01 | 280,320,690 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7479861974716187,
"alphanum_fraction": 0.7479861974716187,
"avg_line_length": 45.97297286987305,
"blob_id": "5362adba1a367afd26d09e26cefb9ccba63e2e11",
"content_id": "2ff00d2e3c7ab1a166b55fcabe2a445d09335aa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1738,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 37,
"path": "/dictionary_of_words.py",
"repo_name": "leigharobinson/chap5_PythonDictionaries",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCreate a dictionary with key value pairs to\nrepresent words (key) and its definition (value)\n\"\"\"\nword_definitions = dict()\n# add key value pairs\nword_definitions['Adaptability'] = 'the quality of being able to adjust to new conditions.'\n# print(word_definitions)\n\"\"\"\nAdd several more words and their definitions\n Example: word_definitions[\"Awesome\"] = \"The feeling of students when they are learning Python\"\n\"\"\"\nword_definitions['Balance'] = 'a condition in which different elements are equal or in the correct proportions.'\nword_definitions['Conscientiousness'] = \"the quality of wishing to do one's work or duty well and thoroughly.\"\nword_definitions['Determination'] = 'firmenss of purpose; resoluteness.'\nword_definitions['Effervescent'] = 'vivacious and enthusiastic'\nword_definitions['Facilitate'] = 'make (an action or process) easy or easier.'\nword_definitions['Gratitude'] = 'the quality of being thankful; readiness to show appreciation for and to return kindness.'\nword_definitions[\n 'Hygge'] = 'a quality of coziness and comfortable conviviality that engenders a feeling of contentment or well-being (regarded as a defining characteristic of Danish culture).'\n# print(word_definitions)\n\"\"\"\nUse square bracket lookup to get the definition of two\nwords and output them to the console with `print()`\n\"\"\"\n# print(\"First Word:\", word_definitions[\"Hygge\"])\n# print(\"Second Word:\", word_definitions[\"Adaptability\"])\n\n\n\"\"\"\nLoop over the dictionary to get the following output:\n The definition of [WORD] is [DEFINITION]\n The definition of [WORD] is [DEFINITION]\n The definition of [WORD] is [DEFINITION]\n\"\"\"\nfor word, definiton in word_definitions.items():\n print(f'The definition of {word} is {definiton}', \"\\n\")\n"
}
] | 1 |
mi3nts/mintsTesting | https://github.com/mi3nts/mintsTesting | de5257eeb5c8799d6f744624e2931fd106d8ad5e | 1b440b4a574991137ee4040f07f8fa9e347ecd16 | afb5ef5410b87f61d6c1f7c296faa002d221ca9d | refs/heads/master | 2020-12-20T13:47:27.027792 | 2020-02-04T22:10:50 | 2020-02-04T22:10:50 | 236,097,123 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7220801115036011,
"alphanum_fraction": 0.7331628203392029,
"avg_line_length": 52.272727966308594,
"blob_id": "a135e8fb1dc9bda08ab8b992581a0596027406f6",
"content_id": "5bc6e32373132511d0e9873952958dfe7f95caab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1173,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 22,
"path": "/people/berkleyShofner/Light-Module-Instructable.md",
"repo_name": "mi3nts/mintsTesting",
"src_encoding": "UTF-8",
"text": "# **Getting to know the light module:**\n\n### **A. Getting the IDE Ready**\n\n1. Install Atom: https://atom.io/ and https://flight-manual.atom.io/getting-started/sections/installing-atom/\n2. Go to the settings page (Crtl+Comma) and go to the +Install tab at the bottom.\n3. Search for \"platformio\" and install the 4 packages made by platformio: \n - platformio-idle \n - platformio-idle-debugger\n - platformio-idle-terminal\n - platformio-aia\n4. If any errors appear while installing these packages, there should be a link at the top right in the error message itself that will give you step by step instrutions on fixing the errors.\n\n### **B. Creating First File**\n\n1. Open up Atom and go to the \"PlatformIO Home\" page.\n2. Click on \"+ New Project\" and name the project (making sure to not use any spaces).\n3. Click on the board option and type in \"nano\". Select the board \"Arduino Nano ATmega328 (New Bootloader)\", keep the framework as \"Arduino\", and click Finish.\n4. There should now be a Project tab to the right of Atom with your one project folder in it.\n5. To get to the c++ file, click on the \"srs\" folder under your project folder and click on \"main.cpp\".\n\n### **C. \n"
},
{
"alpha_fraction": 0.6023297905921936,
"alphanum_fraction": 0.6366740465164185,
"avg_line_length": 35.8814811706543,
"blob_id": "e9e497d6654abeaf2fde9fb1dbec10e4593cad93",
"content_id": "96e807bfbf78f8abe691062f14c9c17f6f851567",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4979,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 135,
"path": "/people/berkleyShofner/Light-Module/src/main.cpp",
"repo_name": "mi3nts/mintsTesting",
"src_encoding": "UTF-8",
"text": "#include <math.h>\n#include <Wire.h>\n#include <Adafruit_Sensor.h>\n#include \"Adafruit_TSL2591.h\"\n\nAdafruit_TSL2591 tsl = Adafruit_TSL2591(2591);\n\nconst int LIGHT_SENSOR=A1; //set the sensor to pin A0\nconst int ledPin=12; //Connect the LED Grove module to Pin12, Digital 12\nconst int thresholdvalue=10; //The treshold for which the LED should turn on. Setting it lower will make it go on at more light, higher for more darkness\nfloat Rsensor; //Resistance of sensor in K\n\nvoid displaySensorDetails(void)\n{\n sensor_t sensor;\n tsl.getSensor(&sensor);\n Serial.println(F(\"------------------------------------\"));\n Serial.print (F(\"Sensor: \")); Serial.println(sensor.name);\n Serial.print (F(\"Driver Ver: \")); Serial.println(sensor.version);\n Serial.print (F(\"Unique ID: \")); Serial.println(sensor.sensor_id);\n Serial.print (F(\"Max Value: \")); Serial.print(sensor.max_value); Serial.println(F(\" lux\"));\n Serial.print (F(\"Min Value: \")); Serial.print(sensor.min_value); Serial.println(F(\" lux\"));\n Serial.print (F(\"Resolution: \")); Serial.print(sensor.resolution, 4); Serial.println(F(\" lux\"));\n Serial.println(F(\"------------------------------------\"));\n Serial.println(F(\"\"));\n delay(500);\n}\n\nvoid configureSensor(void)\n{\n // You can change the gain on the fly, to adapt to brighter/dimmer light situations\n //tsl.setGain(TSL2591_GAIN_LOW); // 1x gain (bright light)\n tsl.setGain(TSL2591_GAIN_MED); // 25x gain\n //tsl.setGain(TSL2591_GAIN_HIGH); // 428x gain\n\n // Changing the integration time gives you a longer time over which to sense light\n // longer timelines are slower, but are good in very low light situtations!\n //tsl.setTiming(TSL2591_INTEGRATIONTIME_100MS); // shortest integration time (bright light)\n // tsl.setTiming(TSL2591_INTEGRATIONTIME_200MS);\n tsl.setTiming(TSL2591_INTEGRATIONTIME_300MS);\n // tsl.setTiming(TSL2591_INTEGRATIONTIME_400MS);\n // tsl.setTiming(TSL2591_INTEGRATIONTIME_500MS);\n // tsl.setTiming(TSL2591_INTEGRATIONTIME_600MS); // longest integration time (dim light)\n\n /* Display the gain and integration time for reference sake */\n Serial.println(F(\"------------------------------------\"));\n Serial.print (F(\"Gain: \"));\n tsl2591Gain_t gain = tsl.getGain();\n switch(gain)\n {\n case TSL2591_GAIN_LOW:\n Serial.println(F(\"1x (Low)\"));\n break;\n case TSL2591_GAIN_MED:\n Serial.println(F(\"25x (Medium)\"));\n break;\n case TSL2591_GAIN_HIGH:\n Serial.println(F(\"428x (High)\"));\n break;\n case TSL2591_GAIN_MAX:\n Serial.println(F(\"9876x (Max)\"));\n break;\n }\n Serial.print (F(\"Timing: \"));\n Serial.print((tsl.getTiming() + 1) * 100, DEC);\n Serial.println(F(\" ms\"));\n Serial.println(F(\"------------------------------------\"));\n Serial.println(F(\"\"));\n}\n\nvoid setup(void)\n{\n Serial.begin(9600); //Start the Serial connection\n pinMode(ledPin,OUTPUT); //Set the LED on Digital 12 as an OUTPUT\n\n //Serial.println(\"Sensor1; Full; Visable; IR; Lux; Sensor2; Analog; Resistance\");\n //Serial.println(F(\"Starting Adafruit TSL2591 Test!\"));\n\n if (tsl.begin())\n {\n //Serial.println(F(\"Found a TSL2591 sensor\"));\n }\n else\n {\n Serial.println(F(\"No sensor found ... check your wiring?\"));\n while (1);\n }\n\n /* Display some basic information on this sensor */\n //displaySensorDetails();\n\n /* Configure the sensor */\n //configureSensor();\n}\n\nvoid advancedRead(void)\n{\n // More advanced data read example. 
Read 32 bits with top 16 bits IR, bottom 16 bits full spectrum\n // That way you can do whatever math and comparisons you want!\n uint32_t lum = tsl.getFullLuminosity();\n uint16_t ir, full;\n ir = lum >> 16;\n full = lum & 0xFFFF;\n Serial.print(\"\"); Serial.print(full); Serial.print(\";\");\n Serial.print(\"\"); Serial.print(full - ir); Serial.print(\";\");\n Serial.print(\"\"); Serial.print(ir); Serial.print(\";\");\n Serial.print(\"\"); Serial.print(tsl.calculateLux(full, ir), 6); Serial.print(\";~\");\n /*Serial.print(F(\"[ \")); Serial.print(millis()); Serial.print(F(\" ms ] \"));\n Serial.print(F(\"IR: \")); Serial.print(ir); Serial.print(F(\" \"));\n Serial.print(F(\"Full: \")); Serial.print(full); Serial.print(F(\" \"));\n Serial.print(F(\"Visible: \")); Serial.print(full - ir); Serial.print(F(\" \"));\n Serial.print(F(\"Lux: \")); Serial.println(tsl.calculateLux(full, ir), 6);*/\n}\n\nvoid loop(void)\n{\n int sensorValue = analogRead(LIGHT_SENSOR);\n Rsensor = (float)(1023-sensorValue)*10/sensorValue;\n\n\n Serial.print(\"TSL2591@\");\n advancedRead();\n //Serial.print(\"Grove@\");\n //Serial.print(\"\"); Serial.print(sensorValue); Serial.print(\";\");\n //Serial.print(\"\"); Serial.print(Rsensor,DEC); Serial.print(\";~\");\n //Serial.println(\"\");\n\n /*Serial.print(\"Analog read data: \");\n Serial.println(sensorValue);\n Serial.print(\"Sensor resistance: \");\n Serial.println(Rsensor,DEC);//show the ligth intensity on the serial monitor;\n Serial.println(\"\");*/\n\n delay(10000);\n}\n"
},
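For reference, the bit splitting done in `advancedRead()` above (top 16 bits IR, bottom 16 bits full spectrum) can be reproduced off-device. A minimal Python sketch, where `lum` stands in for the hypothetical raw 32-bit word that `getFullLuminosity()` would return on the sensor:

```python
# Sketch: split a TSL2591 32-bit luminosity word the same way advancedRead() does.
# `lum` is a made-up sample value; on the device it comes from getFullLuminosity().
lum = 0x0123_4567

ir = lum >> 16          # top 16 bits: infrared channel
full = lum & 0xFFFF     # bottom 16 bits: full spectrum (visible + IR)
visible = full - ir     # visible light is the difference

print(f"IR={ir}, Full={full}, Visible={visible}")
```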
{
"alpha_fraction": 0.5546277165412903,
"alphanum_fraction": 0.5741127133369446,
"avg_line_length": 25.127273559570312,
"blob_id": "647ce6d91f9c6b522529d4e2f84f10a8ecf2d57e",
"content_id": "c4711edb0ee069673e1289e7467c8702600762ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1437,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 55,
"path": "/people/berkleyShofner/Python3/Light-Sensor-Readings.py",
"repo_name": "mi3nts/mintsTesting",
"src_encoding": "UTF-8",
"text": "#/dev/ttyUSB0\nimport csv\nimport time\nimport serial\nimport math\nimport os\nimport datetime\nfrom collections import OrderedDict\n\ndef split(dataString, dateTime):\n dataOut = dataString.split('@')\n if(len(dataOut) == 2):\n id = dataOut[0]\n sensorData = dataOut[1]\n TSL2591(sensorData, dateTime)\n\ndef TSL2591(sensorData, dateTime):\n dataOut = sensorData.split(';')\n length = 4\n if(len(dataOut) == (length + 1)):\n sensorDictionary = OrderedDict([\n (\"dateTime\", str(dateTime)),\n \t (\"Full\", dataOut[0]),\n \t(\"Visible\", dataOut[1]),\n (\"IR\", dataOut[2]),\n (\"Lux\", dataOut[3]),\n \t ])\n csvWriter(sensorDictionary)\n\ndef csvWriter(sensorDictionary):\n keys = list(sensorDictionary.keys())\n exists = os.path.isfile(\"/home/berkley/Documents/Python3/TSL2591.csv\")\n with open('TSL2591.csv', 'a') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=keys)\n if(not(exists)):\n writer.writeheader()\n writer.writerow(sensorDictionary)\n\n\n\nser = serial.Serial('/dev/ttyUSB0')\nser.isOpen()\nline = []\n\nwhile True:\n for x in ser.read():\n line.append(chr(x))\n if chr(x) == '~':\n dataFirst = (''.join(line))\n dataSecond = dataFirst.replace('~', '')\n split(dataSecond, datetime.datetime.now())\n line = []\n break\n\nser.close()\n"
},
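The logger above depends on the `SensorID@value;value;...;~` framing emitted by the Arduino sketch. A small sketch of the same parsing logic with no serial port attached (the sample string is made up, but follows the `Full;Visible;IR;Lux;~` field order the logger expects):

```python
# Sketch: parse one framed reading ("SensorID@Full;Visible;IR;Lux;~") offline,
# mirroring the split()/TSL2591() pair in the logger above.
sample = "TSL2591@123;100;23;45.678900;~"

def parse(frame: str) -> dict:
    frame = frame.replace("~", "")            # drop the end-of-frame marker
    sensor_id, payload = frame.split("@")     # sensor name before '@', data after
    full, visible, ir, lux, _ = payload.split(";")  # trailing ';' leaves an empty field
    return {"sensor": sensor_id, "Full": full, "Visible": visible,
            "IR": ir, "Lux": lux}

print(parse(sample))
```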
{
"alpha_fraction": 0.6569892764091492,
"alphanum_fraction": 0.6838709712028503,
"avg_line_length": 36.20000076293945,
"blob_id": "f63226bcaaf21c8f1798d7c42b30e2236e587048",
"content_id": "1e0839a2f45a3ac3d498593f3d81a9d4314d1309",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 930,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 25,
"path": "/people/berkleyShofner/Light-Module-Arduino-Nano-Grove/src/main.cpp",
"repo_name": "mi3nts/mintsTesting",
"src_encoding": "UTF-8",
"text": "#include <math.h>\n#include <Wire.h>\n#include <Adafruit_Sensor.h>\n\nconst int LIGHT_SENSOR=A1; //set the sensor to pin A0\nconst int ledPin=12; //Connect the LED Grove module to Pin12, Digital 12\nconst int thresholdvalue=10; //The treshold for which the LED should turn on. Setting it lower will make it go on at more light, higher for more darkness\nfloat Rsensor; //Resistance of sensor in K\n\nvoid setup()\n{\n Serial.begin(9600); //Start the Serial connection\n pinMode(ledPin,OUTPUT); //Set the LED on Digital 12 as an OUTPUT\n}\n\nvoid loop()\n{\n int sensorValue = analogRead(LIGHT_SENSOR);\n Rsensor = (float)(1023-sensorValue)*10/sensorValue;\n Serial.println(\"the analog read data is \");\n Serial.println(sensorValue);\n Serial.println(\"the sensor resistance is \");\n Serial.println(Rsensor,DEC);//show the ligth intensity on the serial monitor;\n delay(500);\n}\n"
},
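The Grove sketch above converts the 10-bit ADC reading into the photoresistor's resistance with `R = (1023 - reading) * 10 / reading` (10 kΩ divider, resistance in kΩ). The same formula in Python, with a division-by-zero guard added for the all-dark reading (the guard is my addition, not in the Arduino code):

```python
# Sketch: the Grove light module's resistance formula, reproduced in Python.
# With a 10-bit ADC (0..1023) and a 10k divider resistor, the photoresistor
# value in kilo-ohms is R = (1023 - reading) * 10 / reading.
def grove_resistance_kohm(sensor_value: int) -> float:
    if sensor_value == 0:          # guard against division by zero at full darkness
        return float("inf")
    return (1023 - sensor_value) * 10 / sensor_value

for reading in (100, 512, 900):
    print(reading, "->", round(grove_resistance_kohm(reading), 2), "kOhm")
```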
{
"alpha_fraction": 0.6551281809806824,
"alphanum_fraction": 0.6679487228393555,
"avg_line_length": 19.526315689086914,
"blob_id": "0884efd7e144862551d4f98da96ef4e2bc26a581",
"content_id": "f0c3a1977b935bdfb81c0cdd8dcfaaffbb1accd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 38,
"path": "/people/berkleyShofner/Python3/Camera.py",
"repo_name": "mi3nts/mintsTesting",
"src_encoding": "UTF-8",
"text": "import cv2\nimport csv\nimport os\nimport time\nimport datetime\nimport numpy as np\nfrom collections import OrderedDict\n\ntime = datetime.datetime.now()\n\nvideo = cv2.VideoCapture(4)\n\ncheck, frame = video.read()\n\nblue = np.sum(frame[:, :, 0])\ngreen = np.sum(frame[:, :, 1])\nred = np.sum(frame[:, :, 2])\n\ncameraDictionary = OrderedDict([\n (\"dateTime\", time),\n (\"Blue\", blue),\n (\"Green\", green),\n (\"Red\", red),\n ])\n\nkeys = list(cameraDictionary.keys())\nexists = os.path.isfile(\"/home/berkley/Documents/Python3/Camera.csv\")\nwith open('Camera.csv', 'a') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=keys)\n if(not(exists)):\n writer.writeheader()\n writer.writerow(cameraDictionary)\n\ncv2.imshow(\"Capturing\", frame)\n\ncv2.waitKey(5)\n\nvideo.release()\n"
}
] | 5 |
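Camera.py above records the per-channel pixel totals of one captured frame. A minimal sketch of the same computation on a synthetic frame, since a camera at device index 4 may not exist on every machine (the synthetic frame is my stand-in for `video.read()`):

```python
import numpy as np

# Sketch: the per-channel totals Camera.py records, computed on a synthetic
# frame instead of a live capture. OpenCV frames are BGR, so channel 2 is red.
frame = np.zeros((4, 4, 3), dtype=np.uint8)
frame[:, :, 2] = 255  # paint the frame pure red

blue = np.sum(frame[:, :, 0])
green = np.sum(frame[:, :, 1])
red = np.sum(frame[:, :, 2])
print({"Blue": int(blue), "Green": int(green), "Red": int(red)})
```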
berkguzel/composetok8s | https://github.com/berkguzel/composetok8s | 7bddd6460028a6117274a2058cdf9d7c4d56c65e | 8baf0e78ece19a82a7b338ba095e3503c51a0923 | 490f87b8176b91a48162e84311142834384aae4b | refs/heads/master | 2023-03-26T06:39:42.463101 | 2021-03-26T08:15:10 | 2021-03-26T08:15:10 | 279,644,890 | 16 | 0 | null | 2020-07-14T17:01:40 | 2021-02-28T20:58:17 | 2021-03-26T08:15:10 | Python | [
{
"alpha_fraction": 0.6887417435646057,
"alphanum_fraction": 0.7019867300987244,
"avg_line_length": 12.545454978942871,
"blob_id": "ee273215e7e3a38c2ab462cc23c800bad3587657",
"content_id": "5cdb31a169ca0f27ef90fd5f3404cf4fea79a488",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 151,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 11,
"path": "/Dockerfile",
"repo_name": "berkguzel/composetok8s",
"src_encoding": "UTF-8",
"text": "FROM python:3-alpine\n\nWORKDIR /app\n\nCOPY . .\n\nRUN pip install --upgrade pip \n\nRUN pip install -r requirements.txt\n\nENTRYPOINT [\"python3\", \"main.py\"]\n\n\n"
},
{
"alpha_fraction": 0.5749659538269043,
"alphanum_fraction": 0.5792821645736694,
"avg_line_length": 30.22694969177246,
"blob_id": "3593e567544d956647891a27fae42ad26c85eb92",
"content_id": "7bc3c74c9aec6cb012f5f40544323f462b0c9e17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4402,
"license_type": "no_license",
"max_line_length": 381,
"num_lines": 141,
"path": "/main.py",
"repo_name": "berkguzel/composetok8s",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport sys\nimport yaml\n\ndef main():\n parser=argparse.ArgumentParser()\n parser.add_argument('-n', '--name', help='name of your app', required=True, type=str)\n parser.add_argument('-k', '--kind', help='kind of your yaml file', required=True, type=str)\n parser.add_argument('-i', '--image', help='image you want to use', type=str)\n parser.add_argument('-s', '--selector', help='name of your selector', type=str)\n parser.add_argument('-p', '--port', help='number of your port', type=str)\n parser.add_argument('-r', '--replicas', help='count of your replicas', type=str)\n args=parser.parse_args()\n \n tempDict=createTempDict()\n\n if args.kind: # we controlled kind field is empty or not\n if (list(args.kind)[0]==\"p\"): # We fixed lower inital letters\n kind=args.kind\n kind=kind.replace(\"p\",\"P\")\n api=\"v1\" #we use v1 for pods\n else:\n kind=args.kind\n api=\"v1\" #we use v1 for pods\n \n \n if (list(args.kind)[0]==\"d\"):\n kind=args.kind # We fixed lower inital letters\n kind=kind.replace(\"d\",\"D\")\n api=\"apps/v1\" #we use apps/v1 for deployments\n \n else:\n kind=args.kind\n api=\"apps/v1\" #we use apps/v1 for deployments\n \n \n if args.image:\n tempDict[\"image\"]=args.image\n \n if args.name:\n name=args.name\n \n if args.replicas and kind == \"Deployment\":\n replicas=int(args.replicas)\n else:\n replicas= None\n\n if args.port:\n tempDict[\"ports\"]=int(args.port)\n \n \n\n try:\n yamlDict={\n \"apiVersion\":api,\n \"kind\":kind, \n \"metadata\":{\"labels\":{\"app\":\"%s\"%(name,)}},\n \"spec\":{\"selector\":{\"matchLabels\":{\"app\":\"%s\"%(name,)}},\"replicas\":replicas,\"template\":{\"metadata\":{\"labels\":{\"app\":\"%s\"%(name,)}},\"spec\":{\"containers\":[{\"name\":name,\"image\":\"%s\"%(tempDict[\"image\"],),\"ports\":[{\"containerPort\":tempDict[\"ports\"]}],\"volumeMounts\":[{\"name\":tempDict[\"volumeName\"],\"mountPath\":\"%s\"%(None,)}]}],\"volumes\":[{\"name\":tempDict[\"volumeName\"]}]}}},\n } \n except NameError:\n print(\"You are missing define name or kind\")\n return None\n \n \n try:\n with open(r\"./deployment.yaml\",\"w\") as file:\n yaml.safe_dump((yamlDict),file) \n print(\"{}/{} yaml file created. 
\".format(kind,name))\n \n except Exception as err:\n print(err)\n \n try:\n file=open(\"deployment.yaml\",\"r\")\n print(file.read())\n \n except Exception as err:\n print(err)\n\n\n\ndef createTempDict():\n titles=''\n index=0\n with open(r\"./docker-compose.yml\") as yFile:\n dcFile=yaml.full_load(yFile) # we read yaml file in dict type\n \n\n if len(list(dcFile[\"services\"]))>1:\n print(\"services:\")\n\n for item in list(dcFile[\"services\"]):\n print(item )\n image=input(\"You have {} services in your docker-compose.yaml file, please choose a service: \".format(len(list(dcFile[\"services\"]))))\n\n try:\n index=list(dcFile[\"services\"]).index(image)\n except Exception:\n image=input(\"Please enter your service name correctly\")\n index=list(dcFile[\"services\"]).index(image)\n\n\n \n \n\n firstItem=list(dcFile[\"services\"])[int(index)]\n\n #firstItem1=list(dcFile[\"services\"])[1] # first services name from docker-compose\n titles=list(dcFile[\"services\"][firstItem])#we will hold subtitles below of big title\n #these both variables will help us to access data we need rapidly on dcFile \n\n tempDict={}\n\n for item in titles: # it gets values of titles\n value=str(dcFile[\"services\"][firstItem][item])\n tempDict[item]=value # created tempDict to use data more efficiently\n \n \n try:\n volumeName=list(dcFile[\"volumes\"])[int(index)] # we took volume name of volume created in compose file\n tempDict[\"volumeName\"]=volumeName\n except IndexError:\n tempDict[\"volumeName\"]=\"\"\n\n \n try:\n ports=tempDict[\"ports\"].split(\":\",2) #ports were getten in list form we had to convert it to str\n ports=ports[1].split(\"']\",2)\n tempDict[\"ports\"]=int(ports[0]) # \n except KeyError:\n tempDict[\"ports\"]=\"\"\n\n return tempDict\n\n\n\nif __name__ == \"__main__\":\n main()"
},
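The core of main.py above is just assembling a nested manifest dict and serializing it with `yaml.safe_dump`. A stripped-down sketch of that step, with placeholder names and image (none of these values come from the repo):

```python
import yaml

# Sketch: build a minimal Deployment manifest as a nested dict and dump it,
# the same way main.py writes deployment.yaml. "demo" and "nginx:alpine" are
# placeholder values, not defaults from the tool.
manifest = {
    "apiVersion": "apps/v1",
    "kind": "Deployment",
    "metadata": {"labels": {"app": "demo"}},
    "spec": {
        "replicas": 1,
        "selector": {"matchLabels": {"app": "demo"}},
        "template": {
            "metadata": {"labels": {"app": "demo"}},
            "spec": {"containers": [{"name": "demo", "image": "nginx:alpine",
                                     "ports": [{"containerPort": 80}]}]},
        },
    },
}
print(yaml.safe_dump(manifest))
```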
{
"alpha_fraction": 0.48275861144065857,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 13,
"blob_id": "315856e08307f80b2125d907977b80c241d77032",
"content_id": "2c1dfc96a9e61a48c781193e2545b0f220676e6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "berkguzel/composetok8s",
"src_encoding": "UTF-8",
"text": "argparse==1.4.0\nPyYAML==5.4\n\n"
},
{
"alpha_fraction": 0.7127991914749146,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 55.52941131591797,
"blob_id": "dc6b9494916aaacd462208fd000802401c683d90",
"content_id": "3e415c92a580269caccc1210bca69e599282094c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 961,
"license_type": "no_license",
"max_line_length": 291,
"num_lines": 17,
"path": "/README.md",
"repo_name": "berkguzel/composetok8s",
"src_encoding": "UTF-8",
"text": "This repository's purpose is helping to beginners who want to jump in the Kubernetes from Docker. It will convert your docker-compose.yaml file to a different yaml file form(Pod, Deployment) to use in Kubernetes. It is not proper for either complex files nor big deployments, just for fun :)\n\n# USAGE\n1. Run `docker run -it 227500/composetok8s:0.1 --name= --kind= ` this command will give deployment.yaml file as output on terminal.\n1. Run `docker run -it -v \"$PWD:/app\" 227500/composetok8s:0.1 --name= --kind= ` this command will create deployment.yaml file in your current directory.\n\n \n\n# \n1. Run `docker build -t //yourtage .`\n1. Run `docker run -it //yourtage --name= --kind`\n1. deployment.yaml file will be showed as output in your terminal\n\n#\n1. Run `git clone https://github.com/berkguzel/composetok8s.git` in where your docker-compose.yml is.\n1. Run `python3 main.py --name= --kind=`\n1. deployment.yaml file will be created in your current directory\n"
}
] | 4 |
bitzj2015/zero-crossing-interval | https://github.com/bitzj2015/zero-crossing-interval | 75fd0dfbb736b787628906100bd39006980c6cef | 302d81ecc4ef7c20353594bdfb905c4bcd0bfe97 | 5d19cdc67b3f80fdced476cbcb2875d3ed9a2617 | refs/heads/master | 2021-01-19T04:10:24.353803 | 2019-01-07T09:02:28 | 2019-01-07T09:02:28 | 63,202,723 | 5 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.7683741450309753,
"alphanum_fraction": 0.8040089011192322,
"avg_line_length": 111.25,
"blob_id": "088a0d5e2e75fd97c631e15c2321f261643f3dce",
"content_id": "25c92e84c7e07d42bb6261418a82f78df9fe82de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 449,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 4,
"path": "/README.md",
"repo_name": "bitzj2015/zero-crossing-interval",
"src_encoding": "UTF-8",
"text": "# Acoutisc fingerprint recognition based on zero-crossing interval\nImplementation of this research project by Python. This is my first Python program, written in July of 2016, during which I took a two-week internship in Prof. Fang Deng's laboratory.\n# Paper\nXianghu YUE, Fang DENG and Yue XU. Multidimensional zero-crossing interval points: a low sampling rate acoustic fingerprint recognition method. SCIENCE CHINA Information Sciences 62, 019202 (2019).\n"
},
{
"alpha_fraction": 0.5018289685249329,
"alphanum_fraction": 0.5461819767951965,
"avg_line_length": 28.54054069519043,
"blob_id": "f7cc269c9eea9875a12c096653fb6b9cf7546831",
"content_id": "7ee60d285fa1f0c2949b3e3c120a68c1e073cb99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4534,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 148,
"path": "/zero.py",
"repo_name": "bitzj2015/zero-crossing-interval",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#coding=utf-8\nimport numpy as np\nimport xlwt\n\n\ndef establish_matrix(data, m_dim, l_max, average):\n dim = m_dim * l_max\n num_data = data.shape[1]\n amount = 0\n for i in range(0, num_data):\n amount = amount + data[0, i]\n average = 1./num_data * amount\n print(average)\n zero_data = []\n j = 0\n for i in range(0, num_data - 1):\n if (data[0, i] - average) * (data[0, i + 1] - average) <= 0:\n zero_data.append(i)\n j = j + 1\n num_zero_data = j\n w = np.zeros((1, m_dim * dim + 1))\n for i in range(1, m_dim + 1):\n j = 0\n l = []\n start_point = (i-1)*dim\n length = i * l_max\n count = 0\n while j + i <= num_zero_data-1:\n l.append(zero_data[j + i] - zero_data[j] - 1)\n j = j + 1\n # print(L)\n for j in range(1, length + 1):\n for t in range(0, len(l)):\n if l[t] == j:\n w[0, start_point + j] = w[0, start_point + j] +1\n count = count + 1\n\n if i == 1:\n for t in range(0, len(l)):\n if l[t] == 0:\n w[0, 0] = w[0, 0] + 1\n count = count + w[0, 0]\n if count != 0:\n w[0, 0:length + 1] = 1./count * w[0, 0:length + 1]\n else:\n if count != 0:\n w[0, start_point + 1:start_point + length + 1] = 1./count * w[0, start_point + 1:start_point + length + 1]\n return w\n\"\"\"\n建立声纹特征矩阵\n输入采样数据矩阵、期望的维数、相邻过零点间最大采样点数\n输出声纹特征矩阵\n\"\"\"\n\n\ndef distance(w1, w2):\n dis = np.linalg.norm(w1 - w2)\n return dis\n\"\"\"求行向量的欧氏距离\n\"\"\"\n\n\ndef model_matrix(model_data, m_dim, l_max, average):\n row = model_data.shape[0]\n dim = m_dim * l_max\n model = np.zeros((row, m_dim * dim + 1))\n for i in range(0, row):\n data = model_data[i, :]\n model[i, :] = establish_matrix(data, m_dim, l_max, average)\n return model\n\"\"\"求解模板的声纹特征矩阵\n\"\"\"\n\n\ndef comparison( model, sample_data, m_dim, l_max, average):\n num_model = w_model.shape[0]\n dim = m_dim * l_max\n d = np.zeros((num_model, m_dim))\n d_min = []\n w2 = establish_matrix(sample_data, m_dim, l_max, average)\n for i in range(0, num_model):\n w1 = np.matrix(model[i, :])\n for j in range(1, m_dim + 1):\n y1 = np.zeros((1, j * dim + 1))\n y2 = np.zeros((1, j * dim + 1))\n y1 = w1[0, 0: j * dim + 1]\n y2 = w2[0, 0: j * dim + 1]\n d[i, j - 1] = distance(y1, y2)\n for j in range(0, m_dim):\n delta = []\n for i in range(0, num_model):\n delta.append(d[i, j])\n d_min.append(min(delta)*1000)\n return d_min\n # if min_distance <= threshold:\n # print(\"Success identification!\")\n # else:\n # print(\"Failed identification!\")\n\"\"\"对比识别声纹\n\"\"\"\n\ndata_file = xlwt.Workbook()\nsheet1 = data_file.add_sheet(u'sheet1',cell_overwrite_ok = True)\ndata_file.save('Excel_sheet1.xls')\nsheet1.write(0,0,'dimension')\nsheet1.write(0,1,'tank')\nsheet1.write(0,2,'plane')\nsheet1.write(0,3,'train')\nsheet1.write(0,4,'human')\nsheet1.write(1,0,'1D')\nsheet1.write(2,0,'2D')\nsheet1.write(3,0,'3D')\nsheet1.write(4,0,'4D')\n\"\"\"建立Excel表\n\"\"\"\n\ndata_of_tank = np.matrix(np.loadtxt(\"tank.txt\", unpack='true'))\nmodel_data = np.matrix(np.loadtxt(\"human model.txt\", unpack='true'))\nw_model = model_matrix(model_data, 4, 10, 4096)\nresult1 = comparison( w_model, data_of_tank, 4, 10, 4096)\nfor i in range(0, 4):\n sheet1.write(i+1, 1, result1[i])\n\n\ndata_of_plane = np.matrix(np.loadtxt(\"plane.txt\", unpack='true'))\n# model_data_of_plane = np.matrix(np.loadtxt(\"plane model.txt\", unpack='true'))\nresult2 = comparison( w_model, data_of_plane, 4, 10, 4096)\nfor i in range(0, 4):\n sheet1.write(i+1, 2, result2[i])\n\n\ndata_of_train = np.matrix(np.loadtxt(\"train.txt\", unpack='true'))\n# model_data_of_train = 
np.matrix(np.loadtxt(\"train model.txt\", unpack='true'))\nresult3 = comparison( w_model, data_of_train, 4, 10, 4096)\nfor i in range(0, 4):\n sheet1.write(i+1, 3, result3[i])\n\n\ndata_of_human = np.matrix(np.loadtxt(\"human.txt\", unpack='true'))\n# model_data_of_human = np.matrix(np.loadtxt(\"human model.txt\", unpack='true'))\nresult4 = comparison( w_model, data_of_human, 4, 10, 4096)\nfor i in range(0, 4):\n sheet1.write(i+1, 4, result4[i])\n\"\"\"将计算结果写入Excel表\n\"\"\"\ndata_file.save('Excel_sheet1.xls')\n\n\n"
},
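The feature extraction in `establish_matrix` above hinges on one idea: a zero-crossing is any pair of consecutive samples that straddles the signal's mean, and the features are histograms of the gaps between crossings. A minimal sketch of that first-order interval extraction on a toy sine wave (the toy signal and sizes are mine, not from the paper's data):

```python
import numpy as np

# Sketch: the zero-crossing step at the heart of establish_matrix(), on a toy
# signal. A crossing is any i where the mean-centered signal changes sign.
signal = np.sin(np.linspace(0, 4 * np.pi, 40))
centered = signal - signal.mean()

crossings = [i for i in range(len(centered) - 1)
             if centered[i] * centered[i + 1] <= 0]

# First-order intervals (i = 1 in the code above): samples strictly between
# adjacent crossings, i.e. the gap length minus one.
intervals = [crossings[j + 1] - crossings[j] - 1 for j in range(len(crossings) - 1)]
print("crossings:", crossings)
print("intervals:", intervals)
```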
{
"alpha_fraction": 0.46742209792137146,
"alphanum_fraction": 0.5297450423240662,
"avg_line_length": 17.578947067260742,
"blob_id": "a2e7c30158e8d9d2cb432fd55f2b5ece7c1f4e91",
"content_id": "6d73d31fa0ba9a82c34b44dba1ed7a0dfb9c9e19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 353,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 19,
"path": "/test.py",
"repo_name": "bitzj2015/zero-crossing-interval",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n# f = open(\"111.txt\",'r')\n# a = f.read()\n# b=a.split(\",\")\n# y=np.loadtxt(\"111.txt\", unpack='true')\n# y = np.matrix(y)\n# print(y)\nm = np.matrix([1,2,3,4])\n# d = []\n# num = 4\n# amount = 0\n# for i in range(0, num):\n# amount = amount + m[i]\n# average = 1. / num * amount\n# print(average)\n# print(m[1:3])\nm = m[0,0:4]\nprint(m[0,0:4])\n"
}
] | 3 |
shahnehashah2/Lazy-Set-Game | https://github.com/shahnehashah2/Lazy-Set-Game | d378f88f320e6d2a174cf9fe50156feb45fdab05 | 068af94166f0c424b980636127956b60e720a34c | 1d53f4b583d81d2caa3980d5682fb300fb8d472a | refs/heads/master | 2021-01-20T08:14:43.915271 | 2017-05-03T07:26:15 | 2017-05-03T07:26:15 | 90,119,821 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.668489396572113,
"alphanum_fraction": 0.6835269927978516,
"avg_line_length": 39.63888931274414,
"blob_id": "2a4c18acadf6713b34a70ce969f9d181b2644f91",
"content_id": "97ce075ed401b52649b1e6b01df7f30b8da55423",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1463,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 36,
"path": "/templates/rules.html",
"repo_name": "shahnehashah2/Lazy-Set-Game",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block matter %}\n<pre>\nThis app uses OpenCV2(version 3.2.0), Flask, SQLite3 with Python(version 3.x) and Jinja2.\nIn order to run it on your computer, please complete the following steps\nafter downloading the required software:\n 1.> Folder 'testcards' stores uploaded image .\n Need to create an empty folder before game starts.\n\n 2.> Folder 'train' stores raw training photos\n (only empty and striped cards of a single color)\n\n 3.> Naming convention for cards in 'train' folder:\n a.> Shape: D - Diamond, W - Wiggly, O - Oval\n b.> Number: 1, 2 or 3 for # of occurences of the symbol\n c.> Fill: S - Striped, E - Empty\n (solid cards can be ignored for now)\n For example, 1 diamond with stripes is to be stored as D1S.jpg\n The program will check for color explicitly.\n\n 4.> Folder 'trained' stores processed images from 'train' folder.\n Need to create an empty folder before game starts.\n\n 5.> The current working folder stores uploaded game image.\n\nRecommendation to snap game image for maximum accuracy of program:\n\n 1.> Photograph in a brightly lit space, with white light\n 2.> Cards should be laid out on black background\n 3.> No overlapping or cropping cards edges - Complete cards must be visible\n 4.> Maximize card area by minimizing black border area\n 5.> Avoid Flash for even brightness\n\n</pre>\n{% endblock matter %}\n"
},
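The rules page above defines a compact naming convention for training cards: one shape letter, one count digit, one fill letter (e.g. `D1S.jpg`). A small sketch decoding that convention into human-readable attributes (the helper name is mine):

```python
# Sketch: decode the training-card naming convention described above
# (shape letter + count digit + fill letter, e.g. "D1S.jpg").
SHAPES = {"D": "Diamond", "W": "Wiggly", "O": "Oval"}
FILLS = {"S": "Striped", "E": "Empty"}

def parse_card_name(filename: str):
    stem = filename.rsplit(".", 1)[0]                  # drop the extension
    shape, number, fill = stem[0], int(stem[1]), stem[2]
    return SHAPES[shape], number, FILLS[fill]

print(parse_card_name("D1S.jpg"))  # ('Diamond', 1, 'Striped')
```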
{
"alpha_fraction": 0.5751484632492065,
"alphanum_fraction": 0.600121796131134,
"avg_line_length": 33.654354095458984,
"blob_id": "11822bec2f89c3c91ecc40ca443c6178d18aefcd",
"content_id": "98e2290557da43ac5f76caf9b184507e70454a54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13134,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 379,
"path": "/app.py",
"repo_name": "shahnehashah2/Lazy-Set-Game",
"src_encoding": "UTF-8",
"text": "# Thank you Brian (https://github.com/bgschiller) for this fun idea for my project\n# and Mike (http://schmidmt.com/) for helping me finish it.\n\nfrom flask import Flask, render_template, request, redirect, url_for, flash\nimport cv2\nimport numpy as np\nimport os\nimport sqlite3 as sql\nimport itertools\n\nUPLOAD_FOLDER = '.'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\n# Static folder needs to be specified because flask needs it to look for images in jinja2\n# URL path is empty string so we don't have to provide absolute path\napp = Flask(__name__, static_folder='.', static_url_path = '')\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n# Set session secret key\napp.secret_key = 'some_secret'\n\n\[email protected](\"/rules\", methods=['GET'])\ndef rules():\n return render_template('rules.html')\n\n\[email protected](\"/\")\ndef main():\n # Preprocess each image and convert then to specific sized rectangles\n # 266 x 200 (height x width) of resized image\n imagelist = os.listdir('train')\n # Empty the trained folder to re-train\n empty('trained')\n\n # Needs to be run the first time this program is executed on a computer\n for imageName in imagelist:\n # 3rd arg to imread specifies color or gray scale. >0 is color\n im = cv2.imread(os.path.join('train', imageName), 1)\n resizedImage = resizeImage(im)\n makeRectangle(resizedImage, 1, 'trained', 'doTrain', imageName)\n return render_template('index.html')\n\n\[email protected](\"/solve\", methods=['POST'])\ndef solve():\n # Get the uploaded file\n file = request.files['file']\n numOfCards = request.form['numOfCards']\n # Checking for no filename or unallowed extensions\n error = validateInput(file, numOfCards)\n if error:\n flash(error)\n # Render and do not redirect to avoid re-processing of training images\n return render_template('index.html')\n numOfCards = int(numOfCards)\n\n dest = 'testcards'\n train_set = 'trained'\n # Empty destination folder\n empty(dest)\n # Read the uploaded image and convert all card images into rectangles\n im = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], file.filename), 1)\n makeRectangle(im, numOfCards, dest, 'doTest')\n\n # Match the dest cards to ones in train_set and store the coded names of\n # the cards in coded_name array\n coded_name = find_matches(dest, train_set)\n # Find all possible combinations of 3 cards without repetition\n all_combi = list(itertools.combinations(coded_name, 3))\n sets = find_sets(all_combi)\n if sets == \"None\":\n found = 0\n else:\n found = len(sets)\n sets_images = get_imageNames(sets)\n # Note- Images might be displayed rotated in browsers other than Firefox\n return render_template('solve.html', found=found, sets=sets, sets_images=sets_images,\n file=url_for('static', filename=file.filename))\n\n\ndef resizeImage(im):\n # im.shape is [height, width, RGB channels]\n ratio = 200.0/im.shape[1]\n dim = (200, int(im.shape[0] * ratio))\n resizedImage = cv2.resize(im, dim, interpolation = cv2.INTER_AREA)\n return resizedImage\n\n\ndef makeRectangle(im, numOfCards, folder, trainOrTest, imageName=''):\n gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n # Removing noise from the image using blur\n blur = cv2.GaussianBlur(gray,(1,1),100)\n flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)\n # Using Canny Edge detection\n if trainOrTest == 'doTrain':\n edged = cv2.Canny(blur, 30, 200)\n else:\n edged = thresh\n\n # Highlight all the contours in the image\n _, contours, _ = cv2.findContours(edged,\n cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n # Sort the 
contours by area so we can get the outside rectangle contour\n contours = sorted(contours, key=cv2.contourArea, reverse=True)[:numOfCards]\n\n # Counter to keep a tab of number of images for naming purposes\n i = 1\n for c in contours:\n # Calculate the perimeter\n peri = cv2.arcLength(c, True)\n # For contour c, approximate the curve based on calculated perimeter.\n # 2nd arg is accuracy, 3rd arg states that the contour curve is closed\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n # Create an array of floats of desired image dimension\n h = np.array([ [0, 200],[0,0],[266, 0],[266, 200] ], np.float32)\n # Gotta change the approx data set also to float32\n approx = approx.astype(np.float32, copy=False)\n\n # Check whether approx is in portrait mode. If not, change from landscape to portrait\n x1 = approx[0][0][0]\n y1 = approx[0][0][1]\n x2 = approx[1][0][0]\n y2 = approx[1][0][1]\n x3 = approx[2][0][0]\n y3 = approx[2][0][1]\n\n # Get the distance squared of top edge and left edge\n l1 = ((x1-x2) ** 2) + ((y1-y2) ** 2)\n l2 = ((x2-x3) ** 2) + ((y2-y3) ** 2)\n\n if l2 < l1:\n # Shift the array once clockwise\n approx = shift(approx)\n\n #Transform the approx data array to h\n transform = cv2.getPerspectiveTransform(approx,h)\n\n # Apply the transformed perspective to original image\n warp = cv2.transpose(cv2.warpPerspective(im,transform,(266,200)))\n\n # The naming of files varies based on whether it is training image or test image\n if trainOrTest == 'doTrain':\n imName = imageName\n else:\n imName = str(i)+'.jpg'\n i += 1\n cv2.imwrite(os.path.join(folder, imName), warp)\n\n\n# Check whether a valid file and a valid number of cards are entered\ndef validateInput(file, numOfCards):\n error = ''\n if file.filename == '':\n error = 'No selected file'\n filename = file.filename\n if allowed_file(filename):\n if numOfCards.isdigit() and int(numOfCards) > 2:\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n else:\n # There must be atleast 3 cards to check for a set\n error = 'Number of cards must be numeric > 3'\n else:\n error = 'Only JPEG or PNG files'\n return error\n\n\n# Check whether the uploaded file is a jpg or png image\ndef allowed_file(f):\n return '.' in f and f.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef find_sets(all_combi):\n con = sql.connect('matches.db')\n cardDB = con.cursor()\n cardDB.execute('''CREATE TABLE IF NOT EXISTS Matches\n (id INT, card1_code TEXT, card2_code TEXT,\n card3_code TEXT, card1 TEXT, card2 TEXT, card3 TEXT)''')\n # Delete previous entries of the table, if any, and vacuum from memory\n cardDB.execute(\"DELETE FROM Matches\")\n cardDB.execute(\"VACUUM\")\n\n count = 0\n for i in all_combi:\n if (int(i[0][0]) + int(i[1][0]) + int(i[2][0])) % 3 == 0:\n if (int(i[0][1]) + int(i[1][1]) + int(i[2][1])) % 3 == 0:\n if (int(i[0][2]) + int(i[1][2]) + int(i[2][2])) % 3 == 0:\n if (int(i[0][3]) + int(i[1][3]) + int(i[2][3])) % 3 == 0:\n count += 1\n items = [reverse_dict(j) for j in i]\n cardDB.execute(\"INSERT INTO Matches VALUES (?,?,?,?,?,?,?)\",\\\n (count, i[0], i[1], i[2], items[0], items[1], items[2]))\n con.commit()\n cur = con.execute(\"SELECT card1, card2, card3 from Matches\")\n sets = []\n for row in cur:\n sets.append([row[0], row[1], row[2]])\n if len(sets)>0:\n return sets\n else:\n # ********** Make sure this is working************\n return \"None\"\n\n\n# Get the actual names of images in a set\n# (like [D3EG.jpg, D2EG.jpg, D1EG.jpg]) for retrieving the images from the\n# test folder. 
Map the image names from their codenames in database\ndef get_imageNames(sets):\n con = sql.connect('testcards.db')\n cardDB = con.cursor()\n\n setsName = []\n for set in sets:\n setName = []\n for i in set:\n cardDB.execute(\"SELECT idName from TestCards WHERE name = :i\",\\\n {\"i\": i})\n row = cardDB.fetchone()\n setName.append(row[0])\n setsName.append(setName)\n return setsName\n\n\n# Get the alphabetic code name from numeric code name\ndef reverse_dict(item):\n con = sql.connect('testcards.db')\n with con:\n cardDB = con.cursor()\n cardDB.execute(\"SELECT name from TestCards WHERE code_name = :item\",\\\n {\"item\": item})\n row = cardDB.fetchone()\n return row[0]\n\n\n# Most of the magic happens here. Match each uploaded card with 'trained' images\ndef find_matches(dest, train_set):\n testlist = os.listdir(dest)\n imagelist = os.listdir(train_set)\n id = 0\n coded_name = []\n con = sql.connect('testcards.db')\n cardDB = con.cursor()\n cardDB.execute(\"DROP TABLE IF EXISTS TestCards\")\n cardDB.execute('''CREATE TABLE TestCards\n (id INT, name TEXT, idName TEXT, code_name TEXT)''')\n\n # Need this dictionary to lookup codename for use in logic to find sets\n dict_name = {'P':'1', 'G':'2', 'R':'3', 'D':'1', 'O':'2', 'W':'3',\n 'E':'1', 'F':'2', 'S':'3'}\n\n for im1 in testlist:\n image1 = cv2.imread(os.path.join(dest, im1), 1)\n img2 = ''\n bestMatch = 4000000\n for im2 in imagelist:\n image2 = cv2.imread(os.path.join('trained', im2), 1)\n # Calculate per elements difference between two arrays\n diff = cv2.absdiff(preprocess(image1),preprocess(image2))\n # Find the images with minimum difference to get the best match\n if(np.sum(diff) < bestMatch):\n bestMatch = np.sum(diff)\n img2 = im2\n # Ignore the card with no match\n if img2 == '':\n print(\"No match\")\n continue\n\n # Add the matched card into database\n id += 1\n # It is difficult to distinguish between full and empty fill.\n # So explicit check is made in scenario where fill type is not stripe(S)\n fill = img2[2]\n if fill != 'S':\n fill = get_fill(image1)\n # Find the color of the card\n color = get_color(image1)\n # Get the shape and number of repeats in shape from name of training card\n shape = img2[0]\n repeat = img2[1]\n\n # Storing in database with two names (D2EG and 1212)\n name = shape + str(repeat) + fill + color\n name1 = dict_name[shape]+str(repeat)+dict_name[fill]+dict_name[color]\n # Storing the numeric coded name in an array for applying logic to\n # finding sets. The name in database will be later used to lookup\n # image of the card which is a part of found sets\n coded_name.append(name1)\n cardDB.execute(\"INSERT INTO TestCards VALUES (?,?,?,?)\",\\\n (id, name, im1, name1))\n cur = con.execute(\"SELECT id, name, idName, code_name from TestCards\")\n for row in cur:\n print (row)\n con.commit()\n return coded_name\n\n\n# Empty the specified folder. 
Called everytime the game is restarted\ndef empty(dest):\n listname = os.listdir(dest)\n for f in listname:\n os.unlink(os.path.join(dest, f))\n\n\n# Shift the array elements one time\ndef shift(seq):\n temp = seq[3].copy()\n for i in range(3, 0, -1):\n seq[i] = seq[i-1]\n seq[0] = temp\n return seq\n\n\n# Image needs to be preprocessed before comparison via absdiff()\ndef preprocess(im):\n gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray,(5,5),2 )\n thresh = cv2.adaptiveThreshold(blur,255,1,1,11,1)\n blur_thresh = cv2.GaussianBlur(thresh,(5,5),5)\n return blur_thresh\n\n\ndef get_color(im):\n height = 266\n width = 200\n # Color is returned in BGR format\n for i in range(height):\n for j in range(width):\n bgr = im[i][j]\n if sum(bgr) > 500:\n im[i][j] = [0,0,0]\n # np.mean averages over all pixels (including black)\n # To count the non-black pixels in image, create a grayscale copy of it\n im1 = cv2.cvtColor( im, cv2.COLOR_RGB2GRAY )\n bgr_mean = (np.mean(im, axis=(0,1)) * im1.size) // np.count_nonzero(im1)\n\n color=''\n blue = bgr_mean[0]\n green = bgr_mean[1]\n red = bgr_mean[2]\n\n if red>blue and red> green and red-green>50:\n color = 'R'\n elif green>blue and green>red and green-red>50:\n color = 'G'\n else:\n color = 'P'\n return color\n\n\ndef get_fill(im):\n # im should be only a full or empty card, not striped\n # Set Region of Interest to a single line passing vertically through\n # top half of card\n col = 100\n rows = range(0,133)\n\n countColor = 0\n countWhite = 0\n\n for i in rows:\n bgr = im[i][col]\n blue = bgr[0]\n green = bgr[1]\n red = bgr[2]\n # White or close to white pixel has RGB values over 150 each\n if (int(blue) + int(green) + int(red)) > 450:\n countWhite += 1\n else:\n countColor += 1\n\n if (countWhite//countColor) > 10:\n return 'E'\n else:\n return 'F'\n\n\nif __name__ == \"__main__\":\n app.run()\n"
}
] | 2 |
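The set-detection rule in `find_sets` above encodes each card attribute (shape, count, fill, color) as a digit from 1 to 3; three cards form a set exactly when every attribute position sums to a multiple of 3, i.e. the digits are all equal or all different. A compact sketch of the same check on made-up card codes:

```python
from itertools import combinations

# Sketch: the modulo-3 rule find_sets() applies. Each card is a 4-digit code
# (shape, count, fill, color), every digit in 1..3; three cards form a set iff
# each position sums to a multiple of 3 (all equal or all different).
def is_set(a: str, b: str, c: str) -> bool:
    return all((int(x) + int(y) + int(z)) % 3 == 0 for x, y, z in zip(a, b, c))

cards = ["1111", "2222", "3333", "1123"]  # placeholder codes, not real output
for triple in combinations(cards, 3):
    if is_set(*triple):
        print("set found:", triple)
```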
artur-oliveira/alura-scraper | https://github.com/artur-oliveira/alura-scraper | 6dea9dd35e30c16c06c90cf2d91684f1bc7c5303 | c1d700362acb619dc8683c29c83a008ca34414ff | b8e5c1302d05eca916d45cf76440b513ceac22d4 | refs/heads/master | 2023-06-23T11:11:49.571231 | 2021-07-22T12:16:06 | 2021-07-22T12:16:06 | 363,250,624 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5593142509460449,
"alphanum_fraction": 0.5637146830558777,
"avg_line_length": 34.64706039428711,
"blob_id": "6359de45be3634779853c548c4ce7cdeda58f25b",
"content_id": "a8ff392e8ce408f3438280dfca9bcf36332e02fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10944,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 306,
"path": "/main.py",
"repo_name": "artur-oliveira/alura-scraper",
"src_encoding": "UTF-8",
"text": "import os\nimport pickle\nimport shutil\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom decouple import config\nfrom json import JSONDecodeError\nfrom requests import Session\nfrom bs4 import BeautifulSoup\nfrom typing import List, Dict, Any\nfrom timeit import default_timer as timer\n\nCREDENTIALS: Dict = {\n 'username': config('EMAIL'),\n 'password': config('PASSWORD')\n}\n\nCATEGORIES: List[Dict] = [{'Cursos Mobile': 'mobile'},\n {'Cursos Programação': 'programacao'},\n {'Cursos Front-end': 'front-end'},\n {'Cursos Desing & UX': 'design-ux'},\n {'Cursos DevOps': 'devops'},\n {'Cursos Data Science': 'data-science'},\n {'Cursos Inovação & Gestão': 'inovacao-gestao'}]\n\n\nclass AluraScraper(object):\n \"\"\"\n Classe responsável por pegar todos os dados da Alura\n \"\"\"\n def __init__(self: 'AluraScraper', username: str, password: str, category: str) -> None:\n \"\"\"\n\n :param username: Nome do usuário\n :param password: Senha do usuário\n :param category: Categoria escolhida via menu\n \"\"\"\n self.username: str = username\n self.password: str = password\n\n self.browser: Session = Session()\n self.BASE_URL: str = 'https://cursos.alura.com.br'\n self.BASE_LOGIN_URL: str = self.BASE_URL + '/signin'\n self.CATEGORY_URL: str = self.BASE_URL + '/category/' + category\n self.signed_in: bool = False\n\n self.course_by_subcategory: List[Dict[str: str]] = []\n self.course_data: List[Dict[str: str]] = []\n\n def login(self: 'AluraScraper') -> 'AluraScraper':\n \"\"\"\n Realiza o login no site da alura, salva a sessão para que não precise ser feito o login novamente em algumas horas\n :return:\n \"\"\"\n if self.file_exists('cookie.pickle'):\n self.browser.cookies = self.load_cookies()\n self.signed_in = True\n else:\n self.browser.post(self.BASE_LOGIN_URL, data={'username': self.username, 'password': self.password})\n self.signed_in = True\n self.save_cookies()\n\n return self\n\n def get_courses(self: 'AluraScraper') -> 'AluraScraper':\n \"\"\"\n Pega todos os cursos disponíveis em uma determinada categoria\n :return:\n \"\"\"\n assert self.is_logged()\n\n soup: BeautifulSoup = BeautifulSoup(self.browser.get(self.CATEGORY_URL).content, 'lxml')\n\n lista_nomes: List[str] = [str(it.text).strip() for it in soup.find_all(attrs={'id': 'subcategory__anchor'})]\n lista_tags: List[BeautifulSoup] = soup.find_all(attrs={'class': 'card-list category__card-list'})\n\n for i in range(len(lista_tags)):\n data_list = []\n li_tags: List[BeautifulSoup] = lista_tags[i].find_all('li')\n for li in li_tags:\n data_list.append({li.get('data-course-name'):\n li.find(attrs={'class': 'course-card__course-link'}).get('href')})\n\n self.course_by_subcategory.append({lista_nomes[i]: data_list})\n\n return self\n\n def download_videos_course(self: 'AluraScraper', course: str) -> None:\n \"\"\"\n Faz o download de todos os vídeos de um determinado curso\n :param course: URL do curso que vai ser baixado\n :return:\n \"\"\"\n name: str = course.split('/')[2]\n data: dict = self.__download_m3u8_playlists(self.__get_download_links(course))\n\n self.create_folder(name)\n\n tasks_ts_files: List[tuple] = []\n task_merge_files: List[tuple] = []\n\n for key in data.keys():\n video_folder: str = name + '/' + key\n task_merge_files.append((video_folder, ))\n\n self.create_folder(video_folder)\n for i in range(len(data.get(key))):\n tasks_ts_files.append((data.get(key)[i], '%s/%s-%d.ts' % (video_folder, key, i)))\n\n start = timer()\n print('COMEÇANDO O DOWNLOAD: %d PARTES ENCONTRADAS' % len(tasks_ts_files))\n 
self.execute_in_thread(tasks_ts_files, self.__download_ts_files)\n print('DOWNLOAD FINALIZADO EM %.2f SEGUNDOS' % (timer() - start))\n\n print('JUNTANDO TODOS OS VÍDEOS')\n self.execute_in_thread(task_merge_files, self.__merge_ts_files)\n\n def __download_ts_files(self: 'AluraScraper', task: List[tuple]) -> None:\n \"\"\"\n Função que responsável para fazer o download dos arquivos TS (partes dos vídeos)\n :param task: Lista com tuplas que contém: O link, o nome do arquivo\n :return:\n \"\"\"\n link, filename = task\n self.download(link, filename)\n\n def __merge_ts_files(self: 'AluraScraper', task: tuple) -> None:\n folder = task[0]\n onlyfiles = [f for f in os.listdir(folder) if f.endswith('.ts')]\n\n self.merge(folder, onlyfiles)\n\n @staticmethod\n def execute_in_thread(tasks: List[tuple], function, workers=8):\n \"\"\"\n Função que executa uma função em multithreading\n :param tasks: Lista de tuplas\n :param function: função a ser executada\n :param workers: é como se fosse o número de threads 8 é suficiente, acima disso é praticamente perca de tempo\n :return:\n \"\"\"\n with ThreadPoolExecutor(max_workers=workers) as pool:\n pool.map(function, tasks)\n\n def __get_download_links(self: 'AluraScraper', course: str) -> List[str]:\n path: str = self.BASE_URL + course\n response = self.browser.get(self.BASE_URL +\n self.__get_task_link(BeautifulSoup(self.browser.get(path).content, 'lxml')))\n data = []\n count_vids: int = 1\n while 1:\n if response.url.endswith('#aulas'):\n break\n\n json_links = self.has_video_task(response.url)\n\n if json_links != '' and len(json_links) >= 1:\n data.append(json_links[0].get('link'))\n print('%d link(s) encontrado(s)' % count_vids)\n count_vids += 1\n\n response = self.browser.get(response.url + '/next')\n\n return data\n\n def __download_m3u8_playlists(self: 'AluraScraper', list_links: List[str]) -> dict:\n filename: str = 'index-%s.m3u8'\n list_links_return: dict = {}\n count: int = 1\n tasks: List[tuple] = []\n\n for link in list_links:\n tasks.append((link, filename % str(count), list_links_return))\n count += 1\n\n self.execute_in_thread(tasks, self.__download_m3u8)\n\n while len(tasks) > 0:\n tasks: List[tuple] = []\n for key in list_links_return.keys():\n if len(list_links_return.get(key)) == 0:\n tasks.append((list_links[int(key) - 1], filename % key, list_links_return))\n\n self.execute_in_thread(tasks, self.__download_m3u8)\n\n return list_links_return\n\n def __download_m3u8(self, task: list) -> None:\n link, filename, list_links_return = task\n\n self.download(link, filename)\n\n with open(filename, 'r') as playlist:\n data = ['https://video.alura.com.br' + line.strip() for\n line in playlist\n if line.strip().startswith('/hls/alura/')]\n\n list_links_return[filename.split('-')[1].split('.')[0]] = data\n os.remove(filename)\n\n def download(self: 'AluraScraper', link: str, filename: str) -> None:\n with self.browser.get(link) as res:\n with open(filename, 'wb') as f:\n for chunk in res.iter_content(chunk_size=1024):\n f.write(chunk)\n\n @staticmethod\n def merge(folder: str, data: List[str]) -> None:\n filename: str = folder.split('/')[0] + '/' + data[0].split('-')[0] + '.mp4'\n\n with open(filename, 'ab') as final:\n for item in data:\n with open(folder + '/' + item, 'rb') as temp:\n final.write(temp.read())\n\n shutil.rmtree(folder)\n\n def is_logged(self: 'AluraScraper') -> bool:\n return self.signed_in\n\n def has_video_task(self: 'AluraScraper', url_task: str) -> Any:\n try:\n return self.browser.get(url_task + '/video').json() if 
url_task.startswith('https://') else \\\n self.browser.get(self.BASE_URL + url_task + '/video').json()\n except JSONDecodeError:\n return ''\n\n @staticmethod\n def __get_task_link(soup: BeautifulSoup) -> str:\n return [item.get('href') for item in soup.find_all('a', attrs={'class': 'courseSectionList-section'})][0]\n\n def save_cookies(self: 'AluraScraper'):\n with open('cookie.pickle', 'wb') as f:\n pickle.dump(self.browser.cookies, f)\n\n @staticmethod\n def load_cookies():\n with open('cookie.pickle', 'rb') as f:\n return pickle.load(f)\n\n @staticmethod\n def file_exists(filename: str) -> bool:\n try:\n open(filename)\n return True\n except FileNotFoundError:\n return False\n\n @staticmethod\n def create_folder(folder: str) -> None:\n try:\n os.makedirs(folder)\n print('A PASTA %s FOI CRIADA' % folder)\n except FileExistsError:\n print('A PASTA %s JÁ EXISTE' % folder)\n\n\nclass Menu:\n def __init__(self: 'Menu'):\n self.scraper: 'AluraScraper'\n\n def show_menu(self: 'Menu') -> None:\n course = self.__choose_course()\n for item in course:\n self.scraper.download_videos_course(item)\n\n def __choose_category(self: 'Menu') -> List[Dict]:\n self.scraper = AluraScraper(CREDENTIALS.get('username'),\n CREDENTIALS.get('password'),\n self.__choose_valid_option_or_exit(CATEGORIES)).login().get_courses()\n return self.scraper.course_by_subcategory\n\n def __choose_subcategory(self: 'Menu') -> List[Dict]:\n return self.__choose_valid_option_or_exit(self.__choose_category())\n\n def __choose_course(self: 'Menu') -> List[str]:\n data = self.__choose_subcategory()\n return [list(data[i].values())[0] for i in range(len(data))]\n\n @staticmethod\n def __choose_valid_option_or_exit(data: List[Dict]) -> Any:\n print('Escolha uma das opções abaixo:')\n\n for i in range(len(data)):\n print('%d - %s' % ((i + 1), list(data[i].keys())[0]))\n print('0 - Sair\\n')\n\n while 1:\n try:\n option = int(input('Opção: '))\n\n if option < 1:\n break\n\n return list(data[option - 1].values())[0]\n\n except ValueError:\n print('Opção inválida\\n')\n except IndexError:\n print('Escolha um dos valores da lista\\n')\n\n exit(0)\n\n\nif __name__ == '__main__':\n Menu().show_menu()\n"
},
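After downloading, the scraper stitches the MPEG-TS parts of each video back together by simple byte concatenation. The repo's `merge()` relies on `os.listdir` ordering, while its scratch `test.py` sorts the parts numerically; a small sketch combining both ideas, with explicit numeric ordering (folder and output names are placeholders):

```python
import os

# Sketch: stitch downloaded MPEG-TS parts back into one file. Parts are named
# "<video>-<index>.ts" by the scraper, so sort by the numeric index before
# appending; "parts_dir" and "movie.mp4" are placeholder names.
def merge_parts(folder: str, output: str) -> None:
    parts = [f for f in os.listdir(folder) if f.endswith(".ts")]
    # "1-0.ts" -> 0, "1-10.ts" -> 10, so 10 sorts after 9 (lexicographic order would not)
    parts.sort(key=lambda f: int(f.rsplit("-", 1)[1].split(".")[0]))
    with open(output, "wb") as final:
        for part in parts:
            with open(os.path.join(folder, part), "rb") as chunk:
                final.write(chunk.read())

# merge_parts("parts_dir", "movie.mp4")
```

MPEG-TS is designed so that concatenated segments of one stream remain playable, which is why no remuxing step is needed here.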
{
"alpha_fraction": 0.5873287916183472,
"alphanum_fraction": 0.6010273694992065,
"avg_line_length": 26.85714340209961,
"blob_id": "db328694366094a325db3b6376618c39c0f82f1b",
"content_id": "a16cc9c947031897a8b5ab77d6a93d54a59dcd90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 584,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 21,
"path": "/test.py",
"repo_name": "artur-oliveira/alura-scraper",
"src_encoding": "UTF-8",
"text": "import os\nfrom typing import List\nimport shutil\n\nonlyfiles = [f for f in os.listdir('react-native-design-system/1') if f.endswith('.ts')]\n\n\nprint(sorted(onlyfiles, key=lambda x: int(x.split('.')[0].split('-')[1])))\n\n\ndef merge(folder: str, data: List[str]) -> None:\n folder_path = folder.split('/')[0]\n filename: str = folder + '/' + data[0].split('-')[0] + '.mp4'\n\n with open(filename, 'ab') as final:\n for item in data:\n with open(folder + '/' + item, 'rb') as temp:\n final.write(temp.read())\n\n\nshutil.rmtree('react-native-design-system/1')"
},
{
"alpha_fraction": 0.7709497213363647,
"alphanum_fraction": 0.7709497213363647,
"avg_line_length": 35,
"blob_id": "3286217e9c357d80563ab2f4f43621ac4ad4b9b4",
"content_id": "e4d467c0ca059e3a38b653deec8b21938db0f07d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 5,
"path": "/README.md",
"repo_name": "artur-oliveira/alura-scraper",
"src_encoding": "UTF-8",
"text": "# alura-scraper\nPara rodar o projeto crie um arquivo .env colocando suas credenciais conforme o arquivo .env.example, e depois rode o programa com o comando\n```\npython main.py\n```"
}
] | 3 |
ckw1140/bert | https://github.com/ckw1140/bert | 600c47cfa46ea9d51d711fc63d43f8f5c88d9ee8 | 264d82087cffa1a8fe16d7883633bbab2c4616f3 | 17e52350fd119eb386d090638da6dd5a06f88edd | refs/heads/main | 2023-03-04T14:02:08.040953 | 2021-02-15T04:37:41 | 2021-02-15T04:37:41 | 338,710,268 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6945551037788391,
"alphanum_fraction": 0.7031872272491455,
"avg_line_length": 39.702701568603516,
"blob_id": "caf70aa30d0b29e1ffb0508aabb61393b328834c",
"content_id": "3eb9956cb8b921dec75953de98c7a2caea4a4f48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1506,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 37,
"path": "/tests/test_bert.py",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "import torch\n\nfrom model.bert import BERT, BERTPretrain\nfrom model.config import Config\n\ndef test_bert():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))\n segments = torch.randint(2, (batch_size, config.sequence_length))\n\n bert = BERT(config)\n outputs, outputs_cls, attention_probs = bert(inputs, segments)\n\n assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)\n assert outputs_cls.size() == (batch_size, config.hidden_dim)\n assert len(attention_probs) == config.num_layers\n assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n assert attention_probs[0].max() <= 1.0\n\n\ndef test_bert_pretrain():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))\n segments = torch.randint(2, (batch_size, config.sequence_length))\n\n bert_pretrain = BERTPretrain(config)\n logits_cls, logits_lm, attention_probs = bert_pretrain(inputs, segments)\n\n assert logits_cls.size() == (batch_size, 2)\n assert logits_lm.size() == (batch_size, config.sequence_length, config.vocab_size)\n assert len(attention_probs) == config.num_layers\n assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n assert attention_probs[0].max() <= 1.0\n"
},
{
"alpha_fraction": 0.5844875574111938,
"alphanum_fraction": 0.5858725905418396,
"avg_line_length": 27.8799991607666,
"blob_id": "ccb9732a30e99450f90ae92eb879b19e152aded5",
"content_id": "505582e4304d33369f28119c0c3ade90cd4ce12a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2166,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 75,
"path": "/model/bert.py",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom model.layers import Encoder\n\n\nclass BERT(nn.Module):\n def __init__(self, config):\n super(BERT, self).__init__()\n self.config = config\n\n self.encoder = Encoder(config)\n self.linear = nn.Linear(config.hidden_dim, config.hidden_dim)\n self.activation = torch.tanh\n\n def forward(\n self,\n inputs: torch.Tensor,\n segments: torch.Tensor,\n ):\n outputs, attention_probs = self.encoder(inputs, segments)\n\n # [batch_size, hidden_dim]\n outputs_cls = outputs[:, 0].contiguous()\n outputs_cls = self.linear(outputs_cls)\n outputs_cls = self.activation(outputs_cls)\n\n return outputs, outputs_cls, attention_probs\n\n def save(self, epoch, loss, path):\n torch.save(\n {\n \"epoch\": epoch,\n \"loss\": loss,\n \"state_dict\": self.state_dict()\n },\n path,\n )\n \n def load(self, path):\n save = torch.load(path)\n self.load_state_dict(save[\"state_dict\"])\n return save[\"epoch\"], save[\"loss\"]\n\n\nclass BERTPretrain(nn.Module):\n def __init__(self, config):\n super(BERTPretrain, self).__init__()\n self.config = config\n\n self.bert = BERT(config)\n\n # Classifier\n self.projection_cls = nn.Linear(config.hidden_dim, 2, bias=False)\n # lm\n self.projection_lm = nn.Linear(config.hidden_dim, config.vocab_size)\n self.projection_lm.weight = self.bert.encoder.enc_emb.weight\n\n def forward(\n self,\n inputs: torch.Tensor,\n segments: torch.Tensor,\n ):\n # [batch_size, sequence_length, hidden_dim]\n # [batch_size, hidden_dim]\n # [batch_size, num_heads, sequence_length, sequence_length] x num_layers\n outputs, outputs_cls, attention_probs = self.bert(inputs, segments)\n\n # [batch_size, 2]\n logits_cls = self.projection_cls(outputs_cls)\n # [batch_size, sequence_length, vocab_size]\n logits_lm = self.projection_lm(outputs)\n\n return logits_cls, logits_lm, attention_probs\n"
},
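`BERTPretrain` above ties the language-model head to the token embedding (`self.projection_lm.weight = self.bert.encoder.enc_emb.weight`), so both share one parameter matrix. A tiny standalone sketch of that weight-tying trick with toy dimensions (the sizes here are illustrative, not the repo's config):

```python
import torch
import torch.nn as nn

# Sketch: weight tying as used in BERTPretrain - the LM head shares its weight
# matrix with the token embedding, so one (vocab_size, hidden_dim) parameter
# serves both roles and is updated by gradients from both paths.
vocab_size, hidden_dim = 10, 4
embedding = nn.Embedding(vocab_size, hidden_dim)
lm_head = nn.Linear(hidden_dim, vocab_size)
lm_head.weight = embedding.weight  # same Parameter object, not a copy

hidden = torch.randn(2, hidden_dim)   # stand-in for encoder outputs
logits = lm_head(hidden)              # [2, vocab_size]
print(logits.shape, lm_head.weight is embedding.weight)
```

Tying works because `nn.Linear(hidden_dim, vocab_size)` stores its weight as a `(vocab_size, hidden_dim)` matrix, exactly the embedding table's shape.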
{
"alpha_fraction": 0.6803278923034668,
"alphanum_fraction": 0.7131147384643555,
"avg_line_length": 14.25,
"blob_id": "86c8f3aa16edc60017a3adde2d7746f4d6126c35",
"content_id": "744c4e615b7fbc910cef512039f9b92632011a9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 8,
"path": "/README.md",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "# bert\nBERT 의 구현체입니다.\n\nhttps://paul-hyun.github.io/bert-01/\n\nhttps://paul-hyun.github.io/bert-02/\n\n위의 포스팅을 참고하면서 구현하였습니다.\n"
},
{
"alpha_fraction": 0.8684210777282715,
"alphanum_fraction": 0.8684210777282715,
"avg_line_length": 6.599999904632568,
"blob_id": "6abbd343624171132a5d813c00b28e809438da13",
"content_id": "262d79df609fb99fd74db65506522f1d8daea4a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 38,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "numpy\npandas\nsentencepiece\ntorch\ntqdm\n"
},
{
"alpha_fraction": 0.6244365572929382,
"alphanum_fraction": 0.6307147741317749,
"avg_line_length": 35.327484130859375,
"blob_id": "15d4e82303c8caa01179c2fe39b7f8f34c11a41b",
"content_id": "6291617dba7baf8ff1e222830c8bed2678060e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6266,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 171,
"path": "/model/layers.py",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom model.utils import gelu, get_attention_pad_mask\n\n\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, config):\n super(ScaledDotProductAttention, self).__init__()\n self.config = config\n self.dropout = nn.Dropout(config.dropout_prob)\n self.scale = 1 / (config.head_dim ** 0.5)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attention_mask: torch.Tensor,\n ):\n scores = torch.matmul(query, key.transpose(2, 3)) * self.scale\n scores.masked_fill_(attention_mask, -1e9)\n # [batch_size, num_heads, sequence_length, sequence_length]\n attention_prob = nn.Softmax(dim=-1)(scores)\n attention_prob = self.dropout(attention_prob)\n\n # [batch_size, num_heads, sequence_length, head_dim]\n context = torch.matmul(attention_prob, value)\n\n return context, attention_prob\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, config):\n super(MultiHeadAttention, self).__init__()\n self.config = config\n\n self.W_q = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)\n self.W_k = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)\n self.W_v = nn.Linear(config.hidden_dim, config.num_heads * config.head_dim)\n self.scaled_dot_product_attention = ScaledDotProductAttention(config)\n self.linear = nn.Linear(config.head_dim * config.num_heads, config.hidden_dim)\n self.dropout = nn.Dropout(config.dropout_prob)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n attention_mask: torch.Tensor,\n ):\n batch_size = query.size(0)\n\n # [batch_size, num_heads, sequence_length, head_dim]\n Q = self.W_q(query).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)\n K = self.W_k(key).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)\n V = self.W_v(value).view(batch_size, -1, self.config.num_heads, self.config.head_dim).transpose(1, 2)\n\n # [batch_size, num_heads, sequence_length, sequence_length]\n attention_mask = attention_mask.unsqueeze(1).repeat(1, self.config.num_heads, 1, 1)\n\n # [batch_size, num_heads, sequence_length, head_dim]\n # [batch_size, num_heads, sequence_length, sequence_length]\n context, attention_prob = self.scaled_dot_product_attention(Q, K, V, attention_mask)\n\n # [batch_size, sequence_length, num_heads * head_dim]\n context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.num_heads * self.config.head_dim)\n\n # [batch_size, sequence_length, hidden_dim]\n outputs = self.linear(context)\n outputs = self.dropout(outputs)\n\n return outputs, attention_prob\n\n\nclass FeedForward(nn.Module):\n def __init__(self, config):\n super(FeedForward, self).__init__()\n self.config = config\n\n self.linear1 = nn.Linear(config.hidden_dim, config.feed_forward_dim)\n self.linear2 = nn.Linear(config.feed_forward_dim, config.hidden_dim)\n self.dropout = nn.Dropout(config.dropout_prob)\n\n def forward(\n self,\n inputs: torch.Tensor,\n ):\n outputs = self.linear1(inputs)\n outputs = gelu(outputs)\n outputs = self.linear2(outputs)\n return self.dropout(outputs)\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, config):\n super(EncoderLayer, self).__init__()\n self.config = config\n\n self.self_attention = MultiHeadAttention(config)\n self.layer_norm1 = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_epsilon)\n self.feed_forward = FeedForward(config)\n self.layer_norm2 = nn.LayerNorm(config.hidden_dim, 
eps=config.layer_norm_epsilon)\n\n def forward(\n self,\n inputs: torch.Tensor,\n attention_mask: torch.Tensor,\n ):\n residual = inputs\n outputs, attention_prob = self.self_attention(\n query=inputs,\n key=inputs,\n value=inputs,\n attention_mask=attention_mask,\n )\n outputs = self.layer_norm1(outputs + residual)\n\n residual = outputs\n outputs = self.feed_forward(outputs)\n outputs = self.layer_norm2(outputs + residual)\n\n return outputs, attention_prob\n\n\nclass Encoder(nn.Module):\n def __init__(self, config):\n super(Encoder, self).__init__()\n self.config = config\n\n self.enc_emb = nn.Embedding(config.vocab_size, config.hidden_dim)\n self.pos_emb = nn.Embedding(config.sequence_length + 1, config.hidden_dim)\n self.seg_emb = nn.Embedding(config.num_segments, config.hidden_dim)\n\n self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.num_layers)])\n\n def forward(\n self,\n inputs: torch.Tensor,\n segments: torch.Tensor,\n ):\n batch_size = inputs.size(0)\n sequence_length = inputs.size(1)\n\n positions = torch.arange(\n sequence_length,\n device=inputs.device,\n dtype=inputs.dtype\n )\n positions = positions.expand(batch_size, sequence_length)\n positions = positions.contiguous() + 1\n\n # inputs 에서 값이 pad_token 인 위치들에 대응되는 positions 의 위치들의 값을 0으로 바꿔줍니다.\n pos_mask = inputs.eq(self.config.pad_token)\n positions.masked_fill_(pos_mask, 0)\n\n # [batch_size, sequence_length, hidden_dim]\n outputs = self.enc_emb(inputs) + self.pos_emb(positions) + self.seg_emb(segments)\n\n # [batch_size, sequence_length, sequence_length]\n attention_mask = get_attention_pad_mask(inputs, inputs, self.config.pad_token)\n\n attention_probs = []\n for layer in self.layers:\n # outputs: [batch_size, sequence_length, hidden_dim]\n # attention_probs: [batch_size, num_head, sequence_length, sequence_length]\n outputs, attention_prob = layer(outputs, attention_mask)\n attention_probs.append(attention_prob)\n\n return outputs, attention_probs\n"
},
{
"alpha_fraction": 0.7149009704589844,
"alphanum_fraction": 0.7180591225624084,
"avg_line_length": 39.488372802734375,
"blob_id": "cbee7d387f4af62f130ee99e4845683b83bf87a1",
"content_id": "1f880b9e024fb78b8c5a4a724904ee73d1c07387",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3483,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 86,
"path": "/tests/test_layers.py",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "import torch\n\nfrom model.config import Config\nfrom model.layers import (\n ScaledDotProductAttention,\n MultiHeadAttention,\n FeedForward,\n EncoderLayer,\n Encoder,\n)\nfrom model.utils import get_attention_pad_mask\n\n\ndef test_scaled_dot_product_attention():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n query = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])\n key = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])\n value = torch.rand([batch_size, config.num_heads, config.sequence_length, config.head_dim])\n attention_mask = torch.zeros([batch_size, config.num_heads, config.sequence_length, config.sequence_length])\n\n scaled_dot_product_attention = ScaledDotProductAttention(config)\n context, attention_prob = scaled_dot_product_attention(query, key, value, attention_mask)\n\n assert context.size() == (batch_size, config.num_heads, config.sequence_length, config.head_dim)\n assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n assert attention_prob.max() <= 1.0\n\n\ndef test_multi_head_attention():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n query = torch.rand([batch_size, config.sequence_length, config.hidden_dim])\n key = torch.rand([batch_size, config.sequence_length, config.hidden_dim])\n value = torch.rand([batch_size, config.sequence_length, config.hidden_dim])\n attention_mask = torch.zeros([batch_size, config.sequence_length, config.sequence_length])\n\n multi_head_attention = MultiHeadAttention(config)\n context, attention_prob = multi_head_attention(query, key, value, attention_mask)\n\n assert context.size() == (batch_size, config.sequence_length, config.hidden_dim)\n assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n assert attention_prob.max() <= 1.0\n\n\ndef test_feed_forward():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n inputs = torch.rand([batch_size, config.sequence_length, config.hidden_dim])\n\n feed_forward = FeedForward(config)\n outputs = feed_forward(inputs)\n\n assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)\n\n\ndef test_encoder_layer():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n inputs = torch.rand([batch_size, config.sequence_length, config.hidden_dim])\n attention_mask = torch.zeros([batch_size, config.sequence_length, config.sequence_length])\n\n encoder_layer = EncoderLayer(config)\n outputs, attention_prob = encoder_layer(inputs, attention_mask)\n\n assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)\n assert attention_prob.size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n\n\ndef test_encoder():\n config = Config.load(\"./tests/config.json\")\n batch_size = 8\n\n inputs = torch.randint(config.vocab_size, (batch_size, config.sequence_length))\n segments = torch.randint(2, (batch_size, config.sequence_length))\n\n encoder = Encoder(config)\n outputs, attention_probs = encoder(inputs, segments)\n\n assert outputs.size() == (batch_size, config.sequence_length, config.hidden_dim)\n assert len(attention_probs) == config.num_layers\n assert attention_probs[0].size() == (batch_size, config.num_heads, config.sequence_length, config.sequence_length)\n\n"
},
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.6669750213623047,
"avg_line_length": 29.885713577270508,
"blob_id": "244890de205b53e7365a20b2c58b55403091b876",
"content_id": "73bfc34bfdee7f169f3c049e0527558a473755cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1189,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 35,
"path": "/model/utils.py",
"repo_name": "ckw1140/bert",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef gelu(inputs: torch.Tensor):\n \"\"\"\n https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_tf_gpt2.py\n 를 참고하여 작성하였습니다.\n \"\"\"\n cdf = 0.5 * (1.0 + torch.tanh((np.sqrt(2 / np.pi) * (inputs + 0.044715 * torch.pow(inputs, 3)))))\n return inputs * cdf\n\n\ndef get_attention_pad_mask(\n query: torch.Tensor,\n key: torch.Tensor,\n pad_token: int,\n):\n \"\"\"\n attention_prob 에서 key 에 대응되는 위치가 pad_token 이라 masking 되야하는 위치에 True 값을,\n 나머지 위치들에 대해서는 False 값을 갖는 Tensor를 반환합니다.\n \"\"\"\n batch_size, query_length = query.size()\n batch_size, key_length = key.size()\n\n # attention_pad_mask.requires_grad=False\n # [batch_size, key_length]\n attention_pad_mask = key.data.eq(pad_token)\n # [batch_size, 1, key_length]\n attention_pad_mask = attention_pad_mask.unsqueeze(1)\n # [batch_size, query_length, key_length]\n attention_pad_mask = attention_pad_mask.expand(batch_size, query_length, key_length)\n return attention_pad_mask\n"
}
] | 7 |
CarterSimonson/chicago-crime-data | https://github.com/CarterSimonson/chicago-crime-data | 6c60b0c063885e6a6b3218a5f5cd4fd021ddf159 | 53ddc97e1307e8133aede311ab808a33653e9044 | ae00d14c328ad8638b63cb73b4aed4657e6c6a52 | refs/heads/master | 2020-05-18T15:37:54.904263 | 2019-05-02T02:19:46 | 2019-05-02T02:19:46 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3803967237472534,
"alphanum_fraction": 0.43290549516677856,
"avg_line_length": 36.79411697387695,
"blob_id": "76b91feb54b14899a48bc94db34f01f2f2d9fdcd",
"content_id": "57847bcca036004a310f38ea6f21de5397910ba6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5142,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 136,
"path": "/crimes.py",
"repo_name": "CarterSimonson/chicago-crime-data",
"src_encoding": "UTF-8",
"text": "import csv\n\n#Data dictionaries\ncensus_tracts = {}\ncrime_types = {\n \"BURGLARY\": \"PROPERTY\",\n \"THEFT\": \"PROPERTY\",\n \"MOTOR VEHICLE THEFT\": \"PROPERTY\",\n \"ARSON\": \"PROPERTY\",\n \"MURDER\": \"VIOLENT\",\n \"CRIM SEXUAL ASSAULT\": \"VIOLENT\",\n \"ROBBERY\": \"VIOLENT\",\n \"ASSAULT\": \"VIOLENT\"\n}\n\n#Column index information\niucr_index = 4\nprimary_type_index = 5\nfbi_index = 14\nyear_index = 17\ncensus_tract_index = 25\n\n#Other Vars\nrows_indexed = 0\n\n#Open the crimes.csv file, and assign it to the variable \"csvfile\"\nwith open(\"crimes.csv\", \"r\") as csvfile:\n #Create a csv datareader, which will allow us to read the csv file by row\n datareader = csv.reader(csvfile)\n \n #Print the CSV initial headers\n print(str(next(datareader)))\n \n #Loop through every row of the dataset\n for row in datareader:\n #Increment our rows_indexed var\n rows_indexed += 1\n\n #Make an attempt to retrieve data from the current row, with the indexes defined above\n try:\n census_tract = row[census_tract_index]\n primary_type = row[primary_type_index]\n year = row[year_index]\n iucr = row[iucr_index]\n fbi = row[fbi_index]\n\n if(int(year) >= 2005 and int(year) <= 2017):\n #Check if the census tract currently exists in our census_tracts dictionary object\n #If it does not, set it equal to a blank census tract dictionary, defined above\n if((str(census_tract) in census_tracts) == False):\n census_tracts[str(census_tract)] = {\n \"PROPERTY\": {\n \"2005\": 0,\n \"2006\": 0,\n \"2007\": 0,\n \"2008\": 0,\n \"2009\": 0,\n \"2010\": 0,\n \"2011\": 0,\n \"2012\": 0,\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"COMBINED\": 0\n },\n \"VIOLENT\": {\n \"2005\": 0,\n \"2006\": 0,\n \"2007\": 0,\n \"2008\": 0,\n \"2009\": 0,\n \"2010\": 0,\n \"2011\": 0,\n \"2012\": 0,\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"COMBINED\": 0\n },\n \"TOTAL\": {\n \"2005\": 0,\n \"2006\": 0,\n \"2007\": 0,\n \"2008\": 0,\n \"2009\": 0,\n \"2010\": 0,\n \"2011\": 0,\n \"2012\": 0,\n \"2013\": 0,\n \"2014\": 0,\n \"2015\": 0,\n \"2016\": 0,\n \"2017\": 0,\n \"COMBINED\": 0\n }\n }\n\n #Look up this rows crime type in our crime lookup dictionary\n #Since we are using the get() function, we can set a default value if it fails to find what it's looking for\n #In this case, that default value is the string \"NONE\"\n crime_type = crime_types.get(primary_type, \"NONE\")\n \n if(crime_type != \"NONE\"):\n census_tracts[census_tract][crime_type][year] += 1\n census_tracts[census_tract][crime_type][\"COMBINED\"] += 1\n except:\n print(\"Error occured\")\n\nprint(\"Rows indexed: \" + str(rows_indexed))\n\n#Write an output CSV file for both property and violent crimes\nfor crime_type in [\"PROPERTY\", \"VIOLENT\"]:\n with open(crime_type + \".csv\", \"w\", newline=\"\") as csvfile:\n #Create a csv datawriter to write rows\n datawriter = csv.writer(csvfile)\n\n #Write our header row, consisting of \"CENSUS TRACT\" and every key from the \"years\" dictionary\n datawriter.writerow([\"CENSUS TRACT\", \"2005\", \"2006\", \"2007\", \"2008\", \"2009\", \"2010\", \"2011\", \"2012\", \"2013\", \"2014\", \"2015\", \"2016\", \"2017\", \"COMBINED\"])\n\n for census_tract, crimes in census_tracts.items():\n #Create a blank list for our data row. 
We will add the values we want to write\n data_row = []\n\n #Add the census tract number\n data_row.append(census_tract)\n\n #Loop through year information for that census tract\n for year, count in crimes[crime_type].items():\n data_row.append(crimes[crime_type][year])\n\n #Write our data row to the file\n datawriter.writerow(data_row)\n\n\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 31.5,
"blob_id": "8dfa1549553f3e0ec631d6a01f91a9e609d2bf1c",
"content_id": "4029f9bd1f2cdb9ff6b84663cba0d00571edbafc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 65,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 2,
"path": "/README.md",
"repo_name": "CarterSimonson/chicago-crime-data",
"src_encoding": "UTF-8",
"text": "# chicago-crime-data\nA Python script to parse Chicago crime data\n"
}
] | 2 |
idlesign/deflacue | https://github.com/idlesign/deflacue | 5edc5cbb15c23e4592d30f8ad03a054a596ddb18 | 711276d1166611e16e378ff68aaba76d00881c5c | 4eca97d57d55d559188ab9b16690af017c945293 | refs/heads/master | 2022-02-20T22:25:58.775200 | 2022-02-04T13:00:28 | 2022-02-04T13:00:28 | 7,225,866 | 38 | 15 | BSD-3-Clause | 2012-12-18T15:45:58 | 2021-04-22T16:41:24 | 2021-04-23T16:29:37 | Python | [
{
"alpha_fraction": 0.699999988079071,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 15.666666984558105,
"blob_id": "6011e33e4fee4838ca47448c84cfdf652074e08f",
"content_id": "d241df9aa412c4c26ed75784dd876a7b12dfb460",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 50,
"license_type": "permissive",
"max_line_length": 22,
"num_lines": 3,
"path": "/.coveragerc",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "[run]\ninclude = deflacue/*\nomit = deflacue/cli.py\n"
},
{
"alpha_fraction": 0.7005235552787781,
"alphanum_fraction": 0.7057591676712036,
"avg_line_length": 27.073530197143555,
"blob_id": "b4beed915995a4ca8df2224dfc23af59fad260af",
"content_id": "85660586a43d2c0113aaa9f85a2af40a1a551a70",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1910,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 68,
"path": "/README.rst",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "deflacue\n========\nhttp://github.com/idlesign/deflacue\n\n|release| |lic| |ci| |coverage|\n\n.. |release| image:: https://img.shields.io/pypi/v/deflacue.svg\n :target: https://pypi.python.org/pypi/deflacue\n\n.. |lic| image:: https://img.shields.io/pypi/l/deflacue.svg\n :target: https://pypi.python.org/pypi/deflacue\n\n.. |ci| image:: https://img.shields.io/travis/idlesign/deflacue/master.svg\n :target: https://travis-ci.org/idlesign/deflacue\n\n.. |coverage| image:: https://img.shields.io/coveralls/idlesign/deflacue/master.svg\n :target: https://coveralls.io/r/idlesign/deflacue\n\n\nWhat's that\n-----------\n\n*deflacue is a SoX based audio splitter appropriate to split audio CD images incorporated with .cue files.*\n\nIt is able to function both as a Python module and in command line mode.\n\n\nFeatures\n--------\n\n- Large variety of supported lossless input audio formats FLAC, WAV, etc. (due to SoX).\n- Batch audio files processing (including recursive path traversing).\n- Automatic audio collection hierarchy building (Artist/Year-Album/Tracknum-Title).\n- Automatic track metadata copying from .cue.\n\n\nRequirements\n------------\n\n* Python 3.6+\n* SoX command line utility - http://sox.sourceforge.net.\n\n Ubuntu users may install the following SoX packages: ``sox``, ``libsox-fmt-all``.\n\n\nUsage\n-----\n\nFrom Python\n~~~~~~~~~~~\n\n``from deflacue import deflacue`` - if you want to use it as module.\n\nUse ``Deflacue`` class for SoX interaction.\n\nUse ``CueParser`` class for .cue parsing.\n\nFrom Command Line\n~~~~~~~~~~~~~~~~~\n\n``deflacue --help`` in command line - to get help on utility usage.\n\nIn the following example we create audio collection in ``/home/idle/audio_collection/`` from audio CD images\nstored under ``/home/idle/audio_raw/`` processing Cue Sheet files created using ``windows-1251`` encoding:\n\n.. code-block::\n\n $ deflacue -e windows-1251 /home/idle/audio_raw/ -d /home/idle/audio_collection/\n\n"
},
{
"alpha_fraction": 0.6107238531112671,
"alphanum_fraction": 0.6160857677459717,
"avg_line_length": 27.25757598876953,
"blob_id": "1afb6ecddd67cab661844f9521691227dcebc756",
"content_id": "c2c67aecd9688e8bcddd03acc703e02de78e42e7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1865,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 66,
"path": "/setup.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "import io\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nPATH_BASE = os.path.dirname(__file__)\n\n\ndef read_file(fpath):\n \"\"\"Reads a file within package directories.\"\"\"\n with io.open(os.path.join(PATH_BASE, fpath)) as f:\n return f.read()\n\n\ndef get_version():\n \"\"\"Returns version number, without module import (which can lead to ImportError\n if some dependencies are unavailable before install.\"\"\"\n contents = read_file(os.path.join('deflacue', '__init__.py'))\n version = re.search('VERSION = \\(([^)]+)\\)', contents)\n version = version.group(1).replace(', ', '.').strip()\n return version\n\n\nsetup(\n name='deflacue',\n version=get_version(),\n url='http://github.com/idlesign/deflacue',\n\n description='deflacue is a SoX based audio splitter to split audio CD images incorporated with .cue files',\n long_description=read_file('README.rst'),\n license='BSD 3-Clause License',\n\n author='Igor `idle sign` Starikov',\n author_email='[email protected]',\n\n packages=['deflacue'],\n include_package_data=True,\n zip_safe=False,\n\n setup_requires=[] + (['pytest-runner'] if 'test' in sys.argv else []) + [],\n\n entry_points={\n 'console_scripts': ['deflacue = deflacue.cli:main'],\n },\n\n test_suite='tests',\n tests_require=[\n 'pytest',\n 'pytest-datafixtures',\n ],\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Sound/Audio :: Conversion',\n ],\n)\n"
},
{
"alpha_fraction": 0.6344015598297119,
"alphanum_fraction": 0.6344015598297119,
"avg_line_length": 29.579999923706055,
"blob_id": "9828e03481c8fc46579dd577874a2222d9a6cf82",
"content_id": "c7bb5f19e1fd91359d7038637d7903a2229aa653",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1529,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 50,
"path": "/deflacue/cli.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "import argparse\nimport logging\n\nfrom .deflacue import Deflacue, DeflacueError\n\n\ndef main():\n\n argparser = argparse.ArgumentParser('deflacue')\n\n argparser.add_argument(\n 'source_path', help='Absolute or relative source path with .cue file(s).')\n argparser.add_argument(\n '-r', help='Recursion flag to search directories under the source_path.', action='store_true')\n argparser.add_argument(\n '-d', help='Absolute or relative destination path for output audio file(s).')\n argparser.add_argument(\n '-e', help='Cue Sheet file(s) encoding.')\n argparser.add_argument(\n '--dry', help='Perform the dry run with no changes done to filesystem.', action='store_true')\n argparser.add_argument(\n '--debug', help='Show debug messages while processing.', action='store_true')\n\n parsed = argparser.parse_args()\n kwargs = {'source_path': parsed.source_path}\n\n if parsed.e is not None:\n kwargs['encoding'] = parsed.e\n\n if parsed.d is not None:\n kwargs['dest_path'] = parsed.d\n\n if parsed.debug:\n kwargs['use_logging'] = logging.DEBUG\n\n try:\n deflacue = Deflacue(**kwargs)\n\n if not deflacue.sox_check_is_available():\n raise DeflacueError(\n 'SoX seems not available. Please install it (e.g. `sudo apt-get install sox libsox-fmt-all`).'\n )\n\n if parsed.dry:\n deflacue.set_dry_run()\n\n deflacue.do(recursive=parsed.r)\n\n except DeflacueError as e:\n logging.error(e)\n"
},
{
"alpha_fraction": 0.3684210479259491,
"alphanum_fraction": 0.5263158082962036,
"avg_line_length": 19,
"blob_id": "8d2aee5b0554777abc07a7f02042d3283026830e",
"content_id": "b89310ff8ad7a85e2b000bbdaebcb1387c756f56",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 1,
"path": "/deflacue/__init__.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "VERSION = (2, 0, 1)"
},
{
"alpha_fraction": 0.5138678550720215,
"alphanum_fraction": 0.5163893103599548,
"avg_line_length": 23.481481552124023,
"blob_id": "ed7e3593766930555b819ce7bd09a8cb2a2a9876",
"content_id": "e93979198da6cee1cea53c3e5da0ba0afa50955d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5949,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 243,
"path": "/deflacue/parser.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "import logging\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple\n\nfrom .exceptions import ParserError\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef pos_to_frames(pos) -> int:\n \"\"\"Converts position (mm:ss:ff) into frames.\n\n :param pos:\n\n \"\"\"\n minutes, seconds, frames = map(int, pos.split(':'))\n seconds = (minutes * 60) + seconds\n rate = 44100\n return (seconds * rate) + (frames * (rate // 75))\n\n\nclass Context:\n \"\"\"Basic context.\"\"\"\n\n _default = {}\n\n def __init__(self, *, data: dict):\n self.data = {**self._default, **deepcopy(data)}\n\n def add(self, key: str, val: str):\n self.data[key] = val\n\n\nclass MetaContext(Context):\n \"\"\"Global .cue meta context.\"\"\"\n\n _default = {\n 'ALBUM': 'Unknown',\n 'PERFORMER': 'Unknown',\n 'DATE': None,\n }\n\n def add(self, key: str, val: str):\n if key == 'TITLE':\n key = 'ALBUM'\n super().add(key, val)\n\n\nclass FileContext(Context):\n \"\"\"File information.\"\"\"\n\n def __init__(self, *, path: str, ftype: str, data: dict):\n self.path: Path = Path(path)\n \"\"\"File path.\"\"\"\n\n self.type = ftype\n \"\"\"File type.\"\"\"\n\n self.tracks: List[TrackContext] = []\n \"\"\"Tracks in file.\"\"\"\n\n super().__init__(data=data)\n\n def __str__(self):\n return str(self.path)\n\n\nclass TrackContext(Context):\n \"\"\"Track information.\"\"\"\n\n _default = {\n 'TITLE': 'Unknown',\n }\n\n def __init__(self, *, file: FileContext, num: int, dtype: str):\n self.file = file\n \"\"\"File containing track.\"\"\"\n\n self.num = num\n \"\"\"Track number.\"\"\"\n\n self.type = dtype\n \"\"\"Track data type.\"\"\"\n\n self.start: int = 0\n \"\"\"Start position (frames).\"\"\"\n\n super().__init__(data=file.data)\n\n def __str__(self):\n return f\"{self.num} {self.title} @ {self.file}\"\n\n @property\n def title(self):\n return self.data.get('TITLE', '')\n\n @property\n def end(self) -> int:\n tracks = self.file.tracks\n end = 0\n\n for idx, track in enumerate(tracks):\n if track is self:\n try:\n end = tracks[idx+1].start\n\n except IndexError:\n pass\n\n break\n\n return end\n\n\nclass CueData:\n \"\"\"Represents data from .cue file.\"\"\"\n\n def __init__(self):\n self.meta = []\n\n self.meta = MetaContext(data={})\n \"\"\"Basic information.\"\"\"\n\n self.files: List[FileContext] = []\n \"\"\"Files in image.\"\"\"\n\n self.tracks: List[TrackContext] = []\n \"\"\"Tracks in image.\"\"\"\n\n self._current_file: Optional[FileContext] = None\n self._current_track: Optional[TrackContext] = None\n self._current_context: Context = self.meta\n\n def add_context(self, key, val):\n self._current_context.add(key, val)\n\n def add_file(self, *, path: str, ftype: str):\n file_context = FileContext(\n path=path,\n ftype=ftype,\n data=self._current_context.data\n )\n self._current_context = file_context\n self._current_file = file_context\n self.files.append(file_context)\n\n def add_track(self, *, num: int, dtype: str):\n file_context = self._current_file\n track_context = TrackContext(\n file=self._current_file,\n num=num,\n dtype=dtype,\n )\n track_context.add('TRACK_NUM', f'{num}')\n file_context.tracks.append(track_context)\n self._current_context = track_context\n self._current_track = track_context\n self.tracks.append(track_context)\n\n def add_track_index(self, *, pos: str):\n self._current_track.start = pos_to_frames(pos)\n\n\nclass CueParser:\n \"\"\"Simple Cue Sheet file parser.\"\"\"\n\n def __init__(self, lines: List[str]):\n self.lines = lines\n\n def run(self) -> 
CueData:\n\n cue = CueData()\n parse_cmd = self._parse_command\n unquote = self._unquote\n\n for line in self.lines:\n cmd, args = parse_cmd(line)\n\n if cmd == 'REM':\n cue.add_context(*parse_cmd(args))\n\n elif cmd == 'FILE':\n fpath, ftype = args.rsplit(' ', 1)\n fpath = unquote(fpath)\n cue.add_file(path=fpath, ftype=ftype)\n\n elif cmd == 'TRACK':\n num, _, dtype = args.partition(' ')\n cue.add_track(num=int(num), dtype=dtype)\n\n elif cmd == 'INDEX':\n num, _, pos = args.partition(' ')\n\n if num == '01':\n cue.add_context(f'{cmd} {num}', pos)\n cue.add_track_index(pos=pos)\n\n else:\n cue.add_context(cmd, args)\n\n return cue\n\n def _parse_command(self, cmd: str) -> Tuple[str, str]:\n command, _, args = cmd.partition(' ')\n args = self._unquote(args)\n\n LOGGER.debug(f'Parsed command `{command}`. Args: {args}')\n\n return command, args\n\n @classmethod\n def _unquote(cls, val: str) -> str:\n return val.strip(' \"')\n\n @classmethod\n def from_file(cls, fpath: Path, *, encoding: str = None) -> 'CueParser':\n\n def read(coding: str = None) -> Optional[CueParser]:\n\n try:\n with open(str(fpath), encoding=coding) as f:\n return CueParser([\n line.strip() for line in f.readlines()\n if line.strip()\n ])\n\n except UnicodeDecodeError:\n return None\n\n parser = read(encoding)\n\n if not parser:\n # Try unicode as a fallback.\n parser = read()\n\n if not parser:\n raise ParserError(\n 'Unable to read data from .cue file. '\n 'Please provide a correct encoding.'\n )\n\n return parser\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 22.66666603088379,
"blob_id": "07adb7c0a745833c68f7a8478f944695f2419284",
"content_id": "d73c51bcd6486a8017746bdf085969128191d963",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 6,
"path": "/deflacue/exceptions.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "\nclass DeflacueError(Exception):\n \"\"\"Exception type raised by deflacue.\"\"\"\n\n\nclass ParserError(Exception):\n \"\"\"Cue file parser error.\"\"\"\n"
},
{
"alpha_fraction": 0.5402963757514954,
"alphanum_fraction": 0.5663173198699951,
"avg_line_length": 26.127450942993164,
"blob_id": "e1b13642bc5e573ab7b156ce4e89cede33f6755a",
"content_id": "11c35768c528da257ffd5a5c7e6b8a2fdf2961f4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2837,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 102,
"path": "/tests/test_basic.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "import logging\nfrom collections import deque\nfrom pathlib import Path\n\nimport pytest\n\nfrom deflacue.deflacue import CueParser, Deflacue\nfrom deflacue.exceptions import ParserError\n\n\nclass TestParser:\n\n def test_encoding(self, datafix_dir):\n\n fpath = datafix_dir / 'vys2.cue'\n\n with pytest.raises(ParserError):\n CueParser.from_file(fpath)\n\n parser = CueParser.from_file(fpath, encoding='cp1251')\n cue = parser.run()\n\n assert cue.meta.data == {\n 'GENRE': 'Classic',\n 'DATE': '2020',\n 'COMMENT': 'Dumped',\n 'PERFORMER': 'В. С. Высоцкий',\n 'ALBUM': 'Пять песен',\n }\n\n assert len(cue.files) == 2\n assert str(cue.files[0]) == '01. Сторона А.flac'\n\n assert len(cue.tracks) == 5\n\n track = cue.tracks[3]\n assert str(track)\n assert track.start == 5426064\n assert track.end == 11205516\n assert track.data == {\n 'ALBUM': 'Пять песен',\n 'COMMENT': 'Dumped',\n 'DATE': '2020',\n 'GENRE': 'Classic',\n 'INDEX 01': '02:03:03',\n 'PERFORMER': 'В. С. Высоцкий',\n 'TITLE': '04. Песня о вещей Кассандре',\n 'TRACK_NUM': '4',\n }\n track = cue.tracks[4]\n assert track.start == 11205516\n assert track.end == 0\n assert track.file.path == Path('02. Сторона В.flac')\n\n\[email protected]\ndef sox_mock(monkeypatch):\n\n class SoxMock:\n\n def __init__(self):\n self.commands = []\n self.results = deque()\n\n def process_command(self, command, **kwargs):\n logging.getLogger('deflacue').debug(f'Executing shell command: {command}')\n self.commands.append(command)\n return 0\n\n mock = SoxMock()\n monkeypatch.setattr('deflacue.deflacue.Deflacue._process_command', mock.process_command)\n\n return mock\n\n\nclass TestDeflacue:\n\n def test_basic(self, datafix_dir, sox_mock, tmp_path, caplog):\n\n caplog.set_level(logging.DEBUG, logger='deflacue')\n\n dest = tmp_path / 'sub'\n\n deflacue = Deflacue(\n source_path=str(datafix_dir),\n dest_path=str(dest),\n encoding='cp1251',\n )\n commands = sox_mock.commands\n\n available = deflacue.sox_check_is_available()\n assert available\n assert len(commands) == 1\n\n deflacue.do(recursive=True)\n assert len(commands) == 6\n\n assert (dest / 'datafixtures' / 'В. С. Высоцкий' / '2020 - Пять песен').exists()\n caplog_text = caplog.text\n assert 'Extracting `5 - 05. История болезни.flac`' in caplog_text\n assert 'Еще Не Вечер.flac` is not found.' in caplog_text\n assert '--add-comment=\"TRACKNUMBER=4\"' in caplog_text\n"
},
{
"alpha_fraction": 0.586406409740448,
"alphanum_fraction": 0.5869948863983154,
"avg_line_length": 31.06289291381836,
"blob_id": "7c89d011d4fa736f2775aecb02fadfe87c22a8ad",
"content_id": "e01972a2acc644981a2cd041e3a31e9c3bfdbb1b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10196,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 318,
"path": "/deflacue/deflacue.py",
"repo_name": "idlesign/deflacue",
"src_encoding": "UTF-8",
"text": "\"\"\"\ndeflacue is a Cue Sheet parser and a wrapper for mighty SoX utility - http://sox.sourceforge.net/.\n\nSoX with appropriate plugins should be installed for deflacue to function.\nUbuntu users may install the following SoX packages: `sox`, `libsox-fmt-all`.\n\ndeflacue can function both as a Python module and in command line mode.\n\n\"\"\"\nimport logging\nimport os\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom subprocess import Popen, PIPE\nfrom typing import List, Dict, Union, Optional\n\nfrom .exceptions import DeflacueError\nfrom .parser import CueParser\n\nLOGGER = logging.getLogger(__name__)\nTypePath = Union[str, Path]\n\n\nCOMMENTS_CUE_TO_VORBIS = {\n 'TRACK_NUM': 'TRACKNUMBER',\n 'TITLE': 'TITLE',\n 'PERFORMER': 'ARTIST',\n 'ALBUM': 'ALBUM',\n 'GENRE': 'GENRE',\n 'DATE': 'DATE',\n 'ISRC': 'ISRC',\n 'COMMENT': 'DESCRIPTION',\n}\n\"\"\"Cue REM commands to Vorbis tags.\"\"\"\n\n\nclass Deflacue:\n \"\"\"deflacue functionality is encapsulated in this class.\n\n Usage example:\n deflacue = Deflacue('/home/idle/cues_to_process/')\n deflacue.do()\n\n This will search `/home/idle/cues_to_process/` and subdirectories\n for .cue files, parse them and extract separate tracks.\n Extracted tracks are stored in Artist - Album hierarchy within\n `deflacue` directory under each source directory.\n\n \"\"\"\n _dry_run = False # Some lengthy shell command won't be executed on dry run.\n _target_default = 'deflacue'\n\n def __init__(\n self,\n source_path: TypePath,\n *,\n dest_path: TypePath = None,\n encoding: str = None,\n use_logging: int = logging.INFO\n ):\n \"\"\"Prepares deflacue to for audio processing.\n\n :param source_path: Absolute or relative to the current directory path,\n containing .cue file(s) or subdirectories with .cue file(s) to process.\n\n :param dest_path: Absolute or relative to the current directory path\n to store output files in. If None, output files are saved in `deflacue` directory\n in the same directory as input file(s).\n\n :param encoding: Encoding used for .cue file(s).\n\n :param use_logging: Defines the verbosity level of deflacue. 
All messages\n produced by the application are logged with `logging` module.\n Examples: logging.INFO, logging.DEBUG.\n\n \"\"\"\n src = Path(source_path).absolute()\n self.path_source: Path = src\n self.path_target: Optional[Path] = dest_path\n self.encoding = encoding\n\n if use_logging:\n self._configure_logging(use_logging)\n\n LOGGER.info(f'Source path: {src}')\n\n if not src.exists():\n raise DeflacueError(f'Path `{src}` is not found.')\n\n if dest_path is not None:\n self.path_target = Path(dest_path).absolute()\n os.chdir(src)\n\n def _process_command(\n self,\n command: str,\n *,\n stdout=None,\n suppress_dry_run: bool = False\n ) -> int:\n \"\"\"Executes shell command with subprocess.Popen.\n Returns status code.\n\n \"\"\"\n LOGGER.debug(f'Executing shell command: {command}')\n\n if not self._dry_run or suppress_dry_run:\n prc = Popen(command, shell=True, stdout=stdout)\n prc.communicate()\n return prc.returncode\n\n return 0\n\n @classmethod\n def _configure_logging(cls, verbosity_lvl: int = logging.INFO):\n \"\"\"Switches on logging at given level.\"\"\"\n logging.basicConfig(level=verbosity_lvl, format='%(levelname)s: %(message)s')\n\n def _create_target_path(self, path: Optional[Path]):\n \"\"\"Creates a directory for target files.\"\"\"\n if self._dry_run or not path:\n return\n\n LOGGER.debug(f'Creating target path: {path} ...')\n os.makedirs(path, exist_ok=True)\n\n def set_dry_run(self):\n \"\"\"Sets deflacue into dry run mode, when all requested actions\n are only simulated, and no changes are written to filesystem.\n\n \"\"\"\n self._dry_run = True\n\n def get_dir_files(self, *, recursive: bool = False) -> Dict[Path, List[Path]]:\n \"\"\"Creates and returns dictionary of files in source directory.\n\n :param recursive: if True search is also performed within subdirectories.\n\n \"\"\"\n LOGGER.info(f'Enumerating files under the source path (recursive={recursive}) ...')\n\n files = {}\n if recursive:\n for current_dir, _, dir_files in os.walk(self.path_source):\n path = self.path_source / current_dir\n files[path] = [path / f for f in dir_files]\n\n else:\n files[self.path_source] = [\n f for f in self.path_source.iterdir()\n if f.is_file()\n ]\n\n return files\n\n def filter_target_extensions(self, files_dict: Dict[Path, List[Path]]) -> Dict[Path, List[Path]]:\n \"\"\"Takes file dictionary created with `get_dir_files` and returns\n dictionary of the same kind containing only files of supported\n types.\n\n :param files_dict:\n\n \"\"\"\n files_filtered = defaultdict(list)\n LOGGER.info('Filtering .cue files ...')\n paths = files_dict.keys()\n\n for path in paths:\n\n if path.name == self._target_default:\n continue\n\n for f in sorted(files_dict[path]):\n if f.suffix == '.cue':\n files_filtered[path].append(f)\n\n return files_filtered\n\n def sox_check_is_available(self) -> bool:\n \"\"\"Checks whether SoX is available.\"\"\"\n result = self._process_command('sox -h', stdout=PIPE, suppress_dry_run=True)\n return result == 0\n\n def sox_extract_audio(\n self,\n *,\n source_file: Path,\n pos_start_samples: int,\n pos_end_samples: int,\n target_file: Path,\n metadata: Dict[str, str] = None\n ):\n \"\"\"Using SoX extracts a chunk from source audio file into target.\n\n :param source_file: Source audio file path\n\n :param pos_start_samples: Trim position start (samples)\n\n :param pos_end_samples: Trim position end (samples)\n\n :param target_file: Trimmed audio file path\n\n :param metadata: Additional data (tags) dict.\n\n \"\"\"\n LOGGER.info(f' Extracting 
`{target_file.name}` ...')\n\n chunk_length_samples = ''\n if pos_end_samples:\n chunk_length_samples = f'{pos_end_samples - pos_start_samples}s'\n\n add_comment = ''\n if metadata is not None:\n LOGGER.debug(f'Metadata: {metadata}\\n')\n\n for key, val in COMMENTS_CUE_TO_VORBIS.items():\n val_meta = metadata.get(key)\n if val_meta:\n add_comment = f'--add-comment=\"{val}={val_meta}\" {add_comment}'\n\n LOGGER.debug(\n 'Extraction information:\\n'\n f' Source file: {source_file}\\n'\n f' Start position: {pos_start_samples} samples\\n'\n f' End position: {pos_end_samples} samples\\n'\n f' Length: {chunk_length_samples} sample(s)')\n\n command = (\n f'sox -V1 \"{source_file}\" '\n f'--comment=\"\" {add_comment} \"{target_file}\" '\n f'trim {pos_start_samples}s {chunk_length_samples}'\n )\n\n self._process_command(command, stdout=PIPE)\n\n def process_cue(self, *, cue_file: Path, target_path: Path):\n \"\"\"Parses .cue file, extracts separate tracks.\n\n :param cue_file: .cue filepath\n\n :param target_path: path to place files into\n\n \"\"\"\n LOGGER.info(f'\\nProcessing `{cue_file.name}`\\n')\n\n parser = CueParser.from_file(fpath=cue_file, encoding=self.encoding)\n cue = parser.run()\n\n cd_info = cue.meta.data\n tracks = cue.tracks\n\n def sanitize(val: str) -> str:\n return val.replace('/', '')\n\n title = cd_info['ALBUM']\n if cd_info['DATE'] is not None:\n title = f\"{cd_info['DATE']} - {title}\"\n\n bundle_path = target_path / sanitize(cd_info['PERFORMER']) / sanitize(title)\n self._create_target_path(bundle_path)\n\n len_tracks_count = len(str(len(tracks)))\n for track in tracks:\n track_file = track.file.path\n\n if not track_file.exists():\n LOGGER.error(f'Source file `{track_file}` is not found. Track is skipped.')\n continue\n\n track_num = str(track.num).rjust(len_tracks_count, '0')\n filename = f\"{track_num} - {sanitize(track.title)}.flac\"\n\n self.sox_extract_audio(\n source_file=track_file,\n pos_start_samples=track.start,\n pos_end_samples=track.end,\n target_file=bundle_path / filename,\n metadata=track.data\n )\n\n def do(self, *, recursive: bool = False):\n \"\"\"Main method processing .cue files in batch.\n\n :param recursive: if True .cue search is also performed within subdirectories.\n\n \"\"\"\n self._create_target_path(self.path_target)\n\n files_dict = self.filter_target_extensions(self.get_dir_files(recursive=recursive))\n\n dir_initial = os.getcwd()\n paths = sorted(files_dict.keys())\n\n for path in paths:\n os.chdir(path)\n\n LOGGER.info(f\"\\n{'====' * 10}\\nWorking on: {path}\\n\")\n\n if self.path_target is None:\n # When a target path is not specified, create `deflacue` subdirectory\n # in every directory we are working at.\n target_path = path / self._target_default\n\n else:\n # When a target path is specified, we create a subdirectory there\n # named after the directory we are working on.\n target_path = self.path_target / path.name\n\n self._create_target_path(target_path)\n\n LOGGER.info(f'Target (output) path: {target_path}')\n\n for cue in files_dict[path]:\n self.process_cue(cue_file=path / cue, target_path=target_path)\n\n os.chdir(dir_initial)\n\n LOGGER.info('We are done. Thank you.\\n')\n"
}
] | 9 |
claverru/kaggle | https://github.com/claverru/kaggle | c91eae6120f9702e5956daba36af872e6cf1530a | e1c0af81e95c4f5a5cd305b89c11e6e327bf2afe | dc995683d1e8d754dc0dc5ad8dd3418ecb40e4ba | refs/heads/master | 2023-03-07T13:25:50.253770 | 2021-02-20T10:07:56 | 2021-02-20T10:07:56 | 340,622,112 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6794871687889099,
"alphanum_fraction": 0.7628205418586731,
"avg_line_length": 42.85714340209961,
"blob_id": "7167e517f6d690f234681836d98e9f2bff633e57",
"content_id": "0612e9976d004d9b705c468adadc0fa32ea21af6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 7,
"path": "/CassavaLeafDesease/Dockerfile",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "FROM tensorflow/tensorflow:latest-gpu\r\n\r\nRUN pip install catboost==0.24.3 xgboost==1.2.1 scikit-learn==0.23.2 lightgbm==2.3.1 flaml\r\nRUN pip install pandas timm\r\n\r\nRUN pip install torch==1.7.0+cu110 torchvision==0.8.1+cu110 -f https://download.pytorch.org/whl/torch_stable.html\r\nRUN pip install pytorch_lightning"
},
{
"alpha_fraction": 0.5906111598014832,
"alphanum_fraction": 0.6023029088973999,
"avg_line_length": 26.81122398376465,
"blob_id": "3077585c2329a3347f84517fd0e96a5b370ac9a5",
"content_id": "02b0ae4c8f76146a805db327681a3c34d96d8177",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5645,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 196,
"path": "/Riid!/model.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\r\n\r\nfrom official.nlp.keras_nlp.layers import (\r\n TransformerEncoderBlock,\r\n PositionEmbedding\r\n)\r\nfrom official.nlp.modeling.layers import TransformerDecoderBlock\r\n\r\n\r\ndef create_padding_mask(seqs):\r\n mask = tf.cast(tf.reduce_all(tf.math.equal(seqs, 0.), axis=-1), tf.float32)\r\n return mask[:, tf.newaxis, :]\r\n\r\n\r\ndef create_look_ahead_mask(size, shift_right=0):\r\n mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, shift_right)\r\n return mask\r\n\r\n\r\ndef create_bundle_mask(task, windows_size):\r\n es = '...b, ...c -> ...bc'\r\n return tf.cast(\r\n tf.einsum(es, task, task) != tf.square(task)[:, tf.newaxis, :], \r\n tf.float32\r\n )+tf.eye(windows_size)\r\n\r\ndef get_series_model(\r\n n_features,\r\n content_ids,\r\n task_container_ids,\r\n part_ids,\r\n tag_ids,\r\n windows_size=64,\r\n d_model=24,\r\n num_heads=4,\r\n n_encoder_layers=2,\r\n n_decoder_layers=2\r\n ):\r\n # Input\r\n inputs = tf.keras.Input(shape=(windows_size, n_features), name='inputs') \r\n \r\n # Divide branches\r\n lag, content, content_type, task, user_ans, \\\r\n ans_correctly, lapse, exp, correct_ans, part = \\\r\n tf.unstack(inputs[..., :-6], axis=-1)\r\n tags = inputs[..., -6:]\r\n\r\n # Masks\r\n pad_mask = create_padding_mask(inputs)\r\n la_mask = create_look_ahead_mask(windows_size)\r\n bundle_mask = create_bundle_mask(task, windows_size)\r\n mask = (1-tf.maximum(pad_mask, la_mask))*bundle_mask\r\n \r\n # Create embeddings\r\n answer_emb_layer = tf.keras.layers.Embedding(7, d_model)\r\n user_ans_emb = answer_emb_layer(user_ans)\r\n correct_ans_emb = answer_emb_layer(correct_ans)\r\n\r\n content_type_emb = tf.keras.layers.Embedding(3, d_model)(content_type)\r\n content_emb = tf.keras.layers.Embedding(content_ids, d_model)(content)\r\n # task_emb = tf.keras.layers.Embedding(task_container_ids, d_model)(task)\r\n task_emb = tf.keras.layers.experimental.EinsumDense(\r\n '...x,xy->...y', d_model)(task[..., tf.newaxis])\r\n \r\n ans_correctly_emb = tf.keras.layers.Embedding(5, d_model)(ans_correctly)\r\n exp_emb = tf.keras.layers.Embedding(4, d_model)(exp)\r\n part_emb = tf.keras.layers.Embedding(part_ids, d_model)(part)\r\n \r\n tags = tf.ragged.boolean_mask(tags, tags!=0)\r\n tags_emb = tf.keras.layers.Embedding(tag_ids, d_model)(tags)\r\n tags_emb = tags_emb.to_tensor(shape=(None, windows_size, None, d_model))\r\n tags_emb = tf.reduce_sum(tags_emb, -2)\r\n\r\n # Time features\r\n time = tf.stack([lag, lapse], -1)\r\n time_emb = tf.keras.layers.experimental.EinsumDense(\r\n '...x,xy->...y', d_model)(time)\r\n \r\n # Position \r\n pos_emb = PositionEmbedding(windows_size)(content_emb)\r\n\r\n # Add emb\r\n e = tf.keras.layers.Add()([\r\n pos_emb,\r\n content_emb,\r\n correct_ans_emb,\r\n content_type_emb,\r\n part_emb,\r\n tags_emb\r\n ])\r\n\r\n d = tf.keras.layers.Add()([\r\n pos_emb,\r\n ans_correctly_emb,\r\n user_ans_emb,\r\n exp_emb,\r\n task_emb,\r\n time_emb\r\n ])\r\n\r\n for _ in range(n_encoder_layers):\r\n e = TransformerEncoderBlock(\r\n num_heads,\r\n d_model*2,\r\n 'swish',\r\n output_dropout=0.2,\r\n attention_dropout=0.1,\r\n inner_dropout=0.4\r\n )([e, mask])\r\n\r\n for _ in range(n_encoder_layers):\r\n d, _ = TransformerDecoderBlock(\r\n num_heads,\r\n d_model*2,\r\n 'swish',\r\n dropout_rate=0.2,\r\n attention_dropout_rate=0.1,\r\n intermediate_dropout=0.4\r\n )([d, e, mask, mask])\r\n\r\n\r\n output_a = tf.keras.layers.Dense(\r\n 4, activation='softmax', name='a')(d)\r\n\r\n correct_ids = tf.cast(tf.where(correct_ans>1, 
correct_ans-2, 0), tf.int32)\r\n\r\n output_c = tf.gather(output_a, correct_ids, batch_dims=2)\r\n output_c = tf.keras.layers.Lambda(tf.identity, name='c')(output_c)\r\n \r\n return tf.keras.Model(inputs, [output_a, output_c], name='model')\r\n\r\n\r\nloss_object_a = tf.keras.losses.SparseCategoricalCrossentropy(\r\n reduction=tf.keras.losses.Reduction.NONE)\r\n\r\ndef loss_function_a(real, pred):\r\n mask = tf.not_equal(real, 4)\r\n\r\n real = tf.clip_by_value(real, 0, 3)\r\n pred = tf.where(tf.math.is_finite(pred), pred, 0.5)\r\n\r\n loss_ = loss_object_a(real, pred)\r\n\r\n mask = tf.cast(mask, dtype=loss_.dtype)\r\n loss_ *= mask\r\n\r\n return tf.reduce_sum(loss_)/tf.reduce_sum(mask)\r\n\r\n\r\n\"\"\"\r\nloss_object_c = tf.keras.losses.SparseCategoricalCrossentropy(\r\n reduction=tf.keras.losses.Reduction.NONE)\r\n\r\ndef loss_function_c(real, pred):\r\n mask = tf.not_equal(real, 2)\r\n\r\n real = tf.clip_by_value(real, 0, 1)\r\n pred = tf.where(tf.math.is_finite(pred), pred, 0.5)\r\n\r\n loss_ = loss_object_c(real, pred)\r\n\r\n mask = tf.cast(mask, dtype=loss_.dtype)\r\n loss_ *= mask\r\n\r\n return tf.reduce_sum(loss_)/tf.reduce_sum(mask)\r\n\"\"\"\r\n\r\n\r\nloss_object_c = tf.keras.losses.BinaryCrossentropy(\r\n reduction=tf.keras.losses.Reduction.NONE)\r\n\r\ndef loss_function_c(real, pred):\r\n mask = tf.not_equal(real, 2)\r\n\r\n real = tf.clip_by_value(real, 0, 1)\r\n pred = tf.where(tf.math.is_finite(pred), pred, 0)\r\n\r\n real = real[..., tf.newaxis]\r\n pred = pred[..., tf.newaxis]\r\n\r\n loss_ = loss_object_c(real, pred)\r\n\r\n mask = tf.cast(mask, dtype=loss_.dtype)\r\n loss_ *= mask\r\n\r\n return tf.reduce_sum(loss_)/tf.reduce_sum(mask)\r\n\r\n\r\nclass AUC(tf.keras.metrics.AUC):\r\n\r\n def update_state(self, y_true, y_pred, sample_weight=None):\r\n y_true = y_true[:, -1]\r\n y_pred = y_pred[:, -1]#, 1]\r\n # y_pred = tf.where(tf.math.is_finite(y_pred), y_pred, 0)\r\n return super(AUC, self).update_state(\r\n y_true, y_pred, sample_weight=sample_weight)"
},
{
"alpha_fraction": 0.7544642686843872,
"alphanum_fraction": 0.7544642686843872,
"avg_line_length": 20.600000381469727,
"blob_id": "635fa47cbf8ba668d63ecd796dc0f9d68bfe78d6",
"content_id": "2bcce27e6915b782d2a9293dc615ff7fec5cb97f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 224,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 10,
"path": "/Riid!/Dockerfile",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "FROM tensorflow/tensorflow:latest-gpu\r\n\r\n\r\nRUN apt-get update && apt-get install git\r\n\r\n\r\n# git clone https://github.com/tensorflow/models\r\n# move official out of models\r\n\r\nRUN pip install pandas gin-config tensorflow-addons"
},
{
"alpha_fraction": 0.5110186338424683,
"alphanum_fraction": 0.5228942036628723,
"avg_line_length": 30.68000030517578,
"blob_id": "6a26ca30ebabd534b531ba04b4d99282d4c0a2d5",
"content_id": "200e087510414030f56e0a6d56da5a06b5360dfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8168,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 250,
"path": "/CassavaLeafDesease/train.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\nimport glob\r\nimport math\r\n\r\nimport timm\r\nimport torch\r\nimport pandas as pd\r\nfrom PIL import Image\r\nimport pytorch_lightning as pl\r\nfrom timm.data import create_transform\r\nfrom sklearn.model_selection import RepeatedStratifiedKFold\r\n\r\nfrom bitempered import bi_tempered_logistic_loss\r\n\r\n\r\nclass CassavaDataset(torch.utils.data.Dataset):\r\n\r\n def __init__(self, df, transforms=None):\r\n super().__init__()\r\n self.paths = df['image_path'].to_numpy()\r\n self.labels = pd.get_dummies(df['label']).to_numpy()\r\n self.transforms = transforms\r\n\r\n def __len__(self):\r\n return len(self.paths)\r\n\r\n def __getitem__(self, index):\r\n img_path = self.paths[index]\r\n label = self.labels[index]\r\n img = Image.open(img_path).convert(\"RGB\")\r\n if self.transforms is not None:\r\n image = self.transforms(img)\r\n return image, label\r\n \r\n\r\nclass CasavaDataModule(pl.LightningDataModule):\r\n\r\n def __init__(self, train_df, val_df, batch_size, img_size):\r\n super().__init__()\r\n self.batch_size = batch_size\r\n self.img_size = img_size\r\n self.train_df = train_df\r\n self.val_df = val_df\r\n\r\n def setup(self, stage=None):\r\n\r\n train_T = create_transform(\r\n input_size=self.img_size,\r\n is_training=True,\r\n color_jitter=0.25,\r\n auto_augment='rand-m9-mstd0.5-inc1',\r\n interpolation='bicubic',\r\n re_prob=0.25,\r\n re_mode='pixel',\r\n re_count=1,\r\n vflip=0.5\r\n )\r\n val_T = create_transform(\r\n input_size=self.img_size,\r\n is_training=False,\r\n color_jitter=0.4,\r\n auto_augment='rand-m9-mstd0.5-inc1',\r\n interpolation='bicubic',\r\n re_prob=0.25,\r\n re_mode='pixel',\r\n re_count=1,\r\n )\r\n self.train_ds = CassavaDataset(self.train_df, transforms=train_T)\r\n self.val_ds = CassavaDataset(self.val_df, transforms=val_T)\r\n \r\n def train_dataloader(self):\r\n return torch.utils.data.DataLoader(\r\n dataset=self.train_ds, \r\n batch_size=self.batch_size, \r\n num_workers=5,\r\n drop_last=True,\r\n shuffle=True\r\n )\r\n\r\n def val_dataloader(self):\r\n return torch.utils.data.DataLoader(\r\n dataset=self.val_ds, \r\n batch_size=self.batch_size, \r\n num_workers=5\r\n )\r\n\r\n\r\nclass CasavaModel(pl.LightningModule):\r\n\r\n def __init__(self, \r\n arch, \r\n pretrained,\r\n img_size,\r\n n_classes=5,\r\n lr=3e-4, \r\n plat_fac=0.5, \r\n plat_pat=1):\r\n super().__init__()\r\n\r\n self.model = timm.create_model(arch, pretrained=True)\r\n\r\n if 'deit' in arch or 'vit' in arch:\r\n\r\n old_stride_size = self.model.patch_embed.proj.stride[0]\r\n old_img_size = self.model.patch_embed.img_size[0]\r\n features = old_img_size**2//old_stride_size**2\r\n\r\n stride_size = math.ceil(math.sqrt(img_size**2//features))\r\n stride = (stride_size, stride_size)\r\n\r\n new_weight = torch.nn.functional.interpolate(\r\n self.model.patch_embed.proj.weight, \r\n size=stride, \r\n mode='bicubic', \r\n align_corners=True\r\n )\r\n self.model.patch_embed.proj.weight = torch.nn.Parameter(new_weight)\r\n self.model.patch_embed.proj.stride = stride\r\n self.model.patch_embed.proj.kernel_size = stride\r\n self.model.patch_embed.img_size = (img_size, img_size)\r\n self.model.head = torch.nn.Linear(\r\n in_features=self.model.head.in_features, \r\n out_features=n_classes, \r\n bias=True\r\n )\r\n else:\r\n self.model.eval()\r\n try:\r\n self.model.fc = torch.nn.Sequential(\r\n torch.nn.Dropout(0.3),\r\n torch.nn.Linear(\r\n in_features=self.model.fc.in_features, \r\n out_features=n_classes)\r\n )\r\n except:\r\n self.model.classifier = 
torch.nn.Sequential(\r\n torch.nn.Dropout(0.3),\r\n torch.nn.Linear(\r\n in_features=self.model.classifier.in_features, \r\n out_features=n_classes)\r\n )\r\n \r\n self.acc_object = pl.metrics.Accuracy()\r\n \r\n self.lr = lr\r\n self.plat_fac = plat_fac\r\n self.plat_pat = plat_pat\r\n\r\n self.save_hyperparameters()\r\n\r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n\r\n def training_step(self, batch, batch_idx):\r\n x, y_true = batch\r\n y_pred = self.forward(x)\r\n loss = bi_tempered_logistic_loss(y_pred, y_true, 0.7, 1.3, 0.1).mean()\r\n acc = self.acc_object(y_pred, y_true.argmax(-1))\r\n self.log('train_loss', loss, on_epoch=True, prog_bar=True, on_step=False)\r\n self.log('train_acc', acc, on_epoch=True, prog_bar=True, on_step=False)\r\n return loss\r\n \r\n def validation_step(self, batch, batch_idx):\r\n x, y_true = batch\r\n y_pred = self.forward(x)\r\n loss = bi_tempered_logistic_loss(y_pred, y_true, 0.7, 1.3, 0.1).mean()\r\n acc = self.acc_object(y_pred, y_true.argmax(-1))\r\n self.log('val_loss', loss, prog_bar=True)\r\n self.log('val_acc', acc, prog_bar=True)\r\n return loss\r\n\r\n def configure_optimizers(self):\r\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\r\n return {\r\n 'optimizer': optimizer,\r\n 'lr_scheduler': torch.optim.lr_scheduler.ReduceLROnPlateau(\r\n optimizer, factor=self.plat_fac, patience=self.plat_pat),\r\n 'monitor': 'val_acc'\r\n }\r\n\r\n\r\nARCH = 'swsl_resnext50_32x4d'\r\n# FOLDS = [4, 5, 6, 7, 8, 9]\r\nFOLDS = list(range(10))\r\nIMG_SIZE = 512\r\nBATCH_SIZE = 32\r\nLEARNING_RATE = 1.5e-4\r\n\r\nTRAIN_IMG_DIR = 'data/train_images/'\r\nOLD_TRAIN_IMG_DIR = 'data/olddata/train/**/**'\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n df = pd.read_csv('data/train.csv')\r\n df['image_path'] = TRAIN_IMG_DIR + df.image_id\r\n\r\n suspicious = pd.read_csv('data/suspicious.csv')\r\n df['suspicious'] = suspicious['suspicious'].astype('uint8')+1\r\n df['strats'] = (df.label+1)*df.suspicious\r\n\r\n # Old data competition\r\n old_df = pd.DataFrame(glob.glob(OLD_TRAIN_IMG_DIR), columns=['image_path'])\r\n d = {'cbb': 0, 'cbsd': 1, 'cgm': 2, 'cmd': 3, 'healthy': 4}\r\n old_df['label'] = old_df.image_path.apply(lambda x: d[x.split(os.sep)[-2]])\r\n\r\n skf = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=152)\r\n\r\n for i, (train_idx, val_idx) in enumerate(skf.split(df, df['strats'])):\r\n\r\n if i not in FOLDS:\r\n continue\r\n\r\n print(f'Training Fold {i}')\r\n \r\n torch.cuda.empty_cache()\r\n \r\n train_df = df.iloc[train_idx].copy()\r\n val_df = df.iloc[val_idx].copy()\r\n\r\n # Old data competition\r\n train_df = pd.concat([train_df, old_df])\r\n\r\n\r\n casava_data_module = CasavaDataModule(\r\n train_df, val_df, BATCH_SIZE, IMG_SIZE)\r\n \r\n casava_model = CasavaModel(ARCH, True, IMG_SIZE, 5, lr=LEARNING_RATE)\r\n \r\n trainer = pl.Trainer(\r\n accumulate_grad_batches=32,\r\n gpus=1, \r\n benchmark=True,\r\n logger=pl.loggers.CSVLogger(\r\n save_dir='lightning_logs', \r\n name=ARCH),\r\n precision=16,\r\n callbacks=[\r\n pl.callbacks.ProgressBar(),\r\n pl.callbacks.EarlyStopping(\r\n monitor='val_acc', patience=5, mode='max'),\r\n pl.callbacks.ModelCheckpoint(\r\n monitor='val_acc',\r\n mode='max', \r\n filename='{epoch}_{val_acc:.4f}')\r\n ]\r\n )\r\n \r\n trainer.fit(casava_model, casava_data_module)"
},
{
"alpha_fraction": 0.6233183741569519,
"alphanum_fraction": 0.6431240439414978,
"avg_line_length": 22.12612533569336,
"blob_id": "79eae835210df0681ae36f485bb19371eef808e5",
"content_id": "4c31b1df9a79790c5abe5347c31ecf429f29b6d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2676,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 111,
"path": "/Riid!/train.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\nimport pickle\r\nimport logging\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom official.nlp.transformer.optimizer import LearningRateSchedule\r\n\r\nfrom utils import *\r\nfrom model import *\r\n\r\n\r\ntf.get_logger().setLevel(logging.ERROR)\r\n\r\nprint('Loading DF')\r\ndf = pd.read_csv(\r\n 'data/train.csv.zip',\r\n usecols=dtype.keys(),\r\n dtype=dtype,\r\n # nrows=10**6\r\n)\r\n\r\ndf = df.set_index('user_id')\r\n\r\nquestions = pd.read_csv(\r\n 'data/questions.csv', \r\n dtype=dtype_questions,\r\n usecols=dtype_questions.keys(),\r\n index_col='question_id'\r\n)\r\n\r\nlectures = pd.read_csv(\r\n 'data/lectures.csv', \r\n dtype=dtype_lectures,\r\n usecols=dtype_lectures.keys(),\r\n)\r\n\r\nprint('Transforming')\r\nquestions, part_ids, lecture_map, tag_ids = transform_questions(\r\n questions, lectures)\r\ndf, content_ids, task_container_ids = transform_df(df, questions, lecture_map)\r\n\r\n\r\nprint(df.dtypes)\r\n\r\nwindows_size = 176\r\nd_model = 440\r\nnum_heads = 4\r\nn_encoder_layers = 4\r\nn_decoder_layers = 4\r\n\r\nwarm_steps = 6000\r\ntrain_ratio = 0.98\r\nepochs = 100\r\npatience = 3\r\nbatch_size = 128\r\nvalidation_freq = [18, 20, 22] + list(range(24, epochs+1))\r\ns_train = RiidSequence(df, windows_size, batch_size, train_ratio)\r\ns_val = RiidSequence(df, windows_size, batch_size, train_ratio, subset='val')\r\n\r\nprint(s_train.user_example().columns)\r\nprint(f'#users: {len(df.index.unique())}')\r\nprint(f'Batches in train: {len(s_train)}')\r\nprint(f'Batches in val: {len(s_val)}')\r\n\r\nn_features = s_train[0][0].shape[-1]\r\n\r\nlearning_rate = LearningRateSchedule(0.1, d_model, warm_steps)\r\noptimizer = tf.keras.optimizers.Adam(\r\n learning_rate, beta_1=0.95, beta_2=0.999, epsilon=1e-8, clipvalue=2.)\r\n\r\ntf.keras.backend.clear_session()\r\nmodel = get_series_model(\r\n n_features,\r\n content_ids,\r\n task_container_ids,\r\n part_ids,\r\n tag_ids,\r\n windows_size=windows_size,\r\n d_model=d_model,\r\n num_heads=num_heads,\r\n n_encoder_layers=n_encoder_layers,\r\n n_decoder_layers=n_decoder_layers\r\n )\r\n\r\nmodel.compile(\r\n optimizer=optimizer, \r\n loss={'a': loss_function_a, 'c': loss_function_c}, \r\n metrics={'c': AUC()}\r\n)\r\n\r\nhistory = model.fit(\r\n s_train,\r\n validation_data=s_val,\r\n epochs=epochs,\r\n workers=4,\r\n shuffle=True,\r\n use_multiprocessing=True,\r\n validation_freq=validation_freq,\r\n callbacks=tf.keras.callbacks.EarlyStopping(\r\n patience=patience, \r\n monitor='val_c_auc', \r\n mode='max', \r\n restore_best_weights=True),\r\n)\r\n\r\nmodel.save('model')\r\npickle.dump(\r\n history.history, open('history.pkl', 'wb'), protocol=pickle.HIGHEST_PROTOCOL)"
},
{
"alpha_fraction": 0.5445719361305237,
"alphanum_fraction": 0.5622242093086243,
"avg_line_length": 17.912281036376953,
"blob_id": "fc23fdddc7dd318029f2595df0290854668c7655",
"content_id": "164575aae95d263542dfdebc027f2ef723d20704",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1133,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 57,
"path": "/CassavaLeafDesease/train_automl.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import glob\r\n\r\nimport numpy as np\r\nimport torch\r\nimport pandas as pd\r\nfrom flaml import AutoML\r\n\r\ncs = ['0', '1', '2', '3', '4']\r\n\r\npaths = sorted(glob.glob('preds/**/**'))\r\n\r\nprint('Loading DFs')\r\ndfs = []\r\nfold = []\r\nfor i, path in enumerate(paths):\r\n print(path)\r\n df = pd.read_csv(path)\r\n df[cs] = torch.softmax(torch.tensor(df[cs].to_numpy()), -1).numpy()\r\n fold.append(df)\r\n if (i+1)%5==0:\r\n df = pd.concat(fold).sort_values('image_id').reset_index(drop=True)\r\n if dfs:\r\n df = df.drop(columns=['image_id', 'label'])\r\n dfs.append(df)\r\n fold = []\r\n\r\ndf = pd.concat(dfs, axis=1)\r\n\r\nprint(df.head())\r\n\r\nX = df.drop(columns=['label', 'image_id'])\r\ny = df['label']\r\n\r\n\r\nprint(X.head())\r\nprint(X.shape)\r\n\r\nprint('Training')\r\nautoml = AutoML()\r\n\r\nautoml.fit(\r\n X.to_numpy(), \r\n y.to_numpy(),\r\n time_budget=30000,\r\n max_iter=100,\r\n estimator_list=['lgbm'],\r\n metric='accuracy',\r\n task='classification',\r\n eval_method='cv',\r\n n_splits=5,\r\n # ensemble=True\r\n)\r\n\r\n\r\nimport pickle\r\n\r\npickle.dump(automl.model, open('L2.pkl', 'wb'), pickle.HIGHEST_PROTOCOL)"
},
{
"alpha_fraction": 0.5514018535614014,
"alphanum_fraction": 0.5763239860534668,
"avg_line_length": 26,
"blob_id": "c66dae1878fd40bd86f90ff66d202df956b69e46",
"content_id": "e393b36e7b5f8494292ac22d180a0d06e1c8c939",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 23,
"path": "/Riid!/local_testing.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nimport pandas as pd\r\n\r\ndtype_test = {\r\n 'row_id': 'int64', \r\n 'timestamp': 'int64', \r\n 'user_id': 'int32', \r\n 'content_id': 'int16', \r\n 'content_type_id': 'int8',\r\n 'task_container_id': 'int16', \r\n 'user_answer': 'int8', \r\n 'answered_correctly': 'int8', \r\n 'prior_question_elapsed_time': 'float32', \r\n 'prior_question_had_explanation': 'boolean',\r\n}\r\n\r\n\r\ndef test_generator(test_dir):\r\n for root, dirs, files in os.walk(test_dir, topdown=False):\r\n for name in files:\r\n test_path = os.path.join(root, name)\r\n yield pd.read_csv(test_path, dtype=dtype_test, index_col=0)"
},
{
"alpha_fraction": 0.542206883430481,
"alphanum_fraction": 0.5675597786903381,
"avg_line_length": 26.45081901550293,
"blob_id": "061ac44904be24b42771c592cdcbe2973e6c0d7d",
"content_id": "a245e8cb37d64da12764a8839cc0d9a929ded125",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3471,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 122,
"path": "/CassavaLeafDesease/save_scripts.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\nimport glob\r\nfrom pathlib import Path\r\n\r\nimport timm\r\nimport torch\r\nimport pandas as pd\r\nimport pytorch_lightning as pl\r\nfrom sklearn.model_selection import RepeatedStratifiedKFold\r\n\r\nfrom train_torch import CasavaDataModule, TRAIN_IMG_DIR, OLD_TRAIN_IMG_DIR\r\n\r\n\r\nclass ResNext101(pl.LightningModule):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.model = timm.create_model('swsl_resnext101_32x4d', pretrained=False)\r\n self.model.eval()\r\n self.model.fc = torch.nn.Sequential(\r\n torch.nn.Dropout(0.3),\r\n torch.nn.Linear(in_features=2048, out_features=5)\r\n ) \r\n \r\n\r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n \r\n\r\nclass ResNest101(pl.LightningModule):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.model = timm.create_model('resnest101e', pretrained=False)\r\n self.model.eval()\r\n self.model.fc = torch.nn.Sequential(\r\n torch.nn.Dropout(0.3),\r\n torch.nn.Linear(in_features=2048, out_features=5)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n\r\n\r\nclass ViT(pl.LightningModule):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.model = timm.create_model('vit_base_patch16_384', pretrained=False)\r\n new_weight = torch.nn.functional.interpolate(\r\n self.model.patch_embed.proj.weight, \r\n size=(20, 20), \r\n mode='bicubic', \r\n align_corners=True\r\n )\r\n self.model.patch_embed.proj.weight = torch.nn.Parameter(new_weight)\r\n self.model.patch_embed.proj.stride = (20, 20)\r\n self.model.patch_embed.proj.kernel_size = (20, 20)\r\n self.model.patch_embed.img_size = (480, 480)\r\n self.model.head = torch.nn.Linear(\r\n in_features=self.model.head.in_features, out_features=5)\r\n\r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n\r\n\r\n\r\nclass ResNext50(pl.LightningModule):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.model = timm.create_model('swsl_resnext50_32x4d', pretrained=False)\r\n self.model.eval()\r\n self.model.fc = torch.nn.Sequential(\r\n torch.nn.Dropout(0.3),\r\n torch.nn.Linear(in_features=2048, out_features=5)\r\n ) \r\n \r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n\r\n\r\ndef load_model(path):\r\n print('Loading', path)\r\n if 'resnest101' in path:\r\n return ResNest101.load_from_checkpoint(path)\r\n elif 'resnext101' in path:\r\n return ResNext101.load_from_checkpoint(path)\r\n elif 'vit' in path:\r\n return ViT.load_from_checkpoint(path)\r\n elif '50' in path:\r\n return ResNext50.load_from_checkpoint(path)\r\n else:\r\n return 'NO'\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n paths = sorted(glob.glob('lightning_logs/**/**/checkpoints/**'))\r\n\r\n for path in paths:\r\n\r\n if 'resnext50' not in path:\r\n continue\r\n\r\n torch.cuda.empty_cache()\r\n model = load_model(path)\r\n model = model.cpu()\r\n model.freeze()\r\n arch = path.split(os.sep)[1]\r\n fold = path.split(os.sep)[2].split('_')[-1]\r\n new_dir = f'models/{arch}'\r\n Path(new_dir).mkdir(parents=True, exist_ok=True)\r\n new_path = os.path.join(new_dir, f'{fold}.pt')\r\n\r\n script = model.to_torchscript()\r\n torch.jit.save(script, new_path)\r\n"
},
{
"alpha_fraction": 0.56686931848526,
"alphanum_fraction": 0.5781155228614807,
"avg_line_length": 25.433332443237305,
"blob_id": "1c877a92b8448ae285a4cfbc5b4ab9458fc0e3f0",
"content_id": "b2d3764c6bb1df170831234af469cff63fc58db0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3290,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 120,
"path": "/Riid!/inference.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\nimport math\r\nimport time\r\nimport logging\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom utils import *\r\n# from model_2 import *\r\n\r\nfrom local_testing import test_generator\r\n\r\n\r\nwindows_size = 128\r\nd_model = 256\r\nnum_heads = 4\r\nn_encoder_layers = 3\r\nn_decoder_layers = 3\r\nbs = 64\r\n\r\ndf = pd.read_csv(\r\n 'data/train.csv.zip',\r\n usecols=dtype.keys(),\r\n dtype=dtype,\r\n nrows=10**5\r\n)\r\n# df = df[df.answered_correctly!=-1]\r\ndf = df.groupby('user_id').tail(windows_size)\r\n\r\nquestions = pd.read_csv(\r\n 'data/questions.csv', \r\n dtype=dtype_questions,\r\n usecols=dtype_questions.keys(),\r\n index_col='question_id'\r\n)\r\n\r\nlectures = pd.read_csv(\r\n 'data/lectures.csv', \r\n dtype=dtype_lectures,\r\n usecols=dtype_lectures.keys(),\r\n)\r\n\r\nquestions, part_ids, lecture_map, tag_ids = transform_questions(questions, lectures)\r\ndf, content_ids, task_container_ids = transform_df(df, questions, lecture_map)\r\ndf = {uid: u.drop(columns='user_id') for uid, u in df.groupby('user_id')}\r\n\r\n\r\ns_infer = Inference(df, windows_size)\r\ncolumns = list(s_infer.users_d[115].columns)\r\nprint(f'\\n{columns}\\n')\r\n\r\nn_features = len(columns)\r\n\r\ntf.keras.backend.clear_session()\r\nmodel = tf.saved_model.load('models/model_21')\r\n\r\n\r\ncolumns[columns.index('answered_correctly')] = 'user_id'\r\ncolumns = [c for c in columns if c not in questions.columns] + ['row_id']\r\n\r\n\r\nwhile True:\r\n iter_test = test_generator('data/tests')\r\n s = time.time()\r\n for test in iter_test:\r\n\r\n try:\r\n prior_correct = eval(test['prior_group_answers_correct'].iloc[0])\r\n # prior_correct = [a for a in prior_correct]# if a != -1]\r\n except:\r\n prior_correct = []\r\n\r\n if prior_correct:\r\n prior_test.insert(\r\n s_infer.c_indices['answered_correctly'], \r\n 'answered_correctly', \r\n prior_correct\r\n )\r\n for uid, user in prior_test.groupby('user_id'):\r\n s_infer.update_user(uid, user.drop(columns='user_id'))\r\n\r\n # Save for later\r\n non_lectures_mask = test['content_type_id'] == 0\r\n \r\n # Filter test\r\n test = test[columns]\r\n\r\n # Add global features\r\n test, _, _ = transform_df(test, questions, lecture_map, ~non_lectures_mask)\r\n\r\n # Save test for later\r\n prior_test = test.drop(columns='row_id').copy()\r\n\r\n # Make x\r\n x = np.apply_along_axis(\r\n s_infer.get_user_for_inference,\r\n 1,\r\n test.drop(columns='row_id').to_numpy()\r\n )\r\n\r\n # Predict\r\n predictions = []\r\n for i in range(math.ceil(len(x)/bs)):\r\n b = tf.convert_to_tensor(x[i*bs:(i+1)*bs], dtype=tf.float32)\r\n predictions.append(model.signatures['serving_default'](b)['output'])\r\n predictions = tf.concat(predictions, axis=0)\r\n\r\n # predictions = predictions[:, :, 1]\r\n # idx = np.any(x!=0, -1).sum(-1, keepdims=True)-1\r\n # predictions = np.take_along_axis(predictions.numpy(), idx, -1)\r\n\r\n predictions = predictions[:, -1, 1]\r\n \r\n test['answered_correctly'] = predictions\r\n \r\n # env.predict(test[['row_id', 'answered_correctly']])\r\n\r\n print(time.time()-s)"
},
{
"alpha_fraction": 0.5667797923088074,
"alphanum_fraction": 0.5874985456466675,
"avg_line_length": 28.194345474243164,
"blob_id": "d7873714edbee3a2c1314457b2f5cfa1656ff303",
"content_id": "a5952012ec2781f8812845fd4f84fe82830d3885",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8543,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 283,
"path": "/Riid!/utils.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import random\r\nimport collections\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\n\r\ndtype = {\r\n 'answered_correctly': 'int8',\r\n # 'row_id': 'int64',\r\n 'timestamp': 'int64',\r\n 'user_id': 'int32',\r\n 'content_id': 'int16',\r\n 'content_type_id': 'int8',\r\n 'task_container_id': 'int16',\r\n 'user_answer': 'int8',\r\n 'prior_question_elapsed_time': 'float32',\r\n 'prior_question_had_explanation': 'boolean'\r\n}\r\n\r\ndtype_questions = {\r\n 'question_id': 'int32',\r\n # 'bundle_id': 'int32',\r\n 'correct_answer': 'int8',\r\n 'part': 'int8',\r\n 'tags': 'object',\r\n}\r\n\r\ndtype_lectures = {\r\n 'lecture_id': 'int32',\r\n 'tag': 'int16',\r\n 'part': 'int8',\r\n # 'type_of': 'object'\r\n}\r\n\r\n\r\ndef transform_questions(questions, lectures):\r\n # tags\r\n tag_names = [f'tag{i}' for i in range(6)]\r\n questions[tag_names] = tf.keras.preprocessing.sequence.pad_sequences(\r\n questions.tags.fillna('').str.split().to_numpy(), \r\n dtype=np.int16, \r\n value=-1\r\n )+1\r\n questions.drop(columns='tags', inplace=True)\r\n \r\n lectures[tag_names] = tf.keras.preprocessing.sequence.pad_sequences(\r\n lectures.tag.apply(lambda x: [x]).to_numpy(), \r\n dtype=np.int16,\r\n maxlen=6,\r\n value=-1\r\n )+1\r\n lectures.drop(columns='tag', inplace=True)\r\n lectures['correct_answer'] = -1\r\n lectures['correct_answer'] = lectures['correct_answer'].astype('int8')\r\n\r\n # Concat both\r\n start = len(questions)\r\n lecture_map = \\\r\n dict(zip(lectures.lecture_id, range(start, len(lectures)+start)))\r\n lectures.drop(columns='lecture_id', inplace=True)\r\n lectures.index = lecture_map.values()\r\n questions = pd.concat([questions, lectures])\r\n questions[tag_names] = questions[tag_names].astype('uint8')\r\n \r\n # Embeddings\r\n part_ids = questions.part.max()+1\r\n tag_ids = questions[tag_names].max().max()+1\r\n return questions, part_ids, lecture_map, tag_ids\r\n\r\n\r\ndef transform_df(df, questions, lecture_map, lectures_mask=None):\r\n\r\n if lectures_mask is None:\r\n lectures_mask = df.answered_correctly==-1\r\n\r\n df.loc[lectures_mask, 'content_id'] = \\\r\n df.loc[lectures_mask, 'content_id'].map(lecture_map)\r\n\r\n df['prior_question_had_explanation'] = (\r\n df.prior_question_had_explanation.astype('float32').fillna(2)+1\r\n ).astype('int8')\r\n\r\n df['timestamp'] = df['timestamp'].astype('float32')\r\n df['prior_question_elapsed_time'] = \\\r\n df['prior_question_elapsed_time'].astype('float32')\r\n\r\n df['prior_question_elapsed_time'] = np.log1p(df.prior_question_elapsed_time)\r\n\r\n df['prior_question_elapsed_time'] = \\\r\n df.prior_question_elapsed_time.fillna(2.714793e+00)\r\n df['prior_question_elapsed_time'] = \\\r\n (df.prior_question_elapsed_time-2.714793e+00)/4.546208e+00\r\n\r\n content_ids = questions.index.max()+2\r\n df = df.join(questions, on='content_id')\r\n df['content_type_id'] += 1\r\n df['content_id'] = df['content_id'].astype('int16')\r\n df['content_id'] += 1\r\n # df['task_container_id'] += 1\r\n df['task_container_id'] /= 10000\r\n task_container_ids = 10001\r\n return df, content_ids, task_container_ids\r\n\r\n\r\ndef fill_zeros_with_last(arr):\r\n prev = np.arange(len(arr))\r\n prev[arr == 0] = 0\r\n prev = np.maximum.accumulate(prev)\r\n return arr[prev]\r\n\r\ndef transform_ts(np_ts):\r\n log_delta = np.concatenate([[3.656529e+00], np.log1p(np.diff(np_ts))])\r\n log_delta = fill_zeros_with_last(log_delta)\r\n u, c = np.unique(np_ts, return_counts=True)\r\n div = np.where(np_ts == u[:, np.newaxis], 
c[:, np.newaxis], 0).sum(0)\r\n return (log_delta-3.656529e+00)/5.163177e+00/div\r\n\r\n\r\ndef add_features_to_user(user):\r\n user['timestamp'] = transform_ts(user.timestamp.to_numpy())\r\n user['answered_correctly'] = user['answered_correctly'].shift(fill_value=2)+2\r\n user['user_answer'] = user['user_answer'].shift(fill_value=4)\r\n user[['user_answer', 'correct_answer']] += 2\r\n return user\r\n\r\n\r\nclass RiidSequence(tf.keras.utils.Sequence):\r\n\r\n def __init__(self, \r\n df, \r\n windows_size,\r\n batch_size=512,\r\n train_ratio=0.8,\r\n subset='train'):\r\n self.df = df\r\n self.windows_size = windows_size\r\n self.batch_size = batch_size\r\n self.subset = subset\r\n \r\n user_indices = self.df.index.unique()\r\n idx = int(len(user_indices)*train_ratio)\r\n \r\n if self.subset == 'train':\r\n self.user_indices = user_indices[:idx]\r\n c = self.df.index.value_counts().clip(upper=1000)\r\n c = c[c.index.isin(self.user_indices)]\r\n self.c = c/c.sum()\r\n else:\r\n self.user_indices = user_indices[idx:]\r\n \r\n self.mapper = self._build_mapper()\r\n \r\n def _build_mapper(self):\r\n\r\n if self.subset == 'train':\r\n indices = np.random.choice(\r\n self.c.index, len(self.user_indices), p=self.c).tolist()\r\n a, b = np.unique(indices, return_counts=True)\r\n ids = np.random.choice(len(a), len(a), replace=False)\r\n a, b = a[ids], b[ids]\r\n indices = np.repeat(a, b).tolist()\r\n else:\r\n indices = []\r\n for uid in self.user_indices:\r\n indices.extend([(uid, idx) for idx in range(len(self.df.loc[[uid]]))])\r\n \r\n li = len(indices)\r\n return [indices[i:i+self.batch_size] for i in range(0, li, self.batch_size)]\r\n\r\n def on_epoch_end(self):\r\n self.mapper = self._build_mapper()\r\n\r\n def __len__(self):\r\n return len(self.mapper)\r\n\r\n def __getitem__(self, idx):\r\n if self.subset == 'train':\r\n x, ya, yc = self.get_train(idx)\r\n else:\r\n x, ya, yc = self.get_val(idx)\r\n x = tf.keras.preprocessing.sequence.pad_sequences(\r\n x, self.windows_size, dtype='float32', padding='pre', value=0.)\r\n ya = tf.keras.preprocessing.sequence.pad_sequences(\r\n ya, self.windows_size, dtype='uint8', padding='pre', value=4)\r\n yc = tf.keras.preprocessing.sequence.pad_sequences(\r\n yc, self.windows_size, dtype='uint8', padding='pre', value=2)\r\n return x, [ya, yc]\r\n\r\n def user_example(self):\r\n user = self.df.loc[[115]].copy()\r\n return add_features_to_user(user)\r\n\r\n def get_x_y(self, uid):\r\n user = self.df.loc[[uid]].copy()\r\n \r\n uya = user['user_answer'].copy().to_numpy()\r\n uya[uya==-1] = 4\r\n \r\n uyc = user['answered_correctly'].copy().to_numpy()\r\n uyc[uyc==-1] = 2\r\n \r\n ux = add_features_to_user(user).to_numpy()\r\n return ux, uya, uyc\r\n\r\n def get_train(self, idx):\r\n x, ya, yc = [], [], []\r\n\r\n for uid, n in np.array(np.unique(self.mapper[idx], return_counts=True)).T:\r\n ux, uya, uyc = self.get_x_y(uid)\r\n limit = max(1, len(ux)-self.windows_size+1)\r\n idxs = np.random.choice(limit, n)\r\n \r\n for idx in idxs:\r\n x.append(ux[idx:idx+self.windows_size])\r\n ya.append(uya[idx:idx+self.windows_size])\r\n yc.append(uyc[idx:idx+self.windows_size])\r\n\r\n return x, ya, yc\r\n\r\n def get_val(self, idx):\r\n x, ya, yc = [], [], []\r\n\r\n mapper = {}\r\n for uid, idx in self.mapper[idx]: \r\n mapper.setdefault(uid, []).append(idx)\r\n\r\n for uid in mapper:\r\n ux, uya, uyc = self.get_x_y(uid)\r\n for idx in mapper[uid]:\r\n if uyc[idx] == 2: # 2 means lecture\r\n continue\r\n high = idx+1\r\n low = max(0, high-self.windows_size)\r\n 
x.append(ux[low:high])\r\n ya.append(uya[low:high])\r\n yc.append(uyc[low:high])\r\n\r\n return x, ya, yc\r\n\r\nclass Inference():\r\n\r\n def __init__(self, users_d, windows_size):\r\n self.users_d = users_d\r\n self.windows_size = windows_size\r\n self.c_indices = {\r\n c: i for i, c in enumerate(self.users_d[115].columns)}\r\n\r\n def get_user_for_inference(self, user_row):\r\n ac = self.c_indices['answered_correctly']\r\n ua = self.c_indices['user_answer']\r\n t = self.c_indices['timestamp']\r\n ca = self.c_indices['correct_answer']\r\n \r\n uid = user_row[ac]\r\n user_row[ac] = 2\r\n\r\n user_row = user_row[np.newaxis, ...]\r\n\r\n if uid in self.users_d:\r\n x = np.concatenate([self.users_d[uid], user_row])\r\n else:\r\n x = user_row\r\n \r\n x[:, ac] = np.roll(x[:, ac], 1) + 2\r\n x[:, ua] = np.roll(x[:, ua], 1) + 2\r\n x[:, ca] += 2\r\n x[:, t] = transform_ts(x[:, t])\r\n \r\n if x.shape[0] < self.windows_size:\r\n return np.pad(x, [[self.windows_size-x.shape[0], 0], [0, 0]])\r\n elif x.shape[0] > self.windows_size:\r\n return x[-self.windows_size:]\r\n else:\r\n return x\r\n\r\n def update_user(self, uid, user):\r\n if uid in self.users_d:\r\n self.users_d[uid] = \\\r\n np.concatenate([self.users_d[uid], user])[-self.windows_size-5:]\r\n else:\r\n self.users_d[uid] = user"
},
{
"alpha_fraction": 0.6979166865348816,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 23,
"blob_id": "25a427ec9e97630a36af7ca190562790adac5449",
"content_id": "e3c5cd1fe1db5f16c0a5425e7ef4a2d999369b19",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 4,
"path": "/setup_script.sh",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "pip install -q kaggle\nmkdir ~/.kaggle\ncp kaggle.json ~/.kaggle/\nchmod 600 ~/.kaggle/kaggle.json "
},
{
"alpha_fraction": 0.5586334466934204,
"alphanum_fraction": 0.5743305683135986,
"avg_line_length": 27.29729652404785,
"blob_id": "b95ff33ece004b4383f57cae59d82ab50c3f7fe8",
"content_id": "56f1c6a30eef000da6069b0fb5c133a07938ee5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2166,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 74,
"path": "/CassavaLeafDesease/inference.py",
"repo_name": "claverru/kaggle",
"src_encoding": "UTF-8",
"text": "import os\r\nimport glob\r\nfrom pathlib import Path\r\n\r\nimport torch\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom sklearn.model_selection import RepeatedStratifiedKFold, StratifiedKFold\r\n\r\nfrom train_torch import CasavaDataModule, TRAIN_IMG_DIR, BATCH_SIZE, IMG_SIZE\r\n\r\n\r\narch_seed = {\r\n 'resnest101e': 43,\r\n 'swsl_resnext101_32x4d': 28,\r\n 'swsl_resnext50_32x4d': 152\r\n\r\n}\r\n\r\ndf = pd.read_csv('data/train.csv')\r\ndf['image_path'] = TRAIN_IMG_DIR + df.image_id\r\n\r\nsuspicious = pd.read_csv('data/suspicious.csv')\r\ndf['suspicious'] = suspicious['suspicious'].astype('uint8')+1\r\ndf['strats'] = (df.label+1)*df.suspicious\r\n\r\nfor arch, seed in arch_seed.items():\r\n if '50' not in arch:\r\n continue\r\n\r\n skf = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=seed)\r\n \r\n for i, (train_idx, val_idx) in enumerate(skf.split(df, df['strats'])):\r\n path = f'models/{arch}/{i}.pt'\r\n print(f'Inference: {path}')\r\n\r\n \r\n train_df = df.iloc[train_idx].copy()\r\n \r\n val_df = df.iloc[val_idx].copy()\r\n\r\n casava_data_module = CasavaDataModule(\r\n train_df, val_df, BATCH_SIZE, IMG_SIZE)\r\n \r\n casava_data_module.setup()\r\n \r\n val_dataloader = casava_data_module.val_dataloader()\r\n \r\n torch.cuda.empty_cache()\r\n model = torch.jit.load(path)\r\n model = model.cuda()\r\n \r\n y_pred = []\r\n y_true = []\r\n\r\n for x, y in tqdm(val_dataloader, ncols=100):\r\n pred = model(x.to('cuda')).data.cpu()\r\n y_pred.append(pred)\r\n y_true.append(y)\r\n\r\n y_true = torch.cat(y_true).numpy()\r\n y_pred = torch.cat(y_pred).numpy()\r\n\r\n print('Acc: ', (y_pred.argmax(-1)==y_true.argmax(-1)).mean())\r\n\r\n preds_df = pd.DataFrame(y_pred, columns=list(range(5)))\r\n preds_df['image_id'] = val_df['image_id'].to_numpy()\r\n preds_df['label'] = val_df['label'].to_numpy()\r\n\r\n pred_dir = f'preds/{arch}'\r\n Path(pred_dir).mkdir(parents=True, exist_ok=True)\r\n pred_path = os.path.join(pred_dir, f'{i}.csv')\r\n\r\n preds_df.to_csv(pred_path, index=False)"
}
] | 12 |
ATOM1Z3R/Mp3Player | https://github.com/ATOM1Z3R/Mp3Player | 12aa86459e0d7f141e67a654c84c9b3325e8a99e | 64886e686393606d34c53845a8cb97f9bef8c0a5 | 5302bd6c13ab7ec7b403ab016913a8aeedf688b0 | refs/heads/master | 2023-01-19T13:09:05.700573 | 2020-11-21T13:50:05 | 2020-11-21T13:50:05 | 314,823,964 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4583333432674408,
"alphanum_fraction": 0.6875,
"avg_line_length": 15,
"blob_id": "726bbe17e3a93762b3a6873a43277ce9b88c4ff3",
"content_id": "7d77b0dfaf44e5bedeaee9c08f57fd2214ef4896",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 96,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 6,
"path": "/requirements.txt",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "keyboard==0.13.5\npsutil==5.7.3\npydub==0.24.1\nsimpleaudio==1.0.4\nSQLAlchemy==1.3.20\ntqdm==4.52.0\n"
},
{
"alpha_fraction": 0.6912878751754761,
"alphanum_fraction": 0.6960227489471436,
"avg_line_length": 27.432432174682617,
"blob_id": "7ac731f33b342d603e58834d67b5f0d24a2e1288",
"content_id": "85428d9084cd3d03da67b7c2edcd3efacb566779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1056,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 37,
"path": "/Models.py",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "try:\n from sqlalchemy import Integer, String, Column, ForeignKey\n from sqlalchemy.ext.declarative import declarative_base\n from sqlalchemy.orm import relationship, sessionmaker, backref\n from sqlalchemy import create_engine\nexcept ImportError:\n print(\"sqlalchemy module is required\")\n exit()\n\nBase = declarative_base()\n\nclass PlayList(Base):\n __tablename__ = 'playlists'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(50))\n\nclass PLDetails(Base):\n __tablename__ = 'pldetails'\n\n id = Column(Integer, primary_key=True)\n element_path = Column(String(250))\n playlist_id = Column(Integer, ForeignKey('playlists.id'))\n playlist = relationship(\"PlayList\", backref=backref('pldetails'))\n\nclass Settings(Base):\n __tablename__ = 'settings'\n\n id = Column(Integer, primary_key=True)\n volume = Column(Integer)\n\ndef dbconnect():\n engine = create_engine('sqlite:///atomdb.db')\n Session = sessionmaker(bind=engine)\n session = Session()\n Base.metadata.create_all(engine)\n return session\n "
},
{
"alpha_fraction": 0.5208828449249268,
"alphanum_fraction": 0.5317487120628357,
"avg_line_length": 30.319149017333984,
"blob_id": "cb217d40dda2021ee59f4a80cd2549182085699a",
"content_id": "56b62663a415475daf0f671bcf1f32c91a890573",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2945,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 94,
"path": "/audiolib.py",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "try:\n from pydub import AudioSegment\n from pydub.playback import play\nexcept ImportError:\n print(\"pydub and simpleaudio module is required\")\n print(\"ffmpeg app is required\")\n exit()\ntry:\n from tqdm import tqdm\nexcept ImportError:\n print(\"tqdm module is required\")\n exit()\ntry:\n import keyboard\nexcept ImportError:\n print(\"keyboard module is required\")\n exit()\ntry:\n import psutil\nexcept ImportError:\n print(\"psutil module is required\")\n exit()\nfrom multiprocessing import Process, Queue\nfrom settingslib import getVolume\nfrom os import getpid, kill, path\nimport time\n\n\ndef createAudioSegment(file_path):\n file_format = \"\".join(file_path.split('.')[-1:])\n audio = AudioSegment.from_file(file_path, file_format)\n return audio\n\ndef audioInfo(audio_segment, file_path):\n duration = int(audio_segment.duration_seconds)\n file_name = \"\".join((path.basename(file_path)).split('.')[:1])\n \n print(f\"Now Playing: {file_name}\")\n for _ in tqdm(range(duration), ncols=85):\n time.sleep(1)\n print(\"\\033[A\\033[A \\033[A\")\n \ndef audioStart(audio_segment):\n audio = audio_segment + getVolume()\n play(audio)\n\ndef audioController(audio_proc, status_proc, queue):\n paused = False\n terminate = False\n procs = (psutil.Process(audio_proc), psutil.Process(status_proc))\n while True:\n time.sleep(0.1)\n if keyboard.is_pressed('alt+\\\\'):\n if paused == False:\n paused = True\n for p in procs:\n p.suspend()\n time.sleep(0.3)\n else:\n paused = False\n for p in procs:\n p.resume()\n time.sleep(0.3)\n elif keyboard.is_pressed('alt+;'):\n for p in procs:\n kill(p.pid, 9)\n print('\\nTerminated by User')\n terminate = True\n break\n elif keyboard.is_pressed('alt+]'):\n print(\"\\033[A \\033[A\")\n for p in procs:\n kill(p.pid, 9)\n break\n queue.put(terminate)\n\ndef startPlayList(audioList):\n for i in audioList:\n if \"\".join(i.split('.')[-1:]) in ['wav', 'mp3', 'mp4', 'm4a', 'flac', 'ogg']:\n try:\n terminate = Queue()\n play = Process(target=audioStart, args=(createAudioSegment(i),))\n info = Process(target=audioInfo, args=(createAudioSegment(i),i))\n play.start()\n info.start()\n ac = Process(target=audioController, args=(play.pid, info.pid, queue))\n ac.start()\n play.join()\n info.join()\n ac.terminate()\n if terminate.get():\n break\n except FileNotFoundError:\n continue\n\n"
},
{
"alpha_fraction": 0.545643150806427,
"alphanum_fraction": 0.5726141333580017,
"avg_line_length": 23.149999618530273,
"blob_id": "9ff7aa5afd8b1d3281e81c58f31fa518b6ec25fc",
"content_id": "af0abf496b017503627c3211e314e6ec8cf1cfb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 20,
"path": "/settingslib.py",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "from Models import *\n\nsession = dbconnect()\n\ndef getVolume():\n volume = session.query(Settings).first()\n return volume.volume\n\ndef setVolume(value):\n if value < 101 and value > 0:\n if value == 50:\n vol = 0\n else:\n vol = value - 50\n volume = session.query(Settings).first()\n volume.volume = vol \n session.commit()\n return f\"Volume set to {value}\"\n else:\n return \"ERROR | Type value between 1 and 100\""
},
{
"alpha_fraction": 0.6725460290908813,
"alphanum_fraction": 0.6733129024505615,
"avg_line_length": 28.659090042114258,
"blob_id": "1c8b5e22818cdab10723e9c4b4a527053318cd06",
"content_id": "3935d1552493e41bd6a605fce3822714c5acea47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1304,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 44,
"path": "/playlistlib.py",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "from Models import *\nfrom os.path import isfile, join\nfrom os import listdir\n\nsession = dbconnect()\n\ndef listPlayList():\n list_pl = session.query(PlayList).all()\n return list_pl\n\ndef createPlayList(plname):\n playList = PlayList(name=plname)\n session.add(playList)\n session.commit()\n added = session.query(PlayList).filter(PlayList.name==plname).order_by(PlayList.id.desc()).first()\n return f\"CREATED // ID: {added.id} | Name: {added.name}\"\n\ndef removePlayList(id_playlist):\n del_obj = session.query(PlayList).get(id_playlist)\n if del_obj == None:\n return \"Playlist not Exist\"\n session.delete(del_obj)\n session.commit()\n return \"Playlist Deleted\"\n\ndef addToPlayList(folder_path, playlist_id):\n pldetails = PLDetails(element_path=folder_path, playlist_id=playlist_id)\n session.add(pldetails)\n session.commit()\n\ndef findPlayList(id):\n count = session.query(PlayList).filter(PlayList.id==id).count()\n if count == 1:\n return True\n else:\n return False\n\ndef getPlayList(id):\n list_pl = []\n for record in session.query(PLDetails).filter(PLDetails.playlist_id == id):\n path = record.element_path\n for r in [i for i in listdir(path) if isfile(join(path, i))]:\n list_pl.append(join(path,r))\n return list_pl"
},
{
"alpha_fraction": 0.5505350828170776,
"alphanum_fraction": 0.5529131889343262,
"avg_line_length": 28.68235206604004,
"blob_id": "c499a8c453ab4a5f398701b0b4812ca9f6f4ea0c",
"content_id": "f879ccf78b67c670cd08f0d11124e67726e734c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2523,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 85,
"path": "/AtomPlayer.py",
"repo_name": "ATOM1Z3R/Mp3Player",
"src_encoding": "UTF-8",
"text": "# simpleaudio and ffmpeg required\nfrom Models import *\nfrom playlistlib import *\nfrom audiolib import *\nfrom settingslib import setVolume\nfrom os import listdir, system\nfrom os.path import isdir\nimport sys\nimport getopt\n\ndef usage():\n print(\"\"\"AtomPlayer\n \nUsage: atomplayer.py -p [playlist_id/catalog/audio_file]\n-l --list - list playlists\n-c --create [playlist_name] - create playlist with given name\n-a --add [playlist_id] -f --folder [catalog_path] - add catalog to playlist\n-r --remove [playlist_id] - remove playlist with given id\n-v --volume [value] - set volume to given value\n\nControlls:\nalt+\\\\ - pause/resume audio\nalt+] - next audio\nalt+; - quit program\n\nExamples:\natomplayer.py -p audio.mp3\natomplayer.py -a 4 c:\\\\audio\\\\playlist\natomplayer.py -r 9\n\"\"\")\n\ndef getAudioList(catalog_path):\n audioList = listdir(catalog_path)\n return [catalog_path+\"\\\\\"+s for s in audioList]\n\ndef main():\n folder_path = \"\"\n clear = lambda:system(\"cls\")\n add_flag = False\n\n if not len(sys.argv[1:]):\n usage()\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hlc:r:a:f:p:v:\",\n [\"help\",\"list\",\"create\",\"folder\",\"add\",\"remove\", \"volume\", \"play\"])\n except getopt.GetoptError as err:\n print(str(err))\n usage()\n\n for o,a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n elif o in (\"-l\", \"--list\"):\n for item in listPlayList():\n print(f\"ID: {item.id} | {item.name}\")\n elif o in (\"-c\", \"--create\"):\n print(createPlayList(a))\n elif o in (\"-r\", \"--remove\"):\n print(removePlayList(int(a)))\n elif o in (\"-v\", \"--volume\"):\n print(setVolume(int(a)))\n elif o in (\"-f\", \"--folder\"):\n folder_path = a\n elif o in (\"-a\", \"--add\"):\n add_flag = True\n playlist_id = a\n elif o in (\"-p\", \"--play\"):\n try:\n if a.isdigit() and int(a) >= 0:\n startPlayList(getPlayList(int(a)))\n elif isdir(a):\n startPlayList(getAudioList(a))\n else:\n startPlayList([a])\n except KeyboardInterrupt:\n print(\"ERROR | Invalid argument. If file name contains spaces use quote: \\\"file name\\\".\")\n \n if add_flag == True and folder_path != \"\":\n addToPlayList(folder_path, playlist_id)\n print(\"Catalog has been added\")\n add_flag = False\n\nif __name__==\"__main__\":\n main()\n"
}
] | 6 |
preddy5/FtpSearch | https://github.com/preddy5/FtpSearch | f01106a89b0ee52dadcb5bc53229b055b4d96499 | ecfbdd882e628312bd6dd67a053cbf19c7421794 | 84a56f3fc83b7da0b2e18c7670107800ba85fa97 | refs/heads/master | 2021-05-26T12:51:16.784462 | 2013-01-18T19:37:54 | 2013-01-18T19:37:54 | 7,683,202 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6648351550102234,
"alphanum_fraction": 0.6648351550102234,
"avg_line_length": 29.25,
"blob_id": "3952f267b4981ff4fd105a1ae20ef310acf68cd0",
"content_id": "43ab1339abd858c54faa6f0cedc22521f8f3402d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 12,
"path": "/exceptions.py",
"repo_name": "preddy5/FtpSearch",
"src_encoding": "UTF-8",
"text": "\nclass FtpSearch(Exception):\n \"\"\"Base class for exceptions raised by Ftpsearch\"\"\"\n\n\nclass UrlNotFound(FtpSearch):\n \"\"\"Exception for url not found\"\"\"\n def __init__(self,message):\n super(UrlNotFound,self).__init__(message)\n\nclass NoProtocol(FtpSearch):\n def __init__(self):\n super(NoProtocol,self).__init__('Wrong Protocol or No Protocol')\n"
},
{
"alpha_fraction": 0.8403361439704895,
"alphanum_fraction": 0.8403361439704895,
"avg_line_length": 38.33333206176758,
"blob_id": "4955ebf388988304465b9aa21e44e4fd59e51d8a",
"content_id": "7c016287094b98a2b50104ac9b874d4417235fe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 119,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 3,
"path": "/__init__.py",
"repo_name": "preddy5/FtpSearch",
"src_encoding": "UTF-8",
"text": "from FtpSearch.main import FtpRepo,SetProxy\nfrom FtpSearch.search import Page,OpenUrl\nfrom FtpSearch.version import *\n\n"
},
{
"alpha_fraction": 0.5800933241844177,
"alphanum_fraction": 0.5878693461418152,
"avg_line_length": 41.400001525878906,
"blob_id": "2bb2e2ec014f69007b05d183bf5fe9c33089c5dc",
"content_id": "644b6fcae8dfd073f78c4dd7169938d496b6a286",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 15,
"path": "/proxy_package.py",
"repo_name": "preddy5/FtpSearch",
"src_encoding": "UTF-8",
"text": "\nimport urllib2\n\nclass ProxyPackage(object):\n \"\"\"\n This class is used for installing python packages or to use urllib or urllib2 under proxy server\n create a instance of this class and urlopen will show no error\n \"\"\"\n def __init__(self,kind='http',name='',password='',proxy='',port=''):\n if name=='':\n auth = '%s://%s:%s' % (kind, proxy , port)\n else:\n auth = '%s://%s:%s@%s:%s' % (kind,name , password , proxy , port)\n self.handler = urllib2.ProxyHandler({kind: auth})\n self.opener = urllib2.build_opener(self.handler)\n urllib2.install_opener(self.opener)\n \n"
},
{
"alpha_fraction": 0.5259391665458679,
"alphanum_fraction": 0.5339892506599426,
"avg_line_length": 30,
"blob_id": "377142f295d2ff3a101c7e90ee652ab86db4521b",
"content_id": "410371952b63c6c9e83729a81b40b4fe25765e15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1118,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 36,
"path": "/main.py",
"repo_name": "preddy5/FtpSearch",
"src_encoding": "UTF-8",
"text": "import urllib2\nfrom FtpSearch.exceptions import *\nfrom FtpSearch.search import *\nfrom FtpSearch.proxy_package import *\n \nclass FtpRepo(Page):\n \"\"\"\n This class is used to search repo for the keyword\n \n \"\"\"\n def __init__(self,url):\n self.url = url\n super(FtpRepo,self).__init__(url)\n\n def searchRepo(self,keyword, depth, strict_search=False):\n try:\n self.search(keyword, strict_search) \n #print self.page_info\n leng = self.page_info[0].split().__len__()-1\n except:\n return\n for i in self.page_info:\n if depth-1 == 0:\n break\n l = i.split()\n if not '.' in l[-1]:\n FtpRepo(self.url+'/'+\"%20\".join(l[leng:])+'/').searchRepo(keyword,depth-1,strict_search)\n \n\nclass SetProxy(ProxyPackage):\n \"\"\"\n This class is used to set proxy for searching the repo\n \"\"\"\n\n def __init__(self,kind='http',name='',password='',proxy='',port=''):\n super(SetProxy,self).__init__(kind,name,password,proxy,port)\n\n\n"
},
{
"alpha_fraction": 0.5452282428741455,
"alphanum_fraction": 0.5485477447509766,
"avg_line_length": 24.595745086669922,
"blob_id": "c98f3c8b8a14dc8402910945fdedd5c0e4ba0dd1",
"content_id": "0daa32e41ef3a02ef93ebffd3982b79f6909d1b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1205,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 47,
"path": "/search.py",
"repo_name": "preddy5/FtpSearch",
"src_encoding": "UTF-8",
"text": "\nimport urllib2\nfrom FtpSearch.exceptions import*\n\nprotocols = 'ftp://'\n\nclass OpenUrl(object):\n \n \"\"\"\n This class opens a url after checking the protocol and has a class\n attribute which is urlopen instance of the passed url\n \"\"\"\n\n def __init__(self,url):\n if(not url[0:6]== protocols):\n raise NoProtocol()\n self.url = url\n try:\n self.instance =urllib2.urlopen(self.url)\n except: \n return\n def instance():\n return self.instance\n\n\nclass Page(OpenUrl):\n\n \"\"\"\n This class instance prints the occurence of the keyword in url given\n \"\"\"\n \n def __init__(self,url):\n super(Page,self).__init__(url)\n\n def search(self,keyword, strict_search=False):\n self.page_info = self.instance.readlines()\n if not strict_search:\n keyword = keyword.lower()\n self._search(keyword,strict_search)\n\n def _search(self,keyword,strict_search):\n for i in self.page_info:\n l = i.split()\n for j in l:\n if not strict_search:\n j = j.lower()\n if keyword in j:\n print self.url,' ',j\n\n"
}
] | 5 |
marty-sullivan/troposphere | https://github.com/marty-sullivan/troposphere | 51edee0ad8b82dc7315bef2f0559881722be4d0d | bed427d77edb31da6d9294d30fab475f93599082 | 5a594c0b11b90bb9daa1eb5f1b76f8d537c73d7d | refs/heads/master | 2020-04-20T18:11:26.363803 | 2019-02-04T00:56:52 | 2019-02-04T00:56:52 | 169,011,101 | 0 | 1 | BSD-2-Clause | 2019-02-04T00:55:47 | 2019-02-02T19:32:37 | 2019-02-02T19:38:03 | null | [
{
"alpha_fraction": 0.602902352809906,
"alphanum_fraction": 0.6134564876556396,
"avg_line_length": 27.074073791503906,
"blob_id": "7469043fdbc4e6e50cb4e150c2d4e7c9a7a1ab66",
"content_id": "d50f357082ee4b9eebd83082ffcb2a99cc11836d",
"detected_licenses": [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 758,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 27,
"path": "/tests/test_codebuild.py",
"repo_name": "marty-sullivan/troposphere",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom troposphere import codebuild\n\n\nclass TestCodeBuild(unittest.TestCase):\n def test_linux_environment(self):\n environment = codebuild.Environment(\n ComputeType='BUILD_GENERAL1_SMALL',\n Image='aws/codebuild/ubuntu-base:14.04',\n Type='LINUX_CONTAINER'\n )\n environment.to_dict()\n\n def test_windows_environment(self):\n environment = codebuild.Environment(\n ComputeType='BUILD_GENERAL1_LARGE',\n Image='aws/codebuild/windows-base:1.0',\n Type='WINDOWS_CONTAINER'\n )\n environment.to_dict()\n\n def test_source_codepipeline(self):\n source = codebuild.Source(\n Type='CODEPIPELINE'\n )\n source.to_dict()\n"
},
{
"alpha_fraction": 0.5329629778862,
"alphanum_fraction": 0.5377777814865112,
"avg_line_length": 28.34782600402832,
"blob_id": "376d05247a1bbbaeb408883961ea65ae81c5e038",
"content_id": "e4d4bcc5b3efe2ac0a6e4bc1283d5b2f92b3b07f",
"detected_licenses": [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2700,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 92,
"path": "/tests/test_elasticloadbalancerv2.py",
"repo_name": "marty-sullivan/troposphere",
"src_encoding": "UTF-8",
"text": "import unittest\n\nfrom troposphere.elasticloadbalancingv2 import Action, RedirectConfig, \\\n FixedResponseConfig\n\n\nclass TestListenerActions(unittest.TestCase):\n def test_redirect_action(self):\n Action(\n Type='redirect',\n RedirectConfig=RedirectConfig(\n StatusCode='HTTP_301',\n Protocol='HTTPS',\n Host='api.troposphere.org',\n Path='redirect/#{path}'\n )\n ).to_dict()\n\n def test_fixed_response_action(self):\n Action(\n Type='fixed-response',\n FixedResponseConfig=FixedResponseConfig(\n ContentType='text/plain',\n MessageBody='I am a fixed response',\n StatusCode='200'\n )\n ).to_dict()\n\n def test_forward_action(self):\n Action(\n Type='forward',\n TargetGroupArn=''\n ).to_dict()\n\n def test_redirect_action_config_one_of(self):\n with self.assertRaises(ValueError):\n RedirectConfig(\n StatusCode='HTTP_200'\n ).to_dict()\n\n def test_fixed_response_config_one_of(self):\n with self.assertRaises(ValueError):\n FixedResponseConfig(\n ContentType='application/octet-stream',\n ).to_dict()\n\n def test_forward_action_requires_target_arn(self):\n with self.assertRaises(ValueError):\n Action(\n Type='forward'\n ).to_dict()\n\n def test_fixed_response_requires_fixed_response_config(self):\n with self.assertRaises(ValueError):\n Action(\n Type='fixed-response'\n ).to_dict()\n\n def test_redirect_action_requires_redirect_config(self):\n with self.assertRaises(ValueError):\n Action(\n Type='redirect'\n ).to_dict()\n\n def test_target_arn_only_forward(self):\n with self.assertRaises(ValueError):\n Action(\n Type='redirect',\n TargetGroupArn=''\n ).to_dict()\n\n def test_redirect_config_only_with_redirect(self):\n with self.assertRaises(ValueError):\n Action(\n Type='forward',\n RedirectConfig=RedirectConfig(\n StatusCode='HTTP_301',\n )\n ).to_dict()\n\n def test_fixed_response_config_only_with_fixed_response(self):\n with self.assertRaises(ValueError):\n Action(\n Type='forward',\n FixedResponseConfig=FixedResponseConfig(\n ContentType='text/plain',\n )\n ).to_dict()\n\n\nif __name__ == '__main__':\n unittest.main()\n"
}
] | 2 |
allenyang79/member-system | https://github.com/allenyang79/member-system | 6480af3c7a1ad306d121b8576108e3886247ea06 | d45292fbf69c2fd28708b8842ffb9e4496502437 | 297f77d523b04db42a186a987dfe66bbc502eef9 | refs/heads/master | 2020-12-25T15:07:55.875700 | 2016-06-26T16:04:15 | 2016-06-27T02:16:32 | 61,340,104 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.571295976638794,
"alphanum_fraction": 0.577608585357666,
"avg_line_length": 28.922222137451172,
"blob_id": "8845327944ed8f4620a99b23f35ef9ae9b182840",
"content_id": "b1fedccbf0ddb6cf4ecbf3c6f4f604fab7a936a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5386,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 180,
"path": "/app/auth.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport functools\nimport json\nimport os\nimport sys\nimport time\nimport traceback\nimport binascii\nimport flask\n#from flask_jwt import JWT, jwt_required, current_identity\nfrom werkzeug.security import safe_str_cmp\nimport jwt\nimport jwt.exceptions\n\nfrom app.error import InvalidError\n#from app.auth import AuthManager\nfrom app.models.models import Admin\n\n\nclass LoginFailError(InvalidError):\n def __init__(self, message='Login fail.', status_code=403):\n super(LoginFailError, self).__init__(message, status_code)\n\n\nclass UnauthorizedError(InvalidError):\n def __init__(self, message='Unauthorized.', status_code=403):\n super(UnauthorizedError, self).__init__(message, status_code)\n\n\nclass AuthManager(object):\n \"\"\"A auth plugin for flask.\n\n AuthManger auth the user by JWT encode/decode. and put a jwt key on cookie.\n if auth fail, it will riase UnauthorizedError or LoginFailError.\n and you can custom errorHandler on flask app to handler this type of error.\n\n \"\"\"\n def __init__(self, app=None):\n self.app = None\n self.mode = 'deny_first'\n self.whitelist = []\n\n if app is not None:\n self.app = app\n self.init_app(self.app)\n\n def init_app(self, app):\n self.app = app\n self.app.extensions['jwt'] = self\n self.app.before_request(self.before_request)\n\n def before_request(self):\n if hasattr(flask.request, 'url_rule') and hasattr(flask.request.url_rule, 'endpoint'):\n endpoint = flask.request.url_rule.endpoint\n else:\n endpoint = None\n\n #print \"==before_login on AuthManager==\"\n #print self.app.view_functions.keys()\n #if not endpoint:\n # pass\n #elif endpoint in self.whitelist:\n # pass\n #elif self.auth():\n # pass\n\n def login_user(self, payload):\n \"\"\"\n .. code-block:: python\n\n payload = {\n ...\n 'exp': '(Expiration Time) Claim',\n }\n \"\"\"\n\n if 'exp' not in payload:\n payload['exp'] = int(time.time()) + self.app.config.get('JWT_EXPIRE_TIME', 86400)\n token = jwt.encode(payload, self.app.config['JWT_SECRET'], algorithm='HS256', headers={'salt': binascii.hexlify(os.urandom(16))})\n resp = flask.make_response()\n resp.headers['content-type'] = 'application/json; charset=utf-8'\n resp.set_cookie('jwt', token, expires=payload['exp'])\n\n\n #username = payload['username']\n if getattr(self, '_load_user') and hasattr(self._load_user, '__call__'):\n flask.g.me = self._load_user(payload)\n\n return resp\n\n def logout_user(self):\n resp = flask.make_response()\n resp.headers['content-type'] = 'application/json; charset=utf-8'\n resp.set_cookie('jwt', '', expires=0)\n return resp\n #flask.request.set_cookie('jwt', None, expires=0)\n #return True\n\n def auth(self):\n try:\n encoded = flask.request.cookies.get('jwt')\n if not encoded:\n return False, 'No JWT token.'\n\n payload = jwt.decode(encoded, self.app.config['JWT_SECRET'], algorithms=['HS256'])\n if not payload:\n return False, 'Payload is empty.'\n\n if int(time.time()) > payload['exp'] > int(time.time()):\n return False, 'JWT token expired.'\n\n #username = payload['username']\n if getattr(self, '_load_user') and hasattr(self._load_user, '__call__'):\n flask.g.me = self._load_user(payload)\n else:\n raise Exception('please implement load_user to mixin.')\n return True, None\n except jwt.exceptions.DecodeError as e:\n return False, 'Jwt deocode fail.'\n\n\n ##jwt.encode({'exp': 1371720939}, 'secret')\n # def add_whitelist(self, rule):\n # self.whitelist.append(rule.func_name)\n # return rule\n\n def load_user(self, func):\n self._load_user = func\n\n def 
login_required(self, func):\n \"\"\"Wrap a endpoint function to validate before __call__.\n\n .. code-block:: python\n\n\n @app.route('/hello')\n @am.login_required\n def hello():\n return {}\n\n \"\"\"\n @functools.wraps(func)\n def decorated_view(*args, **kwargs):\n if flask.request.method in ('OPTIONS'):\n return func(*args, **kwargs)\n\n is_auth, message = self.auth()\n if is_auth:\n return func(*args, **kwargs)\n raise UnauthorizedError(message)\n # elif current_app.login_manager._login_disabled:\n # return func(*args, **kwargs)\n # elif not current_user.is_authenticated:\n # return current_app.login_manager.unauthorized()\n return decorated_view\n\n\n def me(self, silence=False):\n if flask.has_request_context():\n if hasattr(flask.g, 'me'):\n return flask.g.me\n\n if not silence:\n raise UnauthorizedError('current user has not login.')\n\n return None\n\n\ndef init_auth(app):\n am = AuthManager(app)\n\n @am.load_user\n def load_user(payload):\n \"\"\"Payload from jwt decode.\"\"\"\n _id = payload['username']\n user = Admin.get_one(_id)\n return user\n\n return am\n"
},
{
"alpha_fraction": 0.5671223998069763,
"alphanum_fraction": 0.5678219199180603,
"avg_line_length": 30.3247013092041,
"blob_id": "cd628a881c99179a557671d6246afcb6333ca5c7",
"content_id": "269064c5f665a02b95d0b6fafd9f631bb4ad1c1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15725,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 502,
"path": "/app/models/orm.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport copy\nimport functools\nimport os\nimport sys\nimport weakref\nimport datetime\nimport bson\nimport logging\nfrom collections import namedtuple\n\nfrom app.error import InvalidError\nfrom app.db import db\n\n\nlogger = logging.getLogger()\n\nclass ModelError(InvalidError):\n \"\"\"Base model operator error.\"\"\"\n pass\n\n\nclass ModelDeclareError(ModelError):\n \"\"\"Error on declare a new Model\"\"\"\n pass\n\n\nclass ModelInvaldError(InvalidError):\n \"\"\"Invalid model operator.\"\"\"\n pass\n\n\nclass ModelParserError(InvalidError):\n \"\"\"Parse from dict fail.\"\"\"\n pass\n\n\nclass ModelSaveError(InvalidError):\n \"\"\"Base model operator error.\"\"\"\n pass\n\n\nclass Field(object):\n \"\"\"Decalre a propery for Model\"\"\"\n field_key = None\n raw_field_key = None\n\n def __init__(self, raw_field_key=None, **kw):\n \"\"\"\n :param str raw_field_key:\n :param default: value or function\n\n \"\"\"\n self.raw_field_key = raw_field_key\n\n if 'default' in kw:\n self.default = kw['default']\n\n def __get__(self, instance, cls):\n if not instance:\n return self\n else:\n if self.raw_field_key not in instance._attrs:\n if hasattr(self, 'default'):\n # if has `default`, then use this `default` to generate value\n if hasattr(self.default, '__call__'):\n instance._attrs[self.raw_field_key] = self.value_in(instance, self.default())\n else:\n instance._attrs[self.raw_field_key] = self.value_in(instance, self.default)\n else:\n return None\n return self.value_out(instance, instance._attrs[self.raw_field_key])\n\n def __set__(self, instance, value):\n \"\"\" set value to instance's field.\n\n TODO: how to handle none value???\n\n \"\"\"\n if value is None:\n instance._attrs[self.raw_field_key] = None\n else:\n instance._attrs[self.raw_field_key] = self.value_in(instance, value)\n\n def register(self, cls, field_key):\n \"\"\" Bind the property name with model cls.\n When declare complete. this function will call by Model's Meta. and bind by the property name.\n \"\"\"\n self.field_key = field_key\n if self.raw_field_key is None:\n self.raw_field_key = field_key\n cls._config[field_key] = self\n\n def value_in(self, instance, value):\n \"\"\"The value from external to instance._attrs\"\"\"\n return value\n\n def value_out(self, instance, value):\n \"\"\" The value from instance._attrs to external\"\"\"\n return value\n\n def encode(self, instance, target):\n \"\"\" Encode external value to another data type that json.dumps can process. \"\"\"\n if self.raw_field_key in instance._attrs:\n target[self.field_key] = getattr(instance, self.field_key)\n\n def decode(self, instance, payload):\n \"\"\" decode external value from another data type that json.loads can process. 
\"\"\"\n if self.field_key in payload:\n value = payload[self.field_key]\n setattr(instance, self.field_key, value)\n\n\n\nclass IDField(Field):\n def __init__(self, raw_field_key='_id', **kw):\n if 'default' not in kw:\n kw['default'] = lambda: str(bson.ObjectId())\n kw['raw_field_key'] = raw_field_key\n super(IDField, self).__init__(**kw)\n\n\nclass StringField(Field):\n def __init__(self, **kw):\n if 'default' not in kw:\n kw['default'] = ''\n super(StringField, self).__init__(**kw)\n\n def value_in(self, instance, value):\n if isinstance(value, basestring):\n return value\n return \"%s\" % (value)\n\n def value_out(self, instance, value):\n return value\n\n\nclass BoolField(Field):\n def __init__(self, **kw):\n if 'default' not in kw:\n kw['default'] = False\n super(BoolField, self).__init__(**kw)\n\n def value_in(self, instance, value):\n return bool(value)\n\n def value_out(self, instance, value):\n return value\n\n\nclass IntField(Field):\n def __init__(self, **kw):\n if 'default' not in kw:\n kw['default'] = 0\n super(IntField, self).__init__(**kw)\n\n def value_in(self, instance, value):\n return int(value)\n\n def value_out(self, instance, value):\n return value\n\n\nclass DateField(Field):\n def __init__(self, **kw):\n \"\"\" DateField\n :param datetime default: default can be like ex: lamda: datetime.date.today()\n \"\"\"\n # if 'default' not in kw:\n # kw['default'] = datetime.datetime.now().replace(minute=0, hour=0, second=0, microsecond=0)\n super(DateField, self).__init__(**kw)\n\n def value_in(self, instance, value):\n if isinstance(value, datetime.date):\n return datetime.datetime.combine(value, datetime.datetime.min.time())\n elif isinstance(value, datetime.datetime):\n return value.replace(minute=0, hour=0, second=0, microsecond=0)\n raise ModelInvaldError('`DateField` only accept `date` value, not `%s`' % repr(value))\n\n def value_out(self, instance, value):\n return value.date()\n\n def encode(self, instance, target):\n if self.raw_field_key in instance._attrs:\n target[self.field_key] = getattr(instance, self.field_key).strftime('%Y-%m-%d')\n\n def decode(self, instance, payload):\n if self.field_key in payload:\n try:\n value = datetime.datetime.strptime(payload[self.field_key], '%Y-%m-%d').date()\n setattr(instance, self.field_key, value)\n except Exception as e:\n logger.warning(e)\n logger.warning('can not decode `%s` `%s`', self.field_key, payload[self.field_key])\n\n\n\nclass ListField(Field):\n def __init__(self, **kw):\n \"\"\" ListField.\n \"\"\"\n if 'default' not in kw:\n kw['default'] = lambda: []\n super(ListField, self).__init__(**kw)\n\n def value_in(self, instance, value):\n return list(value)\n\n\nclass TupleField(Field):\n\n def __init__(self, np, **kw):\n \"\"\" TupleField.\n :param namedtuple np: ex: namedtuple('Point', ['x', 'y'], verbose=True)\n \"\"\"\n if not np:\n raise ModelDeclareError('Declare a tuple field without namedtuple `np`.')\n super(TupleField, self).__init__(**kw)\n self.np = np\n\n def value_in(self, instance, value):\n return value.__dict__\n\n def value_out(self, instance, value):\n if isinstance(value, dict):\n return self.np(**value)\n return None\n\n def encode(self, instance, target):\n if self.raw_field_key in instance._attrs:\n target[self.field_key] = getattr(instance, self.field_key).__dict__\n\n def decode(self, instance, payload):\n if self.field_key in payload:\n try:\n value = self.np(**payload[self.field_key])\n setattr(instance, self.field_key, value)\n except Exception as e:\n logger.warning(e)\n 
logger.warning('can not decode `%s` `%s`', self.field_key, payload[self.field_key])\n\n\nclass ClassReadonlyProperty(object):\n \"\"\"a propery declare on class, and it is readonly and share with all instance.\n It is good to declare _table or _config.\n \"\"\"\n\n def __init__(self, default_value=lambda: None):\n self.default_value = default_value\n self.values = weakref.WeakKeyDictionary()\n\n def __get__(self, instance, cls):\n if cls not in self.values:\n if hasattr(self.default_value, '__call__'):\n self.values[cls] = self.default_value()\n else:\n self.values[cls] = self.default_value\n\n return self.values[cls]\n\n def __set__(self, instance, value):\n raise ModelInvaldError('`ClassReadonlyProperty` is readonly.')\n\n\nclass InstanceReadonlyProperty(object):\n def __init__(self, default_value=lambda: None):\n self.default_value = default_value\n self.values = weakref.WeakKeyDictionary()\n\n def __get__(self, instance, cls):\n if instance:\n if instance not in self.values:\n if hasattr(self.default_value, '__call__'):\n self.values[instance] = self.default_value()\n else:\n self.values[instance] = self.default_value\n return self.values[instance]\n raise ModelInvaldError('`InstanceReadonlyProperty` can not work on class level.')\n\n def __set__(self):\n raise ModelInvaldError('`InstanceReadonlyProperty` is readonly.')\n\n\nclass Meta(type):\n def __new__(meta_cls, cls_name, cls_bases, cls_dict):\n cls = type.__new__(meta_cls, cls_name, cls_bases, cls_dict)\n if cls_name == 'Base':\n return cls\n primary_key_exists = False\n for field_key, field in cls_dict.items():\n if isinstance(field, Field):\n field.register(cls, field_key)\n if isinstance(field, IDField):\n if primary_key_exists:\n raise ModelDeclareError('model %s can not set primary_key `%s` twice.' 
% (cls_name, field_key))\n primary_key_exists = True\n\n if cls._table is None:\n raise ModelDeclareError('declare Moedl without _table.')\n\n if cls._primary_key is None:\n raise ModelDeclareError('declare Moedl without IDField.')\n return cls\n\n\nclass FetchResult(object):\n def __init__(self, cls, cursor):\n self.cls = cls\n self.root_cursor = cursor\n self.cursor = self.root_cursor.clone()\n\n def __iter__(self):\n return self\n\n def __getitem__(self, key):\n return self.cls.get_one(raw=self.cursor[key])\n\n def next(self):\n return self.cls.get_one(raw=next(self.cursor))\n\n def sort(self, key, sort):\n self.cursor = self.cursor.sort(key, sort)\n return self\n\n def limit(self, limit):\n self.cursor = self.cursor.limit(limit)\n return self\n\n def skip(self, skip):\n self.cursor = self.cursor.skip(skip)\n return self\n\n def rewind(self):\n self.cursor.rewind()\n return self\n\n def reset(self):\n self.cursor = self.root_cursor.clone()\n return self\n\n @property\n def total(self):\n return self.root_cursor.count()\n\n\nclass Base(object):\n __metaclass__ = Meta\n\n _config = ClassReadonlyProperty(lambda: {})\n _attrs = InstanceReadonlyProperty(lambda: {})\n\n _table = ClassReadonlyProperty()\n _primary_key = ClassReadonlyProperty()\n\n @classmethod\n def _find(cls, query={}):\n \"\"\"Proxy to db.collection.find.\"\"\"\n return db[cls._table].find(query)\n\n @classmethod\n def _insert_one(cls, payload):\n \"\"\"Proxy to db.collection.insert_one.\"\"\"\n result = db[cls._table].insert_one(payload)\n if not result.inserted_id:\n raise ModelInvaldError('create instance fail.')\n return result.inserted_id\n\n @classmethod\n def _update_one(cls, query={}, payload={}):\n \"\"\"Proxy to db.collection.update_one.\"\"\"\n if not query:\n raise ModelInvaldError('can update by empty query.')\n\n if not payload:\n raise ModelInvaldError('can update by empty payload.')\n\n result = db[cls._table].update_one(query, {\n '$set': payload\n })\n\n if result.matched_count == 1:\n return True\n else:\n return False\n\n @classmethod\n def get_one(cls, _id=None, raw=None):\n if _id and raw is None:\n raw = db[cls._table].find_one({'_id': _id}, projection={field.raw_field_key: True for field in cls._config.values()})\n if not raw:\n return None\n elif raw and _id is None:\n pass\n else:\n raise ModelInvaldError('get_one arguemtn errors.')\n\n instance = cls({})\n instance._attrs.update(raw)\n return instance\n\n @classmethod\n def fetch(cls, query={}, sort=None, offset=None, limit=None):\n cursor = cls._find(query)\n return FetchResult(cls, cursor)\n\n @classmethod\n def create(cls, payload={}):\n for field_key, field in cls._config.iteritems():\n if field_key not in payload:\n if hasattr(field, 'default'):\n if hasattr(field.default, '__call__'):\n payload[field_key] = field.default()\n else:\n payload[field_key] = field.default\n instance = cls(payload)\n instance.save()\n return instance\n\n def __init__(self, payload={}):\n \"\"\" Do not use Foo() to create new instance.\n instate cls.create or cls.get_one() is better.\n \"\"\"\n for field_key, value in payload.items():\n if field_key in self._config:\n setattr(self, field_key, value)\n else:\n raise ModelError('create a `%s` instance with unfield key, value (%s, %s).' % (type(self).__name__, field_key, value))\n\n # self._attrs.update(_attrs)\n # for k, v in values.items():\n # if k not in self._config:\n # raise ModelError('create a `%s` instance with unfield key,value (%s, %s).' 
% (type(self), k, v))\n # setattr(self, k, v)\n\n def is_new(self):\n #primary_field = self._config[self._primary_key]\n # if primary_field.raw_field_key not in self._attrs:\n # return True\n if db[self._table].find_one({'_id': self.get_id()}, ('_id')):\n return False\n return True\n\n def get_id(self):\n return getattr(self, self._primary_key)\n\n def save(self, allow_fields=None):\n \"\"\"Save _attrs in to database.\n\n :param list allow_fields: it will only save allow_fields.\n \"\"\"\n cls = type(self)\n payload = {}\n\n fields = set(self._config.keys())\n if allow_fields:\n fields = set(allow_fields) & fields\n\n for k in fields:\n # if self.is_new() and isinstance(self._config[k], IDField):\n # pass # pass if primary_key\n if k == self._primary_key:\n continue\n elif k in self._attrs:\n payload[k] = self._attrs[k]\n\n if self.is_new():\n primary_field = self._config[self._primary_key]\n payload['_id'] = self._attrs[primary_field.raw_field_key]\n if cls._insert_one(payload):\n return True\n\n else:\n if cls._update_one({'_id': self.get_id()}, payload):\n return True\n raise ModelSaveError('can not save instance of `%s`' % type(self))\n\n def to_jsonify(self):\n \"\"\" return a dict, that can be dump to json.\n \"\"\"\n result = {\n '__class__': type(self).__name__\n }\n for field_key, field in self._config.iteritems():\n field.encode(self, result)\n return result\n\n\n def update_from_jsonify(self, payload, allow_fields=None):\n \"\"\"update a value from external dict by json.loads().\"\"\"\n for field_key, field in self._config.iteritems():\n field.decode(self, payload)\n return self\n\n @classmethod\n def from_jsonify(cls, payload):\n if '__class__' in payload and payload['__class__'] == cls.__name__:\n instance = cls({})\n for field_key, field in cls._config.iteritems():\n field.decode(instance, payload)\n return instance\n raise ModelParserError('can not parse `%s` to `%s` instance.' % (payload, cls.__name__))\n"
},
{
"alpha_fraction": 0.5825860500335693,
"alphanum_fraction": 0.5927104353904724,
"avg_line_length": 29.060869216918945,
"blob_id": "b91de98cd1f1d1462952d819d0500031b20552d7",
"content_id": "9abd51c2e616c7a367f0f0a4b4eade965d2de796",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3457,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 115,
"path": "/app/models/models.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom collections import namedtuple\nimport app.error as error\n\nfrom passlib.hash import pbkdf2_sha256\n\nfrom app.db import db\nfrom . import Base\nfrom . import IDField, StringField, DateField, BoolField, ListField, TupleField\n\n\nclass Admin(Base):\n _table = 'admins'\n _primary_key = 'admin_id'\n\n admin_id = IDField(raw_field_key='_id')\n enabled = BoolField()\n\n @classmethod\n def login(cls, _id, password):\n raw = db[cls._table].find_one({'_id': _id}, {'_id': 1, 'password': 1, 'enabled': 1})\n if raw and raw.get('enabled', False):\n return cls.valid_password(password, raw['password'])\n return False\n\n @classmethod\n def hash_password(cls, password):\n if isinstance(password, unicode):\n password = password.encode('utf-8')\n return pbkdf2_sha256.encrypt(password, rounds=10 ** 5, salt_size=16)\n\n @classmethod\n def valid_password(cls, password, encoded):\n if isinstance(password, unicode):\n password = password.encode('utf-8')\n return pbkdf2_sha256.verify(password, encoded)\n\n def update_password(self, password):\n result = db[self._table].update_one({'_id': self.get_id()}, {\n '$set': {'password': self.hash_password(password)}\n })\n if result.matched_count:\n return True\n raise error.InvalidError('update password fail')\n\n\nclass Person(Base):\n _table = 'persons'\n _primary_key = 'person_id'\n\n person_id = IDField(raw_field_key='_id')\n social_id = StringField()\n name = StringField()\n birthday = DateField()\n gender = StringField()\n\n phone_0 = StringField()\n phone_1 = StringField()\n phone_2 = StringField()\n\n address_0 = StringField()\n address_1 = StringField()\n\n email_0 = StringField()\n email_1 = StringField()\n\n education = StringField()\n job = StringField()\n\n register_date = DateField()\n unregister_date = DateField()\n\n baptize_date = DateField()\n baptize_priest = StringField()\n\n gifts = ListField() # ['aa', 'bb', 'cc']\n groups = ListField() # [group_id, group_id, group_id]\n events = ListField() # {date:'', 'title': 'bala...'}\n relations = ListField() # {rel: 'parent', person_id: '1231212'}\n #TupleField(namedtuple('Relation', ('rel', 'person_id')), {'rel':None, 'person_id':None})\n\n note = StringField()\n\n def get_relations(self):\n rows = {row['_id']: row for row in self.relations}\n persons, total = Person.fetch({'_id': {'$in': rows.keys()}})\n for p in persons:\n if p._id in rows:\n rows[p._id]['person'] = p\n return rows.values()\n # return Person.fetch({'_id': {'$in': _ids}})\n\n def build_relation(self, rel, other_person_id, due=False):\n item = {'rel': rel, 'person_id': other_person_id}\n other_person_ids = [item['person_id'] for item in self.relations]\n if other_person_id in other_person_ids:\n raise error.InvalidError('relation is existed.')\n\n else:\n self.relations.append(item)\n self.save(allow_fields=('relations',))\n if due:\n other_person = type(self).get_one(other_person_id)\n other_person.build_relation(rel, self.get_id())\n\n return True\n\n\nclass Group(Base):\n _table = 'groups'\n _primary_key = 'group_id'\n\n group_id = IDField()\n name = StringField()\n note = StringField()\n"
},
{
"alpha_fraction": 0.7147002220153809,
"alphanum_fraction": 0.7388781309127808,
"avg_line_length": 29.41176414489746,
"blob_id": "7c6e4214f3d6ffa14a9527860f17269cd2da0242",
"content_id": "9a9c1002ba9dcd1b372415fd335cba66a8cceef7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1034,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 34,
"path": "/app/logger.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\nimport logging\nimport logging.handlers # import RotatingFileHandler\n\n\nlogger = logging.getLogger('')\n\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n '%(asctime)s %(filename)s:%(lineno)d\\t[%(thread)8.8s][%(levelname)5.5s] - %(message)s',\n datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\n\nlog_path = os.path.join(os.path.dirname(__file__), \"../logs/log.log\")\nfile_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=1000 * 1000, backupCount=10)\nfile_handler.setFormatter(formatter)\nfile_handler.setLevel(logging.DEBUG)\nlogger.addHandler(file_handler)\n\n#loggers[logID] = logger\n\"\"\"\naccess_logger = logging.getLogger('werkzeug')\nlog_path = os.path.join(os.path.dirname(__file__),\"../logs/access.log\")\naccess_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=1000 * 1000, backupCount=10)\nlogger.addHandler(access_handler)\n\"\"\"\n"
},
{
"alpha_fraction": 0.5939849615097046,
"alphanum_fraction": 0.6065163016319275,
"avg_line_length": 18,
"blob_id": "77a03310a493e6d61442b1a668d2d2d1b8a66ac6",
"content_id": "6c5c1cd399f88929eb83eb52413c9929cc5c32a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 21,
"path": "/tests/test_db.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom app.config import config\n#from app.db import db\nfrom app.db import db\nimport unittest\n\n\nclass TestDB(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_db(self):\n db.tests.insert_one({'_id': '1234'})\n one = db.tests.find_one()\n self.assertTrue(one)\n db.tests.drop()\n"
},
{
"alpha_fraction": 0.6557376980781555,
"alphanum_fraction": 0.6557376980781555,
"avg_line_length": 16.079999923706055,
"blob_id": "19f1b0b6a1cf000354dbdcfa929328ac7e37638e",
"content_id": "9cbe2c1fe061195c5516be9c3044f7e01ef84cc2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 427,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 25,
"path": "/app/db.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport functools\nimport pymongo\n\nfrom werkzeug.local import LocalProxy\nfrom app.config import config\n\n\n_db = None\n_client = None\n\ndef _init_db():\n client = pymongo.MongoClient(config['DB_HOST'], config['DB_PORT'])\n db = client[config['DB_NAME']]\n return client, db\n\n\ndef find_db():\n global _client, _db\n if not _db:\n _client, _db = _init_db()\n return _db\n\ndb = LocalProxy(find_db)\n"
},
{
"alpha_fraction": 0.6109785437583923,
"alphanum_fraction": 0.6121718287467957,
"avg_line_length": 17.15217399597168,
"blob_id": "6307982742740de6ebfc59b3b51443b8502400d7",
"content_id": "3a1a91fb73d7e2e0554ca8bc05102aebf473b347",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 838,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 46,
"path": "/app/config.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport argparse\n\nconfig = {}\n\nparser = argparse.ArgumentParser(\n description='custom config'\n)\n\nparser.add_argument(\n '--config', '-f',\n help='load custom config in configs',\n default='default'\n)\n\nparser.add_argument(\n '--debug',\n action='store_true',\n help='debug mode',\n default=False\n)\n\n\ndef _parse_args():\n \"\"\"parse args from cli.\n\n You can mock this function for unittest.\n \"\"\"\n args = parser.parse_args()\n return args\n\ndef load_config():\n global config\n if config:\n print 'pass load_config if loaded.'\n return\n\n args = _parse_args()\n m = __import__('configs.default', fromlist=['default'])\n config.update(m.config)\n\n m = __import__('configs.%s' % args.config, fromlist=[args.config])\n config.update(m.config)\n\n\n\n"
},
{
"alpha_fraction": 0.632488489151001,
"alphanum_fraction": 0.6336405277252197,
"avg_line_length": 18.727272033691406,
"blob_id": "23f5d3be84ac035c2f552eb52a3539a74ba75cb2",
"content_id": "df890ffc2edc2bba22001254d196e3ee4f4683e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 868,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 44,
"path": "/tests/__init__.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os, sys\nimport atexit\nimport re\n\nimport unittest\nimport mock\n\n#import app.config as config\nfrom app.config import parser, config\nimport app.server\n\nbox = None\n\ndef setup():\n print \"custom config\"\n def side_effect():\n return parser.parse_args(['--config', 'test', '--debug'])#from\n\n mock_load_config = mock.patch('app.config._parse_args', side_effect=side_effect)\n mock_load_config.start()\n\n print \"custom db\"\n from mongobox import MongoBox\n global box, db, client\n box = MongoBox()\n box.start()\n\n def side_effect():\n client = box.client() # pymongo client\n db = client['test']\n return client, db\n\n mock_init_db = mock.patch('app.db._init_db', side_effect=side_effect)\n mock_init_db.start()\n\n\n\ndef bye():\n global box\n if box:\n box.stop()\n\natexit.register(bye)\n"
},
{
"alpha_fraction": 0.4553571343421936,
"alphanum_fraction": 0.5535714030265808,
"avg_line_length": 27,
"blob_id": "050ce5d8d10e25aa6db630d135a1d1ba5271b148",
"content_id": "0a129946da2fdad035e3cd85fac40a03ce8cb1d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 112,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 4,
"path": "/scripts/run-mongo.sh",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nDIR=\"$(cd \"$(dirname \"$0\")/..\" && pwd)\"\ndocker run --rm -p 27017:27017 -v ${DIR}/db:/data/db mongo\n"
},
{
"alpha_fraction": 0.5862069129943848,
"alphanum_fraction": 0.5862069129943848,
"avg_line_length": 8.333333015441895,
"blob_id": "ac04bfaa8f6ac93da39613016860387318d5dc8b",
"content_id": "ab5fda1cdfefdf04be49fc8278722d2d02db77e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 3,
"path": "/configs/test.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "import os, sys\nconfig = {\n}\n\n"
},
{
"alpha_fraction": 0.6109215021133423,
"alphanum_fraction": 0.6211603879928589,
"avg_line_length": 19.928571701049805,
"blob_id": "e0724874f17dbb5696bfe6d96bbba36fa8af29f8",
"content_id": "c36b52e8b2c163e462d6e975071efe12c7429cee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 293,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 14,
"path": "/scripts/test-once.sh",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nDIR=\"$(cd \"$(dirname \"$0\")/..\" && pwd)\"\nif [ -z \"$1\" ]\nthen\n modules=\"tests\"\nelse\n modules=\"$1\"\nfi\n\n# --with-coverage --cover-inclusive --cover-package=app\necho ${modules}\ncd ${DIR}\nPYTHONPATH=./ nosetests -v --with-coverage --cover-inclusive --cover-package=app ${modules}\n"
},
{
"alpha_fraction": 0.579299807548523,
"alphanum_fraction": 0.5850391983985901,
"avg_line_length": 32.082279205322266,
"blob_id": "8dcf64e027dd7b5c2ed8c22264d3851d6b9a9745",
"content_id": "b732417f8e662ff6742df242325c8eeb6e514e89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5227,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 158,
"path": "/app/server.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport json\nimport sys\n\nfrom flask import Flask\nfrom flask import current_app, g\nfrom flask import request, jsonify\n\nimport app.config as config\nimport app.utils as utils\nimport app.auth as auth\n\nfrom app.logger import logger\n\nfrom app.error import InvalidError\nfrom app.auth import AuthManager\nfrom app.view import blueprint\nfrom app.models.models import Admin\n\n\nclass CustomFlask(Flask):\n def make_response(self, rv):\n if isinstance(rv, (dict, list)):\n return super(CustomFlask, self).make_response((json.dumps(rv, cls=self.json_encoder), 200, {\n 'Content-type': 'application/json; charset=utf-8'\n }))\n elif isinstance(rv, tuple) and isinstance(rv[0], (list, dict)):\n resp = super(CustomFlask, self).make_response((json.dumps(rv[0], cls=self.json_encoder),) + rv[1:])\n resp.headers['Content-type'] = 'application/json; charset=utf-8'\n return resp\n\n return super(CustomFlask, self).make_response(rv)\n\n\ndef main():\n config.load_config()\n\n main_app = CustomFlask(__name__)\n main_app.config.update(config.config)\n\n main_app.json_encoder = utils.BSONJSONEncoder\n main_app.json_decoder = utils.BSONJSONDecoder\n main_app.url_map.converters['ObjectId'] = utils.ObjectIdConverter\n\n am = auth.init_auth(main_app)\n\n main_app.register_blueprint(blueprint)\n\n # init admin\n admin = Admin.get_one('admin')\n if not admin:\n admin = Admin.create({\n 'admin_id': config.config['DEFAULT_ADMIN_USERNAME'],\n 'enabled': True,\n })\n admin.update_password(config.config['DEFAULT_ADMIN_PASSWORD'])\n\n ###############################################\n # CORS OPTIONS request fix\n ###############################################\n\n @main_app.before_request\n def option_autoreply():\n if request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n h = resp.headers\n # Allow the origin which made the XHR\n h['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')\n # Allow Credentials\n h['Access-Control-Allow-Credentials'] = 'true'\n # Allow the actual method\n h['Access-Control-Allow-Methods'] = request.headers['Access-Control-Request-Method']\n # Allow for cache $n seconds\n h['Access-Control-Max-Age'] = 3600 if config.config[\"MODE\"] == \"production\" else 1\n # We also keep current headers\n if 'Access-Control-Request-Headers' in request.headers:\n h['Access-Control-Allow-Headers'] = request.headers.get('Access-Control-Request-Headers', '')\n return resp\n\n @main_app.after_request\n def allow_origin(response):\n if request.method == 'OPTIONS':\n return response\n\n response.headers['Access-Control-Allow-Headers'] = request.headers.get('Access-Control-Request-Headers', '')\n response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')\n response.headers['Access-Control-Allow-Credentials'] = 'true'\n response.headers['Access-Control-Allow-Methods'] = 'GET,POST,OPTIONS'\n response.headers['Access-Control-Max-Age'] = 1728000\n return response\n\n @main_app.errorhandler(InvalidError)\n def handle_invalid_error(error):\n if config.config['MODE'] == 'production':\n if isinstance(error, auth.UnauthorizedError):\n return {'success': False, 'message': 'Unauthorized.'}, error.status_code\n elif isinstance(error, auth.LoginFailError):\n return {'success': False, 'message': 'Login fail.'}, error.status_code\n return error.to_dict(), error.status_code\n\n @main_app.route('/')\n @main_app.route('/index')\n def index():\n return {\n 'success': True,\n }\n\n @main_app.route('/login', methods=['POST'])\n def 
login():\n payload = request.json\n\n username = str(payload.get('username', '')).strip()\n password = str(payload.get('password', '')).strip()\n if not username or not password:\n return {'success': False, 'message': 'login fail'}, 403\n\n if Admin.login(username, password):\n resp = am.login_user({'username': username})\n\n resp.data = json.dumps({\n 'success': True,\n 'message': 'login success',\n 'data': am.me().to_jsonify()\n })\n return resp\n return {'success': False, 'message': 'login fail'}, 403\n\n @main_app.route('/logout')\n def logout():\n resp = am.logout_user()\n resp.data = json.dumps({\n 'success': True,\n 'message': 'logout success',\n 'data': None\n })\n return resp\n\n\n @main_app.route('/user/me')\n @am.login_required\n def user_me():\n #me = g.me\n return {\n 'success': True,\n 'data': am.me().to_jsonify()\n }\n\n @main_app.route('/error')\n def rasie_error():\n raise InvalidError('error', 400)\n\n return main_app\n\n\nif __name__ == '__main__':\n main_app = main()\n main_app.run(debug=True)\n"
},
{
"alpha_fraction": 0.4195402264595032,
"alphanum_fraction": 0.6494252681732178,
"avg_line_length": 13.5,
"blob_id": "24727c018905ee613780b6d124a55a588fb12416",
"content_id": "ecea2069a6c79a64c7877ae8cb5c4bc2fa312de7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 174,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 12,
"path": "/requirement.txt",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "Flask-JWT==0.3.2\nFlask==0.10.1\nPyJWT==1.4.0\nWerkzeug==0.11.10\narrow==0.7.0\ngevent==1.1.1\ngraphene==0.10.1\nnose==1.3.7\nnose==1.3.7\npasslib==1.6.5\npymongo==3.2.2\ntinydb==3.2.0\n"
},
{
"alpha_fraction": 0.5209580659866333,
"alphanum_fraction": 0.5508981943130493,
"avg_line_length": 21.066667556762695,
"blob_id": "58f3e8d25d6fbaaed9c3f0e0dfd740e03d329546",
"content_id": "b9cc28c8fe42cc3e01a527852faba0325046ebb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 15,
"path": "/configs/default.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "import os, sys\n\nconfig = {\n 'MODE': 'development',\n #'DB_PATH': os.path.join(os.path.dirname(__file__),'../db')\n 'DB_HOST': 'localhost',\n 'DB_PORT': 27017,\n 'DB_NAME': 'church',\n\n 'DEFAULT_ADMIN_USERNAME': 'admin',\n 'DEFAULT_ADMIN_PASSWORD': '1234',\n\n 'JWT_SECRET': '1&2,s@#sa;jd9',\n 'JWT_EXPIRE': 86400\n}\n\n\n\n"
},
{
"alpha_fraction": 0.5032303333282471,
"alphanum_fraction": 0.5173738598823547,
"avg_line_length": 31.7257137298584,
"blob_id": "a82fa7bd87acddc018ea2ee324622dbc62e3c5d2",
"content_id": "0999c65ea93da74d10f125aabecfb634d30959f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5727,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 175,
"path": "/tests/test_server.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport json\nimport unittest\nimport bson\n\nfrom app.config import config\nfrom app.server import main\nfrom app.db import db\nfrom app.models.models import Person\nfrom app.models.models import Group\n\n\nclass TestServer(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.main_app = main()\n self.main_app.debug = True\n self.client = self.main_app.test_client()\n\n def tearDown(self):\n db.persons.delete_many({})\n db.groups.delete_many({})\n\n def test_person_create_update(self):\n \"\"\"/person/create\"\"\"\n post = {\n 'name': 'Bill',\n 'phone_0': '0988'\n }\n r = self.client.post('/person/create', data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n\n person_id = json.loads(r.data)['data']['person_id']\n r = self.client.get('/person/one/%s' % person_id)\n self.assertEqual(r.status_code, 200)\n\n _person = json.loads(r.data)['data']\n self.assertEqual(_person['name'], post['name'])\n self.assertEqual(_person['phone_0'], post['phone_0'])\n\n post = {\n 'phone_1': 'phine_1_update',\n 'address_0': 'address_0_update'\n }\n r = self.client.post('/person/one/%s/update' % person_id, data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n\n r = self.client.get('/person/one/%s' % person_id)\n self.assertEqual(r.status_code, 200)\n\n _person = json.loads(r.data)['data']\n self.assertEqual(_person['phone_1'], post['phone_1'])\n self.assertEqual(_person['address_0'], post['address_0'])\n\n\n def test_person_build_relation(self):\n \"\"\"/person/<_id>/relation\"\"\"\n db.persons.insert_many([{\n '_id': 'id_0',\n 'name': 'Bill'\n }, {\n '_id': 'id_1',\n 'name': 'John'\n }])\n post = {\n 'rel': 'family',\n 'person_id': 'id_1'\n }\n r = self.client.post('/person/id_0/relation', data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n for row in db.persons.find():\n if row['_id'] == 'id_0':\n self.assertIn({'rel': 'family', 'person_id': 'id_1'}, row['relations'])\n\n if row['_id'] == 'id_1':\n self.assertIn({'rel': 'family', 'person_id': 'id_0'}, row['relations'])\n\n r = self.client.post('/person/id_0/relation', data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 400)\n\n def test_person_list(self):\n \"\"\"/person/list\"\"\"\n db.persons.insert_many([{\n '_id': 'id_1',\n 'name': 'Bill'\n }, {\n '_id': 'id_2',\n 'name': 'John'\n }, {\n '_id': 'id_3',\n 'name': 'Mary',\n }])\n\n r = self.client.get('/person/list')\n self.assertEqual(r.status_code, 200)\n\n result = json.loads(r.data)['data']\n for row in result:\n if row['person_id'] == 'id_1':\n self.assertEqual(row['name'], 'Bill')\n elif row['person_id'] == 'id_2':\n self.assertEqual(row['name'], 'John')\n elif row['person_id'] == 'id_3':\n self.assertEqual(row['name'], 'Mary')\n\n r = self.client.get('/person/list?term=john')\n self.assertEqual(r.status_code, 200)\n result = json.loads(r.data)['data']\n self.assertEqual(result[0]['name'], 'John')\n\n def test_person_one(self):\n \"\"\"/person/one/<_id>\"\"\"\n db.persons.insert_many([{\n '_id': 'id_1',\n 'name': 'Bill'\n }, {\n '_id': 'id_2',\n 'name': 'John'\n }])\n r = self.client.get('/person/one/id_1')\n self.assertEqual(r.status_code, 200)\n\n result = json.loads(r.data)['data']\n self.assertEqual(result['person_id'], 'id_1')\n self.assertEqual(result['name'], 'Bill')\n\n def test_group(self):\n \"\"\"/group/create\"\"\"\n payload = {\n 'name': 'group-1',\n 
'note': 'this is note'\n }\n r = self.client.post('/group/create', data=json.dumps(payload), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n result = json.loads(r.data)['data']\n group_id = result['group_id']\n\n _group = db.groups.find_one({'_id': group_id})\n self.assertEqual(_group['name'], payload['name'])\n self.assertEqual(_group['note'], payload['note'])\n\n payload = {\n 'name': 'group-1-update',\n }\n r = self.client.post('/group/one/%s/update' % group_id, data=json.dumps(payload), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n result = json.loads(r.data)['data']\n\n _group = db.groups.find_one({'_id': result['group_id']})\n self.assertEqual(_group['name'], payload['name'])\n\n def test_group_list(self):\n \"\"\"/group/list\"\"\"\n db.groups.insert_many([{\n '_id': 'id_0',\n 'name': 'group-0'\n }, {\n '_id': 'id_1',\n 'name': 'group-1'\n }])\n r = self.client.get('/group/list')\n result = json.loads(r.data)['data']\n for row in result:\n if row['group_id'] == 'id_0':\n self.assertEqual(row['name'], 'group-0')\n elif row['group_id'] == 'id_1':\n self.assertEqual(row['name'], 'group-1')\n\n r = self.client.get('/group/one/id_1')\n result = json.loads(r.data)['data']\n self.assertEqual(result['name'], 'group-1')\n"
},
{
"alpha_fraction": 0.5510316491127014,
"alphanum_fraction": 0.5537826418876648,
"avg_line_length": 21.577640533447266,
"blob_id": "60a4c3ee40a3bf35f650c87696b01478473572b7",
"content_id": "3d0822d0b18db3c32d03c45e4c8fdf7118d793e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3635,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 161,
"path": "/app/view.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport re\n\nfrom flask import Blueprint#, render_template, abort\nfrom flask import request, jsonify\nfrom app.logger import logger\nfrom app.error import InvalidError\nfrom app.models.models import Person, Group\n\nblueprint = Blueprint('view', __name__)\n\n######################\n# Person\n######################\n\[email protected]('/person/one/<_id>')\ndef person_one(_id):\n person = Person.get_one(_id)\n return {\n 'success': True,\n 'data': person.to_jsonify()\n }\n\n\[email protected]('/person/one/<_id>/update', methods=['POST'])\ndef person_one_update(_id):\n person = Person.get_one(_id)\n if not person:\n raise InvalidError('Person(%s) is not existed.' % _id)\n\n payload = request.json\n allow_field = (\n 'name',\n 'phone_0',\n 'phone_1',\n 'phone_2',\n 'address_0',\n 'address_1',\n 'email_0',\n 'email_1',\n 'education',\n 'job',\n 'birthday',\n 'register_day',\n 'unregister_day',\n 'baptize_date',\n 'baptize_priest',\n 'gifts',\n 'groups',\n 'events',\n 'note'\n )\n person.update_from_jsonify(payload, allow_field)\n person.save()\n return {\n 'success': True,\n 'data': person.to_jsonify()\n }\n\n\[email protected]('/person/<_id>/relation', methods=['POST'])\ndef person_build_relation(_id):\n payload = request.json\n if 'rel' not in payload or 'person_id' not in payload:\n raise InvalidError('`rel` and `person_id` should in payload.')\n\n person = Person.get_one(_id)\n if not person:\n raise InvalidError('Person(%s) is not existed.' % _id)\n\n person.build_relation(payload['rel'], payload['person_id'], due=True)\n return {'success': True}\n\n\[email protected]('/person/list')\ndef person_list():\n term = str(request.values.get('term', ''))\n group = str(request.values.get('group', ''))\n #limit = int(request.values.get('limit', 0))\n #offset = int(request.values.get('offset', 0))\n\n query = {}\n if term:\n query['name'] = {'$regex': re.escape(term), '$options': 'i'}\n\n if group:\n pass\n #query['name'] = {'$regex': re.escape(term), '$options': 'i'}\n\n result = Person.fetch(query)\n data = []\n for person in result:\n data.append(person.to_jsonify())\n\n return {\n 'success': True,\n 'data': data,\n }\n\n\[email protected]('/person/create', methods=['POST'])\ndef person_create():\n payload = request.json\n p = Person.create(payload)\n\n return {\n 'success': True,\n 'data': p.to_jsonify()\n }\n\n######################\n# group\n######################\n\n\[email protected]('/group/create', methods=['POST'])\ndef group_create():\n payload = request.json\n group = Group.create(payload)\n return {\n 'success': True,\n 'data': group.to_jsonify()\n }\n\n\[email protected]('/group/one/<_id>/update', methods=['POST'])\ndef group_one_update(_id):\n group = Group.get_one(_id)\n if not group:\n raise InvalidError('Group(%s) is not existed.' % _id)\n\n payload = request.json\n group.update_from_jsonify(payload)\n group.save()\n\n return {\n 'success': True,\n 'data': group.to_jsonify()\n }\n\n\[email protected]('/group/one/<_id>')\ndef group_one(_id):\n group = Group.get_one(_id)\n return {\n 'success': True,\n 'data': group.to_jsonify()\n }\n\n\[email protected]('/group/list')\ndef group_list():\n result = Group.fetch()\n data = []\n for group in result:\n data.append(group.to_jsonify())\n\n return {\n 'success': True,\n 'data': data,\n }\n"
},
{
"alpha_fraction": 0.5151515007019043,
"alphanum_fraction": 0.5352170467376709,
"avg_line_length": 25.83516502380371,
"blob_id": "43f231740f7e7b667d57e99c61e2c89e6fa32d6b",
"content_id": "8cdfcb99767f46744588e210f79cbbf7e27a213f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2442,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 91,
"path": "/tests/test_models.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport datetime\nimport unittest\n\nfrom app.config import config\nfrom app.db import db\nfrom app.models.models import Person\nfrom app.models.models import Group\n\n\nclass TestModel(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_person(self):\n \"\"\"Person\"\"\"\n p = Person.create({\n 'name': 'John',\n 'phone_0': '0988'\n })\n\n self.assertEqual(p.name, 'John')\n self.assertEqual(p.phone_0, '0988')\n\n p.name = 'Bill'\n self.assertEqual(p.name, 'Bill')\n\n p.phone_1 = '0989'\n self.assertEqual(p.phone_1, '0989')\n\n p.birthday = datetime.datetime.strptime('2016-11-12', '%Y-%m-%d')\n p.save()\n\n raw = db.persons.find_one({'_id': p.get_id()})\n self.assertEqual(raw['name'], 'Bill')\n self.assertEqual(raw['phone_0'], '0988')\n self.assertEqual(raw['birthday'], datetime.datetime.strptime('2016-11-12', '%Y-%m-%d'))\n\n with self.assertRaises(Exception) as ctx:\n # can only assign datetime object to `birthday`\n p.birthday = 'anything'\n\n p = Person.get_one(p.get_id())\n p_other = Person.create({\n 'name': 'Mary'\n })\n p.build_relation('family', p_other.get_id(), due=True)\n\n p = Person.get_one(p.get_id())\n self.assertIn({\n 'rel': 'family',\n 'person_id': p_other.get_id()\n }, p.relations)\n\n p_other = Person.get_one(p_other.get_id())\n self.assertIn({\n 'rel': 'family',\n 'person_id': p.get_id()\n }, p_other.relations)\n\n # test fetch\n fetch_result = Person.fetch()\n self.assertEqual(fetch_result.total, 2)\n for p in fetch_result:\n self.assertIsInstance(p, Person)\n\n def test_group(self):\n \"\"\"Group\"\"\"\n payload = {\n 'name': 'group-01',\n 'note': 'this is my group'\n }\n g = Group.create(payload)\n\n self.assertEqual(g.name, payload['name'])\n self.assertEqual(g.note, payload['note'])\n\n raw = db.groups.find_one({'_id': g.get_id()})\n self.assertEqual(g.name, raw['name'])\n self.assertEqual(g.note, raw['note'])\n\n g.name = 'group-01-fix'\n g.save()\n\n raw = db.groups.find_one({'_id': g.get_id()})\n self.assertEqual(g.name, 'group-01-fix')\n"
},
{
"alpha_fraction": 0.6536523699760437,
"alphanum_fraction": 0.6549118161201477,
"avg_line_length": 25.46666717529297,
"blob_id": "eb283d6f00128473ffec974474d80a4e47a6eed2",
"content_id": "cad74847de001752516b637008bbb683b6a03305",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 794,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 30,
"path": "/app/utils.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom werkzeug.routing import BaseConverter\nimport json\nimport bson\nimport bson.json_util\n\n\nclass BSONJSONEncoder(json.JSONEncoder):\n def default(self, o):\n try:\n return bson.json_util.default(o)\n except Exception as e:\n return super(BSONJSONEncoder, self).default(o)\n\n\nclass BSONJSONDecoder(json.JSONDecoder):\n \"\"\" Do nothing custom json decoder \"\"\"\n\n def __init__(self, *args, **kargs):\n _ = kargs.pop('object_hook', None)\n super(BSONJSONDecoder, self).__init__(object_hook=bson.json_util.object_hook, *args, **kargs)\n\n\nclass ObjectIdConverter(BaseConverter):\n def to_python(self, value):\n return bson.ObjectId(value)\n\n def to_url(self, value):\n return BaseConverter.to_url(value['$oid'])\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5227272510528564,
"avg_line_length": 13.666666984558105,
"blob_id": "640f7015fd415a0dc29885c3d770dd0ecbafb95e",
"content_id": "2b6682a406bcdb0fc7d52f434c71a2341b081573",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 44,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 3,
"path": "/app/models/__init__.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom .orm import *\n"
},
{
"alpha_fraction": 0.5843568444252014,
"alphanum_fraction": 0.5908209681510925,
"avg_line_length": 27.38532066345215,
"blob_id": "31ab3801e01c896f9f5027db6559b40f385229df",
"content_id": "1418d31ea7ae2f6b890989f86ad7c793a66c0c86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3094,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 109,
"path": "/tests/test_auth.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport Cookie\nimport os\nimport sys\nimport json\nimport unittest\nimport bson\n\nimport werkzeug.http\nimport jwt\n\nimport app.server as server\nfrom app.config import config\nfrom app.db import db\nfrom app.error import InvalidError\nfrom app.auth import UnauthorizedError, LoginFailError\nfrom app.models.models import Admin\n\nimport mock\n\n\nclass TestServer(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.main_app = server.main()\n cls.main_app.debug = True\n\n def setUp(self):\n self.client = self.main_app.test_client()\n\n def tearDown(self):\n db.persons.delete_many({})\n db.groups.delete_many({})\n\n def test_unauthorized_error(self):\n \"\"\"Test UnauthorizedError is a InvalidError.\"\"\"\n err = UnauthorizedError(\"unauthorized.\")\n self.assertIsInstance(err, InvalidError)\n\n err = LoginFailError(\"login fail.\")\n self.assertIsInstance(err, InvalidError)\n\n def test_heartbeat(self):\n \"\"\"Test heartbeat.\"\"\"\n r = self.client.get('/')\n self.assertEqual(json.loads(r.data), {\n 'success': True\n })\n\n r = self.client.get('/index')\n self.assertEqual(json.loads(r.data), {\n 'success': True\n })\n\n def test_default_admin(self):\n \"\"\"Test default admin.\"\"\"\n admin = db.admins.find_one({'_id': config['DEFAULT_ADMIN_USERNAME']})\n self.assertTrue(admin)\n\n post = {\n 'username': config['DEFAULT_ADMIN_USERNAME'],\n 'password': config['DEFAULT_ADMIN_PASSWORD']\n }\n r = self.client.post('/login', data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n\n def test_auth(self):\n\n db.admins.insert_many([{\n '_id': 'john',\n 'password': Admin.hash_password('1234'),\n 'enabled': True\n }])\n\n post = {\n 'username': 'john',\n 'password': '1234'\n }\n r = self.client.post('/login', data=json.dumps(post), content_type='application/json')\n self.assertEqual(r.status_code, 200)\n cookies = r.headers.getlist('Set-Cookie')\n encoded = None\n for cookie in cookies:\n key, value = werkzeug.http.parse_cookie(cookie).items()[0]\n if key == 'jwt':\n encoded = value\n\n payload = jwt.decode(encoded, self.main_app.config['JWT_SECRET'], algorithms=['HS256'])\n self.assertEqual(payload['username'], 'john')\n\n r = self.client.get('/user/me')\n self.assertEqual(r.status_code, 200)\n result = json.loads(r.data)['data']\n self.assertEqual(result['admin_id'], 'john')\n\n def test_unauth(self):\n \"\"\"Test unauth.\"\"\"\n\n r = self.client.get('/user/me')\n self.assertEqual(r.status_code, 403)\n\n def test_raise_error(self):\n \"\"\"Test raise error.\"\"\"\n\n r = self.client.get('/error')\n self.assertEqual(r.status_code, 400)\n self.assertEqual(json.loads(r.data), {'success': False, 'message': 'error'})\n"
},
{
"alpha_fraction": 0.5174619555473328,
"alphanum_fraction": 0.534092366695404,
"avg_line_length": 30.77642250061035,
"blob_id": "c62cde5e9c304058bff61187984560d399f69b8d",
"content_id": "80e028a2cebcaa211c753fb349d40b95c6786a1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7817,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 246,
"path": "/tests/test_orm.py",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport json\nimport os\nimport sys\nimport datetime\nfrom collections import namedtuple\n\nimport unittest\nfrom app.config import config\nfrom app.db import db\nfrom app.models import ModelError, ModelInvaldError, ModelDeclareError\nfrom app.models import Meta, Base, ClassReadonlyProperty\nfrom app.models import Field, IDField, StringField, BoolField, IntField, DateField, ListField, TupleField\n\n\nclass TestDB(unittest.TestCase):\n def tearDown(self):\n for col_name in db.collection_names():\n db[col_name].drop()\n\n def test_db(self):\n \"\"\" Test basic db operator. \"\"\"\n db.tests.insert_one({'name': 'test-name'})\n r = db.tests.find_one({'name': 'test-name'})\n self.assertEqual(r['name'], 'test-name')\n\n db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})\n\n def test_operator(self):\n \"\"\" Test declare a ModelClass. \"\"\"\n Point = namedtuple('Point', ['x', 'y'], False)\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField()\n default_str_field = StringField(default='hello')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n\n tuple_field = TupleField(np=Point, default=lambda: Point(x=0, y=0))\n\n for field_key in ('foo_id', 'str_field', 'default_str_field', 'date_field', 'int_field', 'bool_field', 'list_field', 'tuple_field'):\n self.assertIn(field_key, Foo._config)\n\n class Bar(Base):\n _table = ClassReadonlyProperty('bars')\n _primary_key = ClassReadonlyProperty('_id')\n\n self.assertNotEqual(Foo._config, Bar._config)\n\n self.assertEqual(Foo._primary_key, 'foo_id')\n self.assertEqual(Foo._table, 'foos')\n self.assertEqual(Foo.foo_id.raw_field_key, '_id')\n\n foo = Foo()\n self.assertEqual(foo._config, Foo._config)\n self.assertTrue(foo.is_new())\n self.assertEqual(foo.default_str_field, 'hello')\n\n foo = Foo.create({'str_field': 'any string'})\n self.assertFalse(foo.is_new())\n self.assertIsNotNone(foo.foo_id)\n self.assertEqual(foo.str_field, 'any string')\n self.assertEqual(foo.int_field, 0)\n\n foo.int_field = 100\n self.assertEqual(foo.int_field, 100)\n\n foo.int_field = '200'\n self.assertEqual(foo.int_field, 200)\n\n self.assertIsNone(foo.date_field)\n foo.date_field = datetime.datetime(2016, 12, 01, 1, 2, 3, 4)\n self.assertEqual(foo.date_field, datetime.date(2016, 12, 1))\n\n self.assertEqual(foo.list_field, [])\n foo.list_field = [0, 1, 2, 3]\n self.assertEqual(foo.list_field, [0, 1, 2, 3])\n\n foo.str_field = None\n self.assertEqual(foo._attrs['str_field'], None)\n foo.save()\n\n _foo = db.foos.find_one({'_id': foo.foo_id})\n self.assertEqual(_foo, foo._attrs)\n\n\n\n\n\n with self.assertRaises(ModelInvaldError):\n foo.date_field = 1234\n\n with self.assertRaises(ModelError) as ctx:\n foo = Foo.create({'other': 'other'})\n\n\n def test_jsonify_encode(self):\n \"\"\" Test jsonify encode to dict for json dumps.\"\"\"\n\n Point = namedtuple('Point', ['x', 'y'], False)\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n foo = Foo.create({\n 'int_field': 100,\n 'list_field': [1, 2, 3],\n })\n\n _foo = foo.to_jsonify()\n self.assertEqual('Foo', _foo['__class__'])\n 
self.assertEqual(_foo['foo_id'], foo.foo_id,)\n self.assertEqual(_foo['str_field'], 'this is default')\n self.assertEqual(_foo['int_field'], 100)\n self.assertEqual(_foo['list_field'], [1, 2, 3])\n self.assertNotIn('tuple_field', _foo)\n self.assertNotIn('date_field', _foo)\n\n Point = namedtuple('Point', ['x', 'y'], False)\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n list_field = ListField()\n tuple_field = TupleField(np=Point, default=lambda: Point(x=1, y=2))\n\n foo = Foo.create({})\n _foo = foo.to_jsonify()\n self.assertEqual(_foo['tuple_field'], {'x': 1, 'y': 2})\n self.assertEqual(_foo['list_field'], [])\n\n\n def test_jsonify_decode(self):\n \"\"\" Test jsonify decode from dict for json loads.\"\"\"\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n \"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))\n\n def test_declare_error(self):\n \"\"\" Test by error case.\"\"\"\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n pass\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n _id = IDField()\n _id_2 = IDField()\n\n def test_fetch(self):\n \"\"\"Test fetch by Model.\"\"\"\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('_id')\n\n _id = IDField()\n name = StringField()\n age = IntField()\n\n foos = [{\n '_id': 'id_0',\n 'name': 'Bill',\n 'age': 10,\n }, {\n '_id': 'id_1',\n 'name': 'John',\n 'age': 30\n }, {\n '_id': 'id_2',\n 'name': 'Mary',\n 'age': 20\n }, {\n '_id': 'id_3',\n 'name': 'Tommy',\n 'age': 40\n }]\n db.foos.insert_many(foos)\n\n r = Foo.fetch({})\n self.assertEqual(r.total, 4)\n self.assertItemsEqual([f.name for f in r], [f['name'] for f in foos])\n\n r = Foo.fetch({'_id': 'id_2'})\n self.assertEqual(r.total, 1)\n self.assertEqual(r[0]._id, 'id_2')\n self.assertEqual(r[0].name, 'Mary')\n self.assertEqual(r[0].age, 20)\n\n r = Foo.fetch({'age': {'$gt': 20}})\n self.assertEqual(r.total, 2)\n self.assertTrue(r[0].age > 20)\n self.assertTrue(r[1].age > 20)\n\n r = Foo.fetch({'name': 'John'})\n self.assertEqual(r.total, 1)\n self.assertEqual(r[0].name, 'John')\n"
},
{
"alpha_fraction": 0.5942028760910034,
"alphanum_fraction": 0.6014492511749268,
"avg_line_length": 21.66666603088379,
"blob_id": "b5cda0a998b3b83c3dcdedc106fbaab0c4359805",
"content_id": "2ce1304ef3acd8f0b2133b74ae10aa1050268f27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 138,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/scripts/run-dev-server.sh",
"repo_name": "allenyang79/member-system",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nDIR=\"$(cd \"$(dirname \"$0\")/..\" && pwd)\"\necho ${DIR}\nsource ${DIR}/venv/bin/activate\nPYTHONPATH=${DIR} python -m app.server\n\n\n"
}
] | 22 |
KopfLab/labware_c3 | https://github.com/KopfLab/labware_c3 | 2b50d89d634eebe03b139214216b85147181f78c | 696fca33a9074e61f5d8e169698fd7646da9bc99 | 4b85dbeed3bb59c33ee55db6dbe614a72e1c8b42 | refs/heads/master | 2021-01-21T11:00:35.445687 | 2019-09-02T22:49:09 | 2019-09-02T22:49:09 | 83,512,795 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5782349705696106,
"alphanum_fraction": 0.5841906070709229,
"avg_line_length": 28.774192810058594,
"blob_id": "f1c48c9f38f1888c520b2ed9d8ea6d3f5eb4bc0e",
"content_id": "a2316a6148a5307c352ec39e5b89a61b3205b17a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1847,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 62,
"path": "/R/app_module_login.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "\nloginServer <- function(input, output, session, app_pwd, group, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n menu = NULL,\n logged_in = FALSE\n )\n\n # output\n output$group <- renderText(str_c(\"Group: \", group))\n output$tz <- renderText(str_c(\"Timezone: \", timezone))\n\n # always jump back to login\n observeEvent(input$login, login(input$password))\n observeEvent(input$auto_login_trigger, { if (is.null(app_pwd)) login(NULL) })\n\n login <- function(pwd) {\n log_in = FALSE\n if (is.null(app_pwd)) {\n message(\"INFO: No app_pwd required, logged in automatically\")\n log_in = TRUE\n } else {\n glue(\"INFO: checking app_pwd '{pwd}'... \") %>% message(appendLF = FALSE)\n if (!is.null(pwd) && app_pwd == pwd) {\n message(\"correct.\")\n log_in = TRUE\n } else {\n message(\"incorrect.\")\n showModal(modalDialog(h2(str_c(\"Sorry, password not recognized.\")), easyClose = TRUE, fade = FALSE))\n }\n }\n\n if (log_in) {\n hide(\"login-panel\")\n show(\"welcome-panel\")\n values$logged_in <- TRUE\n }\n }\n\n list(\n is_logged_in = reactive({ values$logged_in })\n )\n\n}\n\nloginUI <- function(id, title) {\n ns <- NS(id)\n\n tagList(\n column(width = 12, h2(\"Welcome to the \", a(\"Lab Logger\", href = \"https://github.com/kopflab/lablogger\", target=\"_blank\"), \"Platform of the \", title)),\n div(id = ns(\"login-panel\"),\n column(width = 12,\n passwordInput(ns(\"password\"), NULL, placeholder = \"Please enter your password.\"), br(),\n selectInput(ns(\"auto_login_trigger\"), NULL, choices = \"1\", selected = \"1\") %>% hidden(),\n actionButton(ns(\"login\"), \"Login\")\n )),\n div(id = ns(\"welcome-panel\"), column(width = 12, h3(\"You have been succesfully logged in.\"))) %>% hidden()\n )\n}\n"
},
{
"alpha_fraction": 0.6394851803779602,
"alphanum_fraction": 0.6424963474273682,
"avg_line_length": 32.425323486328125,
"blob_id": "0a994528aa2ee20ea7bd99f5c682a8f95c9cc9f3",
"content_id": "824e8705b1aa18c65ee137a35043afb89f06a122",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 20590,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 616,
"path": "/R/app_data.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# LINKS =====\n\nexperimentDeviceLinksDataServer <- function(input, output, session, group_id, access_token, pool, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n refresh_links = NULL\n )\n\n # get links\n get_links <- reactive({\n req(values$refresh_links)\n withProgress(\n message = 'Fetching data links', detail = \"Querying database for experiment device links...\", value = 0.5,\n ll_get_experiment_device_links(\n group_id = group_id, con = pool,\n select = c(\n exp_device_data_id, data_idx, active, # link info\n exp_id, recording, # exp info\n device_id, device_name, device_type_desc, particle_id # device info\n ),\n filter = active)\n ) %>%\n # introduce a unique id for the link combinations\n mutate(..id.. = paste(device_id, data_idx, sep = \"_\")) %>%\n # arrange by device name and index\n arrange(device_name, data_idx)\n })\n\n # initialize links\n init_links <- function() {\n if (is.null(values$refresh_links)) {\n module_message(ns, \"debug\", \"initializing links\")\n values$refresh_links <- 1\n }\n }\n\n # refresh links (only refreshes if links are already initalized!)\n refresh_links <- function() {\n if (!is.null(values$refresh_links)) {\n module_message(ns, \"debug\", \"setting links refresh flag\")\n values$refresh_links <- values$refresh_links + 1\n }\n }\n\n # LINKS functions ====\n list(\n get_links = get_links,\n refresh_links = refresh_links,\n init_links = init_links\n )\n}\n\n# EXPERIMENTS ====\n\nexperimentsDataServer <- function(input, output, session, links, group_id, access_token, pool, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values ----\n values <- reactiveValues(\n refresh_experiments = NULL,\n selected_exp_ids = c(), # multi selection experiments\n loaded_exp_id = NULL # single selection experiments\n )\n\n # LOAD =====\n\n # get_experiments ----\n get_experiments <- reactive({\n req(values$refresh_experiments)\n withProgress(\n message = 'Fetching experiments', detail = \"Querying database...\", value = 0.5,\n ll_get_experiments(group_id = group_id, con = pool, convert_to_TZ = timezone)\n )\n })\n\n init_experiments <- function() {\n if (is.null(values$refresh_experiments)) {\n module_message(ns, \"debug\", \"initializing experiments\")\n values$refresh_experiments <- 1\n }\n }\n\n refresh_experiments <- function() {\n if (!is.null(values$refresh_experiments)) {\n module_message(ns, \"debug\", \"setting experiments refresh flag\")\n values$refresh_experiments <- values$refresh_experiments + 1\n links$refresh_links()\n }\n }\n\n # single experiment\n load_experiment <- function(exp_id) {\n stopifnot(length(exp_id) == 1)\n if (!identical(values$loaded_exp_id, exp_id)) {\n module_message(ns, \"debug\", glue(\"loading exp id '{exp_id}'\"))\n values$loaded_exp_id <- exp_id\n if (is_loaded_experiment_archived()) {\n # archived\n module_message(ns, \"debug\", glue(\"this experiment ('{values$loaded_exp_id}') is archived\"))\n }\n }\n }\n\n # SELECT =====\n\n select_experiments <- function(exp_ids) {\n if (!identical(values$selected_exp_ids, exp_ids)) {\n module_message(\n ns, \"debug\",\n glue::glue(\"selecting exp ids '\",\n if(!is.null(exp_ids)) paste(exp_ids, collapse = \"', '\") else 'none',\n \"'\")\n )\n values$selected_exp_ids <- exp_ids\n }\n }\n\n # ADD =====\n\n # create new experiment\n add_experiment <- function(exp_id) {\n stopifnot(length(exp_id) == 1)\n withProgress(\n message = 'Creating experiment', detail = glue::glue(\"Setting up new experiment '{exp_id}'...\"), value 
= 0.5,\n ll_add_experiment(exp_id = exp_id, exp_desc = \"\", group_id = group_id, con = pool)\n )\n }\n\n # LINKS ====\n\n # get_experiment_device_links -------\n get_experiment_device_links <- reactive({\n if (!is.null(values$loaded_exp_id)) {\n links$init_links() # make sure links are initialized\n if (nrow(links$get_links()) > 0) {\n experiment_links <- links$get_links() %>%\n filter(exp_id == values$loaded_exp_id)\n module_message(\n ns, \"debug\",\n glue::glue(\"found {nrow(experiment_links)} device links for experiment \",\n \"'{values$loaded_exp_id}'\")\n )\n return(experiment_links)\n }\n }\n return(tibble(device_id = integer(0), data_idx = integer(0)))\n })\n\n # get_experiment_device_links_all -----\n # all experiment device links that overlap with this experiment\n get_experiment_device_links_all <- reactive({\n links$init_links() # make sure links are initialized\n semi_join(\n links$get_links(),\n get_experiment_device_links(),\n by = c(\"device_id\", \"data_idx\")\n )\n })\n\n # get_experiment_devices -----\n get_experiment_devices <- reactive({\n if (nrow(get_experiment_device_links()) > 0) {\n get_experiment_device_links() %>%\n select(device_id, device_name, particle_id, device_type_desc) %>%\n unique()\n } else {\n tibble(device_id = integer(0), device_name = character(0),\n particle_id = character(0), device_type_desc = character(0))\n }\n })\n\n # add new links\n add_experiment_device_links <- function(device_links) {\n ll_add_experiment_devices(\n exp_id = values$loaded_exp_id,\n experiment_devices = mutate(device_links, exp_id = values$loaded_exp_id),\n group_id = group_id,\n con = pool)\n }\n\n # delete links\n delete_experiment_device_links <- function(device_link_ids) {\n ll_remove_experiment_device_links(\n exp_id = values$loaded_exp_id,\n exp_device_data_ids = device_link_ids,\n con = pool)\n }\n\n # INFO =======\n # get_loaded_experiment_info ----\n get_loaded_experiment_info <- reactive({\n filter(get_experiments(), exp_id == !!values$loaded_exp_id)\n })\n\n update_loaded_experiment_info <- function(exp_desc, exp_notes) {\n ll_update_experiment_info(exp_id = values$loaded_exp_id, exp_desc = exp_desc,\n exp_notes = exp_notes, group_id = group_id, con = pool)\n }\n\n # is_loaded_experiment_archived -----\n is_loaded_experiment_archived <- reactive({\n filter(get_experiments(), exp_id == !!values$loaded_exp_id)$archived\n })\n\n # START / STOP ======\n\n start_experiment <- function() {\n withProgress(\n message = 'Starting experiment', detail = \"Updating database...\", value = 0.5,\n ll_experiment_start_recording(exp_id = values$loaded_exp_id, group_id = group_id, con = pool)\n )\n }\n\n stop_experiment <- function() {\n withProgress(\n message = 'Stopping experiment', detail = \"Updating database...\", value = 0.5,\n ll_experiment_stop_recording(exp_id = values$loaded_exp_id, group_id = group_id, con = pool)\n )\n }\n\n # FUNCTIONS ====\n list(\n get_experiments = get_experiments,\n init_experiments = init_experiments,\n refresh_experiments = refresh_experiments,\n select_experiments = select_experiments,\n get_selected_experiments = reactive(values$selected_exp_ids),\n add_experiment = add_experiment,\n load_experiment = load_experiment,\n get_loaded_experiment = reactive(values$loaded_exp_id),\n get_loaded_experiment_info = get_loaded_experiment_info,\n update_loaded_experiment_info = update_loaded_experiment_info,\n\n get_loaded_experiment_device_links = get_experiment_device_links,\n get_loaded_experiment_device_links_all = 
get_experiment_device_links_all,\n get_loaded_experiment_devices = get_experiment_devices,\n get_loaded_experiment_device_ids = reactive(get_experiment_devices()$device_id),\n add_experiment_device_links = add_experiment_device_links,\n delete_experiment_device_links = delete_experiment_device_links,\n\n is_loaded_experiment_recording = reactive(filter(get_experiments(), exp_id == !!values$loaded_exp_id)$recording),\n is_loaded_experiment_archived = is_loaded_experiment_archived,\n start_experiment = start_experiment,\n stop_experiment = stop_experiment\n )\n\n}\n\n# DEVICES ======\n\ndevicesDataServer <- function(input, output, session, links, group_id, access_token, pool, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n refresh_devices = NULL,\n selected_device_ids = c()\n )\n\n # get_devices -----\n get_devices <- reactive({\n req(values$refresh_devices)\n withProgress(\n message = 'Fetching devices', detail = \"Querying database...\", value = 0.5,\n ll_get_devices(group_id = group_id, con = pool) %>% arrange(device_name)\n )\n })\n\n init_devices <- function() {\n if (is.null(values$refresh_devices)) {\n module_message(ns, \"debug\", \"initializing devices\")\n values$refresh_devices <- 1\n }\n }\n\n refresh_devices <- function() {\n if (!is.null(values$refresh_devices)) {\n module_message(ns, \"debug\", \"setting devices refresh flag\")\n values$refresh_devices <- values$refresh_devices + 1\n links$refresh_links()\n }\n }\n\n select_devices <- function(device_ids) {\n if (!identical(values$selected_device_ids, device_ids)) {\n module_message(\n ns, \"debug\",\n glue::glue(\n \"selecting device ids \",\n if(!is.null(device_ids)) paste(device_ids, collapse = ', ')\n else \"'none'\")\n )\n values$selected_device_ids <- device_ids\n }\n }\n\n # get_devices_experiments_links -----\n get_devices_experiments_links <- reactive({\n if (length(values$selected_device_ids) > 0) {\n # make sure links are initialized\n links$init_links()\n if (nrow(links$get_links()) > 0) {\n device_links <- links$get_links() %>%\n filter(device_id %in% !!values$selected_device_ids & active)\n module_message(\n ns, \"debug\",\n glue::glue(\"found {nrow(device_links)} device links for device ids \",\n paste(values$selected_device_ids, collapse = \", \"))\n )\n return(device_links)\n }\n }\n return(tibble())\n })\n\n # devicesDataServer functions ====\n list(\n get_devices = get_devices,\n init_devices = init_devices,\n refresh_devices = refresh_devices,\n select_devices = select_devices,\n get_selected_devices = reactive({values$selected_device_ids}),\n get_devices_experiments_links = get_devices_experiments_links\n )\n}\n\n# LOGS ======\n\n# FIXME: this should just be logsDataServer because it handles data and state logs\ndatalogsDataServer <- function(input, output, session, experiments, devices, group_id, access_token, pool, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n refresh_device_state_logs = NULL,\n refresh_experiment_device_state_logs = NULL,\n refresh_experiments_data_logs = NULL,\n refresh_experiment_data_logs = NULL\n )\n\n # main getter function for device state logs\n get_device_state_logs <- function(device_ids) {\n if (length(device_ids) > 0) {\n filter_quo <- quo(device_id %in% !!device_ids)\n state_logs <- withProgress(\n message = 'Fetching device state logs', detail = \"Querying database...\", value = 0.5,\n ll_get_device_state_logs(\n group_id = group_id,\n filter = !!filter_quo,\n con = 
pool,\n convert_to_TZ = timezone\n )\n )\n return(state_logs)\n } else {\n return(data_frame())\n }\n }\n\n refresh_state_logs <- function() {\n values$refresh_device_state_logs <-\n if(is.null(values$refresh_device_state_logs)) 1\n else values$refresh_device_state_logs + 1\n }\n\n refresh_experiment_state_logs <- function() {\n values$refresh_experiment_device_state_logs <-\n if(is.null(values$refresh_experiment_device_state_logs)) 1\n else values$refresh_experiment_device_state_logs + 1\n }\n\n # main getter function for device data logs\n get_device_data_logs <- function(exp_ids, device_ids = NULL) {\n if (length(exp_ids) > 0 && (is.null(device_ids) || length(device_ids) > 0)) {\n data_logs <- withProgress(\n message = 'Fetching device data logs', detail = \"Querying database...\", value = 0.5,\n ll_get_exp_device_data_logs(\n exp_id = exp_ids,\n group_id = group_id,\n con = pool,\n convert_to_TZ = timezone\n )\n )\n if (!is.null(device_ids)) data_logs <- filter(data_logs, device_id %in% device_ids)\n return(data_logs)\n } else {\n return(data_frame())\n }\n }\n\n refresh_data_logs <- function() {\n values$refresh_experiments_data_logs <-\n if(is.null(values$refresh_experiments_data_logs)) 1\n else values$refresh_experiments_data_logs + 1\n }\n\n refresh_experiment_data_logs <- function() {\n values$refresh_experiment_data_logs <-\n if(is.null(values$refresh_experiment_data_logs)) 1\n else values$refresh_experiment_data_logs + 1\n }\n\n # datalogsDataServer functions =====\n list(\n # state logs\n get_devices_state_logs = eventReactive(\n values$refresh_device_state_logs,\n get_device_state_logs(devices$get_selected_devices())\n ),\n refresh_state_logs = refresh_state_logs,\n get_experiment_devices_state_logs = eventReactive(\n values$refresh_experiment_device_state_logs,\n get_device_state_logs(experiments$get_loaded_experiment_device_ids())\n ),\n refresh_experiment_state_logs = refresh_experiment_state_logs,\n # multi experiments data logs\n get_devices_experiments_data_logs = eventReactive( # Q: is this function used anywhere?\n values$refresh_experiments_data_logs,\n get_device_data_logs(experiments$get_selected_experiments(), devices$get_selected_devices())),\n get_experiments_data_logs = eventReactive(\n values$refresh_experiments_data_logs,\n get_device_data_logs(experiments$get_selected_experiments())),\n refresh_data_logs = refresh_data_logs,\n # single experiment data logs\n get_experiment_data_logs = eventReactive(\n {\n # trigger either if new experiment is loaded or the refresh event happens\n #experiments$get_loaded_experiment()\n values$refresh_experiment_data_logs\n },\n get_device_data_logs(experiments$get_loaded_experiment())),\n refresh_experiment_data_logs = refresh_experiment_data_logs\n )\n}\n\n# CLOUD =====\n\ncloudInfoDataServer <- function(input, output, session, experiments, devices, links, group_id, access_token, pool, timezone) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n refresh_cloud_state = NULL,\n refresh_cloud_data = NULL,\n refresh_cloud_info = NULL\n )\n\n # CLOUD STATE ----\n get_cloud_state <- function(devices, device_ids = devices$device_id) {\n withProgress(\n message = 'Fetching device cloud state', detail = \"Querying device cloud...\", value = 0.5,\n devices %>%\n filter(device_id %in% !!device_ids) %>%\n ll_get_devices_cloud_state(access_token = access_token, convert_to_TZ = timezone, spread = TRUE)\n )\n }\n\n # get_devices_cloud_state -----\n get_devices_cloud_state <- 
eventReactive(values$refresh_cloud_state, {\n validate(\n need(!is.null(devices$get_devices()), \"No devices available.\") %then%\n need(length(devices$get_selected_devices()) > 0, \"No device selected.\")\n )\n module_message(ns, \"debug\", \"fetching devices cloud state\")\n get_cloud_state(devices$get_devices(), devices$get_selected_devices())\n })\n\n # get_exp_devices_cloud_state ----\n get_exp_devices_cloud_state <- eventReactive(values$refresh_cloud_state, {\n validate(\n need(length(experiments$get_loaded_experiment_device_ids()) > 0, \"This experiment has no associated devices yet.\")\n )\n module_message(ns, \"debug\", \"fetching experiment devices cloud state\")\n get_cloud_state(experiments$get_loaded_experiment_devices())\n })\n\n refresh_cloud_state <- function() {\n values$refresh_cloud_state <- if(is.null(values$refresh_cloud_state)) 1 else values$refresh_cloud_state + 1\n }\n\n # CLOUD DATA =====\n get_cloud_data <- function(devices, device_ids = devices$device_id, links, linked, unlinked) {\n data <- withProgress(\n message = 'Fetching device cloud data', detail = \"Querying device cloud...\", value = 0.5,\n devices %>%\n filter(device_id %in% !!device_ids) %>%\n ll_get_devices_cloud_data(access_token = access_token, convert_to_TZ = timezone)\n )\n ll_summarize_cloud_data_experiment_links(\n cloud_data = data, experiment_device_links = links,\n linked = linked, unlinked = unlinked) %>%\n mutate(..id.. = paste(device_id, idx, sep = \"_\")) %>%\n arrange(device_name, idx)\n }\n\n # get_devices_cloud_data ----\n get_devices_cloud_data <- eventReactive(values$refresh_cloud_data, {\n validate(\n need(!is.null(devices$get_devices()), \"No devices available.\") %then%\n need(length(devices$get_selected_devices()) > 0, \"No device selected.\")\n )\n # get device cloud data\n module_message(ns, \"debug\", \"fetching device cloud data\")\n get_cloud_data(\n devices = devices$get_devices(),\n device_ids = devices$get_selected_devices(),\n links = devices$get_devices_experiments_links(),\n linked = TRUE, unlinked = TRUE\n )\n })\n\n # get_exp_devices_cloud_data ----\n get_exp_devices_cloud_data <- eventReactive(values$refresh_cloud_data, {\n validate(\n need(!is.null(experiments$get_loaded_experiment_devices()), \"No devices available.\") %then%\n need(!is.null(experiments$get_loaded_experiment_device_links_all()), \"No device links available.\")\n )\n module_message(ns, \"debug\", \"fetching experiment devices cloud data\")\n get_cloud_data(\n devices = experiments$get_loaded_experiment_devices(),\n links = experiments$get_loaded_experiment_device_links_all(),\n linked = TRUE, unlinked = FALSE\n )\n })\n\n # has_cloud_data ----\n has_cloud_data <- reactive({\n return(!is.null(values$refresh_cloud_data))\n })\n\n refresh_cloud_data <- function() {\n values$refresh_cloud_data <- if(is.null(values$refresh_cloud_data)) 1 else values$refresh_cloud_data + 1\n }\n\n reset_cloud_data <- function() {\n values$refresh_cloud_data <- NULL\n }\n\n\n\n # CLOUD INFO ====\n get_cloud_info <- function(devices, device_ids = devices$device_id) {\n withProgress(\n message = 'Fetching device cloud info', detail = \"Querying device cloud...\", value = 0.5,\n devices %>%\n filter(device_id %in% !!device_ids) %>%\n ll_get_devices_cloud_info(access_token = access_token, convert_to_TZ = timezone, include_unregistered = TRUE)\n )\n }\n\n # get_all_devices_cloud_info ----\n get_all_devices_cloud_info <- eventReactive(values$refresh_cloud_info, {\n validate(\n need(!is.null(devices$get_devices()), \"No devices 
available.\")\n )\n module_message(ns, \"debug\", \"fetching cloud info for all devices\")\n get_cloud_info(devices$get_devices())\n })\n\n # get_devices_cloud_info ----\n get_devices_cloud_info <- eventReactive(values$refresh_cloud_info, {\n validate(\n need(!is.null(devices$get_devices()), \"No devices available.\") %then%\n need(length(devices$get_selected_devices()) > 0, \"No device selected.\")\n )\n module_message(ns, \"debug\", \"fetching cloud info for devices\")\n get_cloud_info(devices$get_devices(), devices$get_selected_devices())\n })\n\n # get_exp_devices_cloud_info -----\n get_exp_devices_cloud_info <- eventReactive(values$refresh_cloud_info, {\n validate(\n need(length(experiments$get_loaded_experiment_device_ids()) > 0, \"This experiment has no associated devices yet.\")\n )\n module_message(ns, \"debug\", \"fetching cloud info experiment devices\")\n get_cloud_info(experiments$get_loaded_experiment_devices())\n })\n\n refresh_cloud_info <- function() {\n values$refresh_cloud_info <- if(is.null(values$refresh_cloud_info)) 1 else values$refresh_cloud_info + 1\n }\n\n # ALL CLOUD FUNCTIONS ======\n list(\n # devices cloud state\n get_devices_cloud_state = get_devices_cloud_state,\n get_exp_devices_cloud_state = get_exp_devices_cloud_state,\n refresh_cloud_state = refresh_cloud_state,\n # devices cloud data\n get_cloud_data = get_cloud_data,\n get_devices_cloud_data = get_devices_cloud_data,\n get_exp_devices_cloud_data = get_exp_devices_cloud_data,\n refresh_cloud_data = refresh_cloud_data,\n reset_cloud_data = reset_cloud_data,\n has_cloud_data = has_cloud_data,\n # devices cloud info\n get_all_devices_cloud_info = get_all_devices_cloud_info,\n get_devices_cloud_info = get_devices_cloud_info,\n get_exp_devices_cloud_info = get_exp_devices_cloud_info,\n refresh_cloud_info = refresh_cloud_info\n )\n\n\n}\n"
},
{
"alpha_fraction": 0.7857142686843872,
"alphanum_fraction": 0.7857142686843872,
"avg_line_length": 17.66666603088379,
"blob_id": "ea8f3fa51c59459fca937a8edee553592ffa3818",
"content_id": "feabf5c329f861d8991e60f785e66274fddbabdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 3,
"path": "/README.md",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# lablogger\n\nLab Logger command line interface and GUI.\n"
},
{
"alpha_fraction": 0.6216962337493896,
"alphanum_fraction": 0.6240631341934204,
"avg_line_length": 40.38775634765625,
"blob_id": "fa9abf658df333b32427ba6723aa5d8e60dd8c05",
"content_id": "be4c5d848c8454c07963cdeea5fa6d48a90d80ff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 20280,
"license_type": "no_license",
"max_line_length": 271,
"num_lines": 490,
"path": "/R/particle_cloud.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# functions to interact with the particle cloud variables --------\n\n# helper function to make a particle cloud request\n# @param endpoint the url endpoint for the request, typically devices/<device_id>/variable or devices/<device_id>/function\n# @param arg request argument --> required for function calls! even if just \\code{character(0)}\n# @param timeout how long to wait for curl request\n# @param nr which request this is (purely for info messages)\n# @param total total number of requests (purely for info messages)\n# @note consider implementing an asynchronious version with curl_fetch_multi (if possible to integrate well into shiny app.)\nmake_particle_cloud_request <- function(endpoint, arg = NULL, nr = NULL, total = NULL, timeout = default(request_timeout), access_token = default(access_token), quiet = default(quiet)) {\n\n # safety checks\n if (nchar(access_token) == 0) stop(\"missing access token\", call. = FALSE)\n\n # request\n handle <- new_handle(timeout = timeout)\n if (is.null(arg)) {\n request <- sprintf(\"https://api.particle.io/v1/%s?access_token=%s\", endpoint, access_token)\n } else if (is.character(arg) && length(arg) <= 1) {\n request <- sprintf(\"https://api.particle.io/v1/%s\", endpoint)\n post <- sprintf(\"access_token=%s&arg=%s\", access_token, utils::URLencode(arg))\n } else {\n stop(\"something amiss with arg: \", arg, call. = FALSE)\n }\n\n if (!quiet) {\n glue(\"\\nInfo: making cloud request \",\n if(!is.null(nr) && !is.null(total)) \"{nr}/{total} \" else \"\",\n \"('{endpoint}'\",\n if(!is.null(arg)) \" with arg '{arg}'\" else \"\",\n \")... \") %>%\n message(appendLF = FALSE)\n }\n\n # generate curl handle\n result <-\n tryCatch(\n handle %>%\n {\n # POST?\n if (!is.null(arg)) handle_setopt(., copypostfields = post)\n else .\n } %>%\n # make request\n curl_fetch_memory(request, handle = .) %>%\n { rawToChar(.$content) } %>%\n fromJSON(),\n error = function(e) {\n if (str_detect(e$message, \":\")) {\n return(list(error = str_extract(e$message, \"^[^:]*\"), error_details = str_extract(e$message, \"[^:\\\\s][^:]*$\")))\n } else {\n return(list(error = e$message))\n }\n }\n )\n\n if (!is.null(result$error)) {\n if (!quiet) glue(\"failed.\") %>% message()\n glue(\"encountered the following error: {result$error}\") %>%\n warning(immediate. = TRUE, call. = FALSE)\n } else if (!quiet) {\n glue(\"successful.\") %>% message()\n }\n\n return(result)\n}\n\n#' Get general device information\n#'\n#' Get information from the particle cloud about devices.\n#'\n#' @param devices data frame with devices for which to get cloud info, by default all devices associated with the group\n#' @param include_unregistered whether to provide cloud info for devices that are not registered in the database (no by default)\n#' @param particle_id the ID(s) of the particle device(s)\n#' @param access_token the access token for the accout\n#' @return nested data frame (converted from JSON)\n# @ note: consider making a function to udpate particle ids in the DB from here (overkill? 
since state/data logs cause update too)\n#' @export\nll_get_devices_cloud_info <- function(devices = ll_get_devices(group_id = group_id, con = con), include_unregistered = FALSE, group_id = default(group_id), con = default(con), access_token = default(access_token), convert_to_TZ = Sys.timezone(), quiet = default(quiet)) {\n\n # safety checks\n if (!is.data.frame(devices) | !all(c(\"particle_id\", \"device_name\") %in% names(devices)))\n stop(\"devices needs to be supplied as a data frame with columns (at the least) 'particle_id' and 'device_name'\", call. = FALSE)\n\n # request general info\n info <- make_particle_cloud_request(\n endpoint = \"devices\",\n access_token = access_token,\n quiet = quiet\n )\n\n if (is.data.frame(info) && nrow(info) > 0) {\n if (include_unregistered)\n devices <- devices %>% full_join(info, by = c(\"device_name\" = \"name\"))\n else\n devices <- devices %>% left_join(info, by = c(\"device_name\" = \"name\"))\n\n # check for missing particle IDs\n probs <- filter(devices, !is.na(device_id), !is.na(id), is.na(particle_id) | particle_id != id)\n if (nrow(probs) > 0) {\n glue::glue(\n \"some registered devices ({paste(probs$device_name, collapse = ', ')}) \",\n \"particle ids are not yet updated in the database and will be updated now...\") %>%\n warning(immediate. = TRUE, call. = FALSE)\n # NOTE: this will NOT automatically update the devices in the data manager which\n # requires a \"Refresh\", but particle_id updates should happen rarely enough that we're\n # just not going to worry about it, even if this means that this update may run\n # multiple times (unnecessarily) until devices are refreshed\n devices <- update_device_particle_id(devices, con = con, quiet = quiet)\n }\n\n # check for registered devices not listed in the cloud\n probs <- filter(devices, !is.na(device_id), is.na(id))\n if (nrow(probs) > 0) {\n glue::glue(\n \"some registered devices ({paste(probs$device_name, collapse = ', ')}) \",\n \"do not seem to exist in the cloud\") %>%\n warning(immediate. = TRUE, call. = FALSE)\n }\n\n devices <- devices %>% select(-id) %>%\n mutate(registered = !is.na(device_id))\n\n # time zone\n devices <- devices %>% mutate(last_heard = ymd_hms(last_heard)) %>%\n {\n if (!is.null(convert_to_TZ)) mutate(., last_heard = with_tz(last_heard, convert_to_TZ))\n else .\n }\n } else {\n warning(\"no information retrieved from cloud\", immediate. = TRUE, call. = FALSE)\n }\n\n return(devices)\n}\n\n# helper function for cloud variable request\nget_devices_cloud_variable <- function(devices, variable, access_token = default(access_token), quiet = default(quiet)) {\n # safety checks\n if (!is.data.frame(devices) | !all(c(\"particle_id\", \"device_name\") %in% names(devices)))\n stop(\"devices needs to be supplied as a data frame with columns (at the least) 'particle_id' and 'device_name'\", call. = FALSE)\n\n # request state\n ..device_variable.. <- variable\n devices %>%\n mutate(\n lists = map2(\n particle_id, dplyr::row_number(),\n ~make_particle_cloud_request(\n endpoint = sprintf(\"devices/%s/%s\", .x, ..device_variable..),\n nr = .y,\n total = nrow(devices),\n access_token = access_token,\n quiet = quiet\n )\n )\n ) %>%\n unpack_lists_data_frame(unnest_single_values = TRUE, unpack_sub_lists = TRUE, nest_into_data_frame = FALSE)\n}\n\n# helper function to unpack cloud variable result\nunpack_cloud_variable_result <- function(var_data, data_column, renames = c(), convert_to_TZ = Sys.timezone(), spread_function = NULL) {\n\n var_data <- mutate(var_data, ..rowid.. 
= dplyr::row_number())\n data_column_quo <- enquo(data_column)\n\n # unpack state data\n if (nrow(var_data) > 0 && \"result\" %in% names(var_data)) {\n var_data_unpacked <-\n var_data %>%\n select(..rowid.., result) %>%\n mutate(result = map(result, ~if(!is.na(.x)) {\n tryCatch(fromJSON(fix_truncated_JSON(.x)), error = function(e) { warning(\"problems parsing JSON - \", e$message, immediate. = TRUE, call. = FALSE); list() })\n } else list())) %>%\n unpack_lists_data_frame(result)\n\n # check if there is any data\n if (quo_text(data_column_quo) %in% names(var_data_unpacked)) {\n var_data_unpacked <- var_data_unpacked %>% unnest(!!data_column_quo)\n\n # only rename what exists\n renames <- renames[unname(renames) %in% names(var_data_unpacked)]\n\n if (nrow(var_data_unpacked) > 0) {\n var_data_unpacked <-\n var_data_unpacked %>%\n rename(!!!renames) %>%\n mutate(datetime = ymd_hms(datetime)) %>%\n {\n if (!is.null(convert_to_TZ)) mutate(., datetime = with_tz(datetime, convert_to_TZ))\n else .\n }\n\n if (!is.null(spread_function)) {\n var_data_unpacked <- spread_function(var_data_unpacked)\n }\n\n }\n\n var_data <- left_join(var_data %>% select(-result), var_data_unpacked, by = \"..rowid..\")\n }\n }\n\n return(select(var_data, -..rowid..))\n}\n\n# helper function for truncated JSON\n# closes unclosed \\\", ] and } as well as removes terminal ,\nfix_truncated_JSON <- function(json) {\n\n # close quotes if it's an odd number of quotes\n if (stringr::str_count(json, \"\\\\\\\"\") %% 2 > 0)\n json <- paste0(json, \"\\\"\")\n # make sure it doesn't end on a comma that doesn't have any follow-up\n else if (stringr::str_sub(json, -1) == \",\")\n json <- stringr::str_sub(json, 1, -2)\n # make sure doesn't end on a key without a value\n else if (stringr::str_detect(json, \",?\\\"[^\\\"]+\\\":?$\"))\n json <- stringr::str_replace(json, \",?\\\"[^\\\"]+\\\":?$\", \"\")\n\n # close missing parentheses\n open_brackets <- stringr::str_extract_all(json, \"[\\\\{\\\\[]\")[[1]]\n close_brackets <- stringr::str_extract_all(json, \"[\\\\}\\\\]]\")[[1]]\n if (length(open_brackets) > length(close_brackets)) {\n missing_brackets <- open_brackets[1:(length(open_brackets) - length(close_brackets))]\n matching_brackets <- c(\"{\" = \"}\", \"[\" = \"]\")\n json <- paste0(json, paste(matching_brackets[rev(missing_brackets)], collapse = \"\"))\n }\n return(json)\n}\n\n#' Get device state\n#'\n#' Get state from the particle cloud for devices.\n#' @inheritParams ll_get_devices_cloud_info\n#' @inheritParams ll_get_device_state_logs\n#' @param spread whether to convert the state data into wide format (note that this combines value and units columns!)\n#' @return nested data frame (converted from JSON)\nll_get_devices_cloud_state <-\n function(devices = ll_get_devices(group_id = group_id, con = con),\n group_id = default(group_id),\n con = default(con),\n access_token = default(access_token),\n convert_to_TZ = Sys.timezone(),\n spread = FALSE,\n quiet = default(quiet)) {\n\n if (nrow(devices) == 0) return(data_frame())\n\n devices %>%\n # request state info\n get_devices_cloud_variable(\n variable = \"state\",\n access_token = access_token,\n quiet = quiet\n ) %>%\n # unpack state data\n unpack_cloud_variable_result(\n data_column = s,\n renames = c(datetime = \"dt\", key = \"k\", value = \"v\", units = \"u\"),\n convert_to_TZ = convert_to_TZ,\n spread_function = if (spread) spread_state_columns else NULL\n )\n }\n\n#' Get device data\n#'\n#' Get latest data from the particle cloud for devices.\n#' 
@inheritParams ll_get_devices_cloud_info\n#' @inheritParams ll_get_device_state_logs\n#' @param spread whether to convert the state data into wide format (note that this combines key and index, as well as, value and units columns!)\n#' @return nested data frame (converted from JSON)\nll_get_devices_cloud_data <-\n function(devices = ll_get_devices(group_id = group_id, con = con),\n group_id = default(group_id),\n con = default(con),\n access_token = default(access_token),\n convert_to_TZ = Sys.timezone(),\n spread = FALSE,\n quiet = default(quiet)) {\n\n if (nrow(devices) == 0) return(data_frame())\n\n devices %>%\n # request live data info\n get_devices_cloud_variable(\n variable = \"data\",\n access_token = access_token,\n quiet = quiet\n ) %>%\n # unpack live data\n unpack_cloud_variable_result(\n data_column = d,\n renames = c(datetime = \"dt\", idx = \"i\", key = \"k\", value = \"v\", units = \"u\"),\n convert_to_TZ = convert_to_TZ,\n spread_function = if (spread) spread_data_columns else NULL\n ) %>%\n # add missing error\n { if (!\"error\" %in% names(.)) mutate(., error = NA_character_) else . } %>%\n # add missing datetime\n { if (!\"datetime\" %in% names(.)) mutate(., datetime = NA_real_) else . } %>%\n # rename raw data\n { if (\"r\" %in% names(.)) rename(., raw_serial = r) else mutate(., raw_serial = NA_character_) } %>%\n { if (\"e\" %in% names(.)) rename(., raw_serial_errors = e) else mutate(., raw_serial_errors = NA_character_) } %>%\n # add missing data fields\n { if (!spread && !\"idx\" %in% names(.)) mutate(., idx = NA_integer_) else . } %>%\n { if (!spread && !\"key\" %in% names(.)) mutate(., key = NA_character_) else . } %>%\n { if (!spread && !\"value\" %in% names(.)) mutate(., value = NA_real_) else . } %>%\n { if (!spread && !\"units\" %in% names(.)) mutate(., units = NA_character_) else . } %>%\n # arrange\n { if (spread) arrange(., device_name) else arrange(., device_name, idx) }\n }\n\n#' Test which values one gets for a set of experiment devices\n#' @param experiment_device_links experiment_device_links records, see \\link{ll_get_experiment_device_links}\nll_test_experiment_device_links <- function(experiment_device_links, spread = FALSE, access_token = default(access_token), quiet = default(quiet)) {\n\n if (!\"particle_id\" %in% names(experiment_device_links))\n stop(\"particle_id is a required column in experiment_device_links data frame\", call. = FALSE)\n if (!\"device_name\" %in% names(experiment_device_links))\n stop(\"device_name is a required column in experiment_device_links data frame\", call. = FALSE)\n if (!\"data_idx\" %in% names(experiment_device_links))\n stop(\"data_idx is a required column in experiment_device_links data frame\", call. = FALSE)\n\n data <- ll_get_devices_cloud_data(devices = experiment_device_links %>% select(particle_id, device_name) %>% unique(), spread = FALSE)\n if (nrow(data) > 0) {\n data <- select(data, particle_id, device_name, datetime, raw_serial, raw_serial_errors, idx, key, value, units)\n experiment_device_links %>%\n rename(idx = data_idx) %>%\n left_join(data, by = c(\"particle_id\", \"device_name\", \"idx\")) %>%\n { if (spread) spread_data_columns(.) else . }\n } else {\n experiment_device_links\n }\n}\n\n#' Cloud data / experiment links summary\n#'\n#' Utility function to combine experimental device links with devices cloud data. 
Will join by device_name, device_id or both, depending on which of these are in both the cloud_data and experiment_device_links tables.\n#'\n#' @param experiment_device_links the links between devices and experiments, see \\link{ll_get_experiment_device_links}\n#' @param linked whether to include linked data\n#' @param unlinked whether to include unlinked data\n#' @export\nll_summarize_cloud_data_experiment_links <- function(\n cloud_data = tibble(),\n experiment_device_links = tibble(),\n linked = TRUE, unlinked = TRUE) {\n\n # remove exp_device_data_ids because they interfere with summarizing, and particle_id if it exists because we want it from the cloud data instead\n experiment_device_links <- experiment_device_links[!names(experiment_device_links) %in% c(\"exp_device_data_id\", \"particle_id\")]\n\n # make sure empty cloud data or device links have the necessary columns\n if (nrow(experiment_device_links) == 0) {\n experiment_device_links <- tibble(\n exp_id = character(), recording = logical(),\n device_id = integer(), device_name = character(),\n data_idx = integer(), active = logical())\n }\n experiment_device_links <- rename(experiment_device_links, idx = data_idx)\n\n if (nrow(cloud_data) == 0) {\n cloud_data <- tibble(\n particle_id = character(), device_id = integer(), device_name = character(),\n datetime = as.POSIXct(numeric(), origin = \"1960-01-01\"),\n error = character(),\n raw_serial = character(), raw_serial_errors = character(),\n idx = integer(), key = character(), value = character(), units = character()\n )\n }\n\n # figure out join by\n join_by <- c()\n if (\"device_id\" %in% names(experiment_device_links) && \"device_id\" %in% names(cloud_data))\n join_by <- c(join_by, \"device_id\")\n if (\"device_name\" %in% names(experiment_device_links) && \"device_name\" %in% names(cloud_data))\n join_by <- c(join_by, \"device_name\")\n\n if (length(join_by) == 0) stop(\"neither device_id nor device_name\", call. = TRUE)\n\n cloud_data <- cloud_data %>%\n select(particle_id, !!join_by, device_name, datetime, error, raw_serial, raw_serial_errors, idx, key, value, units)\n\n full_join(\n cloud_data %>% filter(is.na(error)) %>% select(-error),\n experiment_device_links %>% filter(active),\n by = c(join_by, \"idx\")) %>%\n # add error info from cloud data to the existing links\n left_join(cloud_data %>% filter(!is.na(error)) %>% select(!!join_by, error), by = join_by) %>%\n # add info from cloud data for devices that have no existing links at all\n {\n bind_rows(., dplyr::anti_join(filter(cloud_data, !is.na(error)), ., by = join_by))\n } %>%\n filter(linked & !is.na(exp_id) | unlinked & is.na(exp_id)) %>%\n nest(exp_id, recording, .key = ..exp_data..) %>%\n mutate(\n recording_exp_ids = map_chr(..exp_data.., ~filter(.x, recording)$exp_id %>% { if(length(.) > 0) glue::glue_collapse(., sep = \", \") else NA_character_ }),\n non_recording_exp_ids = map_chr(..exp_data.., ~filter(.x, !recording)$exp_id %>% { if(length(.) > 0) glue::glue_collapse(., sep = \", \") else NA_character_ })\n ) %>%\n select(-..exp_data..) 
%>%\n select(particle_id, !!join_by, datetime, error, everything())\n}\n\n# functions to interact with particle cloud commands =====\n\n# helper function for cloud function calls\ncall_devices_cloud_function <- function(devices, func = \"device\", arg = \"\", access_token = default(access_token), quiet = default(quiet)) {\n # safety checks\n if (!is.data.frame(devices) | !all(c(\"particle_id\", \"device_name\") %in% names(devices)))\n stop(\"devices needs to be supplied as a data frame with columns (at the least) 'particle_id' and 'device_name'\", call. = FALSE)\n\n # request state\n devices %>%\n mutate(\n lists = map2(\n particle_id, dplyr::row_number(),\n ~make_particle_cloud_request(\n endpoint = sprintf(\"devices/%s/%s\", .x, !!func),\n arg = !!arg,\n nr = .y,\n total = nrow(devices),\n access_token = access_token,\n quiet = quiet\n )\n )\n ) %>%\n unpack_lists_data_frame(unnest_single_values = TRUE, unpack_sub_lists = TRUE, nest_into_data_frame = FALSE)\n}\n\n#' Send device commands to the cloud\n#'\n#' Send commands to one or more devices.\n#'\n#' @param devices data frame with devices for which to issue commands\n#' @param command the command to send\n#' @param message message to add to command\n#' @param access_token the access token for the account\n#' @return nested data frame (converted from JSON)\n# @ note: consider making a function to update particle ids in the DB from here (overkill? since state/data logs cause update too)\n#' @export\nll_send_devices_command <- function(devices, command, message = \"\", access_token = default(access_token), quiet = default(quiet)) {\n\n if (nchar(message) > 0) command <- paste(command, message)\n\n # return codes\n return_codes <- get_device_command_return_values()\n\n # call cloud function\n devices %>%\n # add command\n mutate(\n command = !!command\n ) %>%\n call_devices_cloud_function(\n func = \"device\", arg = command,\n access_token = access_token, quiet = quiet\n ) %>%\n # add missing error\n { if (!\"error\" %in% names(.)) mutate(., error = NA_character_) else . } %>%\n # add missing return value\n { if (!\"return_value\" %in% names(.)) mutate(., return_value = NA_integer_) else . } %>%\n # add return message\n mutate(\n return_message = case_when(\n is.na(return_value) ~ error,\n as.character(return_value) %in% names(return_codes) ~ return_codes[as.character(return_value)],\n return_value < 0 ~ \"Unknown error\",\n return_value > 0 ~ \"Unknown warning\",\n TRUE ~ \"Undefined behaviour\"\n ) %>% {\n ifelse(!is.na(return_value), str_c(., \" (code \", return_value, \")\"), .)\n }\n )\n}\n\n# helper function for device return values\nget_device_command_return_values <- function() {\n # details at https://github.com/KopfLab/labware_photon/blob/master/DeviceCommands.h\n c(\n `0` = \"Success\",\n `-1` = \"Undefined error\",\n `-2` = \"Device locked\",\n `-3` = \"Invalid command\",\n `-4` = \"Invalid command value\",\n `-5` = \"Invalid command units\",\n `1` = \"State already as requested\"\n )\n}\n
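\n# Example usage (a minimal sketch, not executed): sending a command with ll_send_devices_command(); 'my_devices' stands in for a devices data frame (e.g. from ll_get_devices()) and the command and token values are placeholders.\n# result <- ll_send_devices_command(\n# devices = my_devices,\n# command = \"lock\",\n# access_token = \"<particle-access-token>\"\n# )\n# result$return_message # per-device outcome, e.g. \"Success (code 0)\"\n"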
},
{
"alpha_fraction": 0.6266912817955017,
"alphanum_fraction": 0.6297662854194641,
"avg_line_length": 26.559322357177734,
"blob_id": "32e31ed2ac7368ebcfde3b15c488a18a2b392825",
"content_id": "9ef02299c88e14c03a47d8d7a37136fbf3219e45",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1626,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 59,
"path": "/R/app_module_device_selector.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "deviceSelectorServer <- function(input, output, session, get_devices, get_selected_devices, refresh_devices, select_devices, access_token) {\n\n # namespace\n ns <- session$ns\n\n # values\n values <- reactiveValues(\n devices = NULL\n )\n\n # selector\n selector <- callModule(selectorTableServer, \"selector\",\n id_column = \"device_id\",\n column_select = c(Name = device_name, Type = device_type_desc))\n\n # update data\n observe({\n req(df <- get_devices())\n isolate({\n if (nrow(df) > 0) {\n module_message(ns, \"debug\", \"setting device selection table\")\n df <- select(df, device_id, device_name, device_type_desc)\n selector$set_table(df)\n }\n })\n })\n\n # update selected\n observe({\n selected_devices <- get_selected_devices()\n selector$set_selected(selected_devices)\n })\n\n # trigger refresh\n observeEvent(input$device_refresh, refresh_devices())\n\n # trigger select\n observe(select_devices(selector$get_selected()))\n\n # device control ====\n control <- callModule(deviceCommandsServer, \"control\", get_devices = get_devices, access_token = access_token)\n\n}\n\ndeviceSelectorUI <- function(id, width = 12, add_footer = tagList()) {\n ns <- NS(id)\n default_box(\n title = \"Devices\", width = width,\n selectorTableUI(ns(\"selector\")),\n footer = div(\n tooltipInput(actionButton, ns(\"device_refresh\"), label = \"Refresh\", icon = icon(\"refresh\"), tooltip = \"Refresh devices.\"),\n spaces(1),\n selectorTableButtons(ns(\"selector\")),\n spaces(1),\n deviceControlButton(ns(\"control\")),\n add_footer\n )\n )\n}\n"
},
{
"alpha_fraction": 0.5274431109428406,
"alphanum_fraction": 0.5435073375701904,
"avg_line_length": 42.94117736816406,
"blob_id": "c237ea9b3dbfa6024f8e0402f3e5eea4ae206a70",
"content_id": "b8749047e1520cde3fc35b06c2748b42997cb161",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 747,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 17,
"path": "/R/app_screen_cameras.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# RASPI CAMS ====\n# NOTE: needs proper implementation\n# output$raspicams <- renderUI({\n#\n# # refresh if refresh is pushed\n# input$refresh_cams\n#\n# # retrieve raspi camp ip addresses\n# boxes <- map2(values$cameras$label, values$cameras$ip,\n# function(label, ip) {\n# box(title = a(glue(\"{label} camera\"), href = glue(\"http://{ip}\"), target = \"_new\"), collapsible = TRUE, collapsed = FALSE,\n# width = 12, status = \"success\", solidHeader = TRUE,\n# tags$iframe(src=glue(\"http://{ip}\"), width = \"640px\", height = \"480px\", frameborder=\"0\")) %>%\n# column(width = 12)\n# })\n# return(tagAppendChildren(fluidRow(), list = boxes))\n# })\n"
},
{
"alpha_fraction": 0.5781336426734924,
"alphanum_fraction": 0.5847597718238831,
"avg_line_length": 31.339284896850586,
"blob_id": "b5e836ced6d828a4debf5cd42358674612944875",
"content_id": "45dc373570fc51aea013fc8ccce7238257ff7787",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1811,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 56,
"path": "/R/summary.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# identify outliers\nidentify_data_outliers <- function(device_data_logs) {\n if (nrow(device_data_logs) == 0) return(device_data_logs)\n\n identify_outliers <- function(x) {\n qnt <- quantile(x, probs = c(0.25, 0.75), na.rm = TRUE)\n H <- 1.5 * IQR(x, na.rm = TRUE)\n !between(x, qnt[1] - H, qnt[2] + H)\n }\n\n device_data_logs %>%\n group_by(exp_id, device_name, data_key) %>%\n mutate(\n outlier = identify_outliers(data_value)\n ) %>%\n ungroup()\n}\n\n#' Generate data log summary\n#' @export\nll_summarize_data_logs <- function(data_logs, exclude_outliers = FALSE, slope_denom_units = \"hour\") {\n\n # column names\n drift_col <- str_c(\"lin_drift_per_\", slope_denom_units)\n drift_se_col <- str_c(drift_col, \"_se\")\n drift_pval_col <- str_c(drift_col, \"_pval\")\n\n # calculate summary\n data_logs %>%\n ll_calculate_duration(slope_denom_units) %>%\n {\n if(exclude_outliers) filter(identify_data_outliers(.), !outlier)\n else .\n } %>%\n group_by(exp_id, device_name, data_key, data_units) %>%\n summarize(\n n = n(),\n mean = mean(data_value, na.rm = TRUE),\n sd = sd(data_value, na.rm = TRUE),\n fit = list(lm(data_value ~ duration))\n # not that informative\n #cor_obj = list(cor.test(data_value, duration)),\n #time_cor = map_dbl(cor_obj, ~.x$estimate),\n ) %>%\n mutate(\n est = map(fit, broom::tidy),\n !!drift_col := map_dbl(est, ~filter(.x, term == \"duration\")$estimate),\n !!drift_se_col := map_dbl(est, ~filter(.x, term == \"duration\")$std.error),\n drift_R2 = map_dbl(fit, ~glance(.x)$r.squared)\n # same as time cor pval\n #!!drift_pval_col := map_dbl(est, ~filter(.x, term == \"duration\")$p.value)\n ) %>%\n ungroup() %>%\n arrange(exp_id, data_key, device_name) %>%\n select(-fit, -est, -device_name)\n}\n"
},
{
"alpha_fraction": 0.6704375743865967,
"alphanum_fraction": 0.6711854934692383,
"avg_line_length": 43.381744384765625,
"blob_id": "570a874677844815bb84e8b5c5a58857d02b24ae",
"content_id": "e601215e0a54943170f16d0b7b6378a61e893b5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 10696,
"license_type": "no_license",
"max_line_length": 204,
"num_lines": 241,
"path": "/R/db_write.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# groups ======\n\n#' Add a new group record\n#' @param group_id the unique identifier/name of the group. Will error if id already exists.\n#' @param desc the description for the device group (optional)\n#' @export\nll_add_group <- function(group_id, desc = NA, con = default(con), quiet = default(quiet)) {\n con <- validate_db_connection(enquo(con))\n if (!quiet) glue(\"\\nInfo: add new group '{group_id}'... \") %>% message(appendLF = FALSE)\n data <- data_frame(group_id = group_id, group_desc = desc)\n run_insert_sql(data, \"groups\", con, quiet = quiet)\n return(invisible(data));\n}\n\n# devices ====\n\n#' Add new device\n#' @param device_name device name / id, must be unique for each group (will error if not)\n#' @param desc device description\n#' @param device_type_id device type name, must exist in database (will error if not)\n#' @param group_id group name, must exist in database (will error if not)\n#' @param particle_id optional, will be automatically filled in the first time the device logs to the database\n#' @param in_use whether device is in use (if not, cannot log any data)\n#' @export\nll_add_device <- function(device_name, desc = NA, device_type_id = \"undefined\", group_id = default(group_id), particle_id = NA, in_use = TRUE, con = default(con), quiet = default(quiet)) {\n con <- validate_db_connection(enquo(con))\n if (!quiet) glue(\"\\nInfo: add new device '{device_name}' for group '{group_id}'... \") %>%\n message(appendLF = FALSE)\n data <- data_frame(device_name, device_desc = desc, device_type_id, group_id, particle_id, in_use)\n run_insert_sql(data, \"devices\", con, quiet = quiet)\n return(invisible(data));\n}\n\n# update device particle IDs\n# @return the devices with updated particle id and a new column (updated_particle_id)\nupdate_device_particle_id <- function(devices, con, quiet) {\n\n # safety checks\n con <- validate_db_connection(enquo(con))\n stopifnot(all(c(\"device_id\", \"particle_id\", \"id\") %in% names(devices)))\n\n devices <- mutate(devices, updated_particle_id = !is.na(device_id) & !is.na(id) & (is.na(particle_id) | particle_id != id))\n if (!quiet) {\n glue::glue(\"Info: updating particle ID for {sum(devices$updated_particle_id)} devices... \") %>%\n message(appendLF = FALSE)\n }\n sql <-\n glue::glue(\"UPDATE devices AS t SET particle_id = new.particle_id \",\n \"FROM ( VALUES {df_to_sql(select(filter(devices, updated_particle_id), device_id, id))} ) \",\n \"AS new (device_id, particle_id) \",\n \"WHERE new.device_id = t.device_id\")\n result <- run_sql(sql, con)\n if (!quiet) {\n glue::glue(\n \"{result} records updated.\\n\",\n \" - {paste(with(filter(devices, updated_particle_id), paste(device_id, ':', particle_id, 'to', id)), collapse = '\\n - ')}\") %>%\n message()\n }\n\n # return updated devices\n devices %>%\n mutate(particle_id = ifelse(updated_particle_id, id, particle_id))\n}\n\n# experiments ====\n\n#' Add new experiment\n#' @param exp_id the unique identifier of the experiment (usually a few letter code). 
Will error if id already exists.\n#' @param exp_desc optional description for the experiment\n#' @param exp_notes optional notes for the experiment\n#' @param group_id group name, must exist in database (will error if not)\n#' @export\nll_add_experiment <- function(exp_id, exp_desc = NA, exp_notes = NA, group_id = default(group_id), con = default(con), quiet = default(quiet)) {\n con <- validate_db_connection(enquo(con))\n if (missing(exp_id)) stop(\"must supply an experiment id\", call. = FALSE)\n if (!quiet) glue(\"\\nInfo: add new experiment '{exp_id}' for group '{group_id}'... \") %>%\n message(appendLF = FALSE)\n data <- data_frame(exp_id, exp_desc, exp_notes, group_id, recording = FALSE)\n run_insert_sql(data, \"experiments\", con, quiet = quiet)\n return(invisible(data));\n}\n\n#' Update experiment information\n#' @inheritParams ll_add_experiment\n#' @return whether the update was successful or not\n#' @export\nll_update_experiment_info <- function(exp_id, exp_desc = NULL, exp_notes = NULL, group_id = default(group_id), con = default(con), quiet = default(quiet)) {\n con <- validate_db_connection(enquo(con))\n if (missing(exp_id)) stop(\"must supply an experiment id\", call. = FALSE)\n if (length(exp_id) != 1) stop(\"must provide only one exp id\", call. = FALSE)\n if (!quiet) glue(\"\\nInfo: updating info for experiment '{exp_id}'... \") %>%\n message(appendLF = FALSE)\n\n updates <- c(\n exp_desc = if (!is.null(exp_desc)) exp_desc,\n exp_notes = if (!is.null(exp_notes)) exp_notes\n )\n if (is.null(updates) || length(updates) == 0) stop(\"nothing to update\", call. = FALSE)\n\n sql <- glue::glue(\n \"UPDATE experiments SET {to_sql(updates, named = TRUE)} \",\n \"WHERE group_id = {to_sql(group_id)} AND exp_id = {to_sql(exp_id)}\")\n result <- run_sql(sql, con)\n\n if (!quiet) {\n if (result > 0) glue(\"{result} record updated.\") %>% message()\n else message(\"no records found, this experiment is not part of this group.\")\n }\n\n return(result == 1)\n}\n\n# TODO: rename to ll_add_experiment_device_links\n#' Link device to experiment\n#'\n#' Note that for the same device, experiment and index, only one record can exist. If the same one is added again, it will simply update the existing one to be active (in case it is not).\n#'\n#' @param exp_id the unique identifier of the experiment (usually a few letter code). Has to exist already in the database.\n#' @param device_ids one or multiple device ids to add (by default is inferred from the device names)\n#' @param device_names names of the device(s) to link (define alternatively to the device id, will determine device_ids internally)\n#' @param data_idxs the data indices to map for the experiment, must be same length as device_names/device_ids\n#' @param experiment_devices alternatively provide experiment devices as a data frame right away\nll_add_experiment_devices <- function(\n exp_id,\n device_names,\n data_idxs,\n device_ids = ll_get_device_ids(device_names, quiet = quiet),\n experiment_devices = tibble(exp_id, device_id = device_ids, data_idx = data_idxs),\n group_id = default(group_id),\n con = default(con),\n quiet = default(quiet)) {\n\n if (!\"exp_id\" %in% names(experiment_devices)) stop(\"must supply an existing experiment id\", call. 
= FALSE)\n if (!\"device_id\" %in% names(experiment_devices)) stop(\"must supply device_id(s)\", call. = FALSE)\n if (!\"data_idx\" %in% names(experiment_devices) || !is.numeric(experiment_devices$data_idx))\n stop(\"must supply integer data indices\", call. = FALSE)\n\n # no duplicates\n experiment_devices <- unique(experiment_devices)\n\n if (!quiet) {\n device_info <- { if(!missing(device_names)) device_names else experiment_devices$device_id } %>%\n str_c(\" #\", experiment_devices$data_idx) %>% glue::glue_collapse(sep = \", \")\n glue(\"\\nInfo: linking device data ({device_info}) to experiment '{exp_id}'... \") %>%\n message(appendLF = FALSE)\n }\n run_insert_sql(\n experiment_devices, \"experiment_device_data\",\n # if record already exists, simply update it\n on_conflict_constraint = \"experiment_device_data_exp_id_device_id_data_idx_key\",\n on_conflict_do = \"UPDATE SET active = true\",\n con = con, quiet = quiet\n )\n return(invisible(experiment_devices));\n}\n\n#' Remove experiment device links\n#'\n#' Removes/deactivates the specified experiment device links. Removes those that don't already have data records associated with them and deactivates the others.\n#'\n#' @inheritParams ll_add_experiment_devices\n#' @param exp_id experiment id\n#' @param exp_device_data_ids exp_device_data_ids\nll_remove_experiment_device_links <- function(exp_id, exp_device_data_ids, con = default(con), quiet = default(quiet)) {\n\n if (!quiet) {\n glue(\"\\nInfo: trying to remove (if not already used) or else deactivate \",\n \"{length(exp_device_data_ids)} link(s) for experiment '{exp_id}'... \") %>%\n message(appendLF = FALSE)\n }\n\n deactivated <-\n glue::glue(\n \"UPDATE experiment_device_data SET active = false WHERE \",\n \"exp_id = {to_sql(exp_id)} \",\n \"AND exp_device_data_id IN ({to_sql(exp_device_data_ids)})\") %>%\n run_sql(con)\n\n deleted <-\n glue::glue(\n \"DELETE FROM experiment_device_data WHERE \",\n \"exp_id = {to_sql(exp_id)} \",\n \"AND exp_device_data_id IN ({to_sql(exp_device_data_ids)}) \",\n \"AND NOT EXISTS (SELECT 1 FROM device_data_logs \",\n \"WHERE device_data_logs.exp_device_data_id = experiment_device_data.exp_device_data_id)\") %>%\n run_sql(con)\n\n if (!quiet) {\n glue::glue(\"{deleted} link(s) deleted, {deactivated - deleted} link(s) deactivated.\") %>%\n message()\n }\n\n return(invisible(NULL))\n}\n\n# helper function to start/stop experiment recording\nchange_experiment_recording <- function(exp_id, recording, group_id, con, quiet) {\n\n con <- validate_db_connection(enquo(con))\n\n if (!quiet) {\n glue(\"\\nInfo: trying to {if(recording) 'start' else 'stop'} recording for experiment '{exp_id}' (group '{group_id}')... \") %>%\n message(appendLF = FALSE)\n }\n\n result <-\n glue(\"UPDATE experiments SET recording = {if(recording) 'true' else 'false'}, last_recording_change={to_sql(format(now('UTC')))} WHERE group_id = {to_sql(group_id)} AND exp_id = {to_sql(exp_id)}\") %>%\n run_sql(con)\n if (!quiet) {\n if (result > 0) glue(\"{result} record updated.\") %>% message()\n else message(\"no records found, this experiment is not part of this group.\")\n }\n return(result)\n}\n\n#' Start recording for an experiment\n#' @inheritParams ll_add_experiment_devices\nll_experiment_start_recording <- function(exp_id, group_id = default(group_id), con = default(con), quiet = default(quiet)) {\n if (missing(exp_id)) stop(\"must supply an existing experiment id\", call. 
= FALSE)\n result <- data_frame(exp_id) %>%\n mutate(\n updated = map_int(exp_id, ~change_experiment_recording(.x, TRUE, group_id = group_id, con = con, quiet = quiet)),\n success = updated > 0\n )\n return(invisible(result))\n}\n\n#' Stop recording for an experiment\n#' @inheritParams ll_experiment_start_recording\nll_experiment_stop_recording <- function(exp_id, group_id = default(group_id), con = default(con), quiet = default(quiet)) {\n if (missing(exp_id)) stop(\"must supply an existing experiment id\", call. = FALSE)\n result <- data_frame(exp_id) %>%\n mutate(\n updated = map_int(exp_id, ~change_experiment_recording(.x, FALSE, group_id = group_id, con = con, quiet = quiet)),\n success = updated > 0\n )\n return(invisible(result))\n}\n
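\n# Example usage (a minimal sketch, not executed; experiment/group ids and the device name are placeholders, 'con' is an open database connection):\n# ll_add_experiment(exp_id = \"EXP1\", exp_desc = \"test run\", group_id = \"mylab\", con = con)\n# ll_add_experiment_devices(exp_id = \"EXP1\", device_names = \"chemostat1\", data_idxs = 1, group_id = \"mylab\", con = con)\n# ll_experiment_start_recording(exp_id = \"EXP1\", group_id = \"mylab\", con = con)\n"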
},
{
"alpha_fraction": 0.5953671336174011,
"alphanum_fraction": 0.6052560210227966,
"avg_line_length": 33.643192291259766,
"blob_id": "e765fb757c7c6235cd6cc60422ee700805dbe6d8",
"content_id": "3cea603fcd86a9a3ba070b5d43b11b9728664190",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7382,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 213,
"path": "/R/app_module_device_info.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "\n#' Device Info Server\ndeviceInfoServer <- function(input, output, session, get_cloud_state, refresh_cloud_state, get_cloud_data, refresh_cloud_data, get_cloud_info, refresh_cloud_info, get_device_ids, get_state_logs, refresh_state_logs) {\n\n\n # namespace\n ns <- session$ns\n\n # fetches =======\n\n # fetch state\n observeEvent(input$fetch_state, refresh_cloud_state())\n\n # fetch data\n observeEvent(input$fetch_data, refresh_cloud_data())\n\n # fetch info\n observeEvent(input$fetch_info, refresh_cloud_info())\n\n # fetch logs\n observeEvent(input$fetch_logs, refresh_state_logs())\n\n # fetch all\n observe({\n fetches <-\n (input$fetch_state_all %>% { if(is.null(.)) 0L else . }) +\n (input$fetch_data_all %>% { if(is.null(.)) 0L else . }) +\n (input$fetch_info_all %>% { if(is.null(.)) 0L else . }) +\n (input$fetch_logs_all %>% { if(is.null(.)) 0L else . })\n req(fetches > 0)\n module_message(ns, \"debug\", \"fetching all cloud and database info\")\n isolate({\n refresh_cloud_state()\n refresh_cloud_data()\n refresh_cloud_info()\n refresh_state_logs()\n })\n })\n\n # state logs table =====\n is_device_selected <- reactive(length(get_device_ids()) > 0)\n generate_state_logs_table <- eventReactive(get_state_logs(), {\n logs <- get_state_logs()\n validate(need(nrow(logs) > 0, \"No state logs available.\"))\n logs <- logs %>%\n select(log_datetime, everything()) %>%\n select(-device_id) %>%\n mutate(log_datetime = format(log_datetime, \"%Y-%m-%d %H:%M:%S\"))\n return(logs)\n })\n output$logs_table <- DT::renderDataTable({\n DT::datatable(\n generate_state_logs_table(),\n options = list(orderClasses = TRUE, order = list(1, \"desc\"),\n lengthMenu = c(5, 10, 25, 50, 100), pageLength = 10),\n filter = \"bottom\"\n )\n })\n\n # live state table ====\n output$state_table <- renderTable({\n state <- get_cloud_state()\n validate(need(nrow(state) > 0 && \"datetime\" %in% names(state), \"No live state information available.\"))\n module_message(ns, \"debug\", \"rendering cloud state table\")\n vars_start <- which(names(state) == \"version\")\n state %>% arrange(device_name) %>%\n mutate(datetime = format(datetime)) %>%\n select(Name = device_name, `Live state posted at` = datetime, Version = version, vars_start:ncol(state))\n }, striped = TRUE, spacing = 'xs', width = '100%', align = NULL)\n\n # live data table ======\n # this is only really used on devices screen, should probably be part of device_manager\n # like the experiment counterpart in experiment_manager!\n output$data_table <- renderTable({\n data <- get_cloud_data()\n validate(need(nrow(data) > 0, \"No live data available.\"))\n data <- data %>%\n mutate(datetime = ifelse(!is.na(datetime), format(datetime), error)) %>%\n select(Name = device_name, `Live data posted at` = datetime,\n `Exp IDs (recording)` = recording_exp_ids, `Exp IDs (not recording)` = non_recording_exp_ids,\n idx, key, value, units,\n raw_serial, raw_serial_errors)\n module_message(ns, \"debug\", \"rendering cloud data table\")\n return(apply_live_data_table_options(data))\n }, striped = TRUE, spacing = 'xs', width = '100%', align = NULL)\n\n # column options for live data table\n apply_live_data_table_options <- function(data) {\n if (!\"serial\" %in% input$data_table_options)\n data <- select(data, -raw_serial, -raw_serial_errors)\n if (!\"r_exps\" %in% input$data_table_options)\n data <- select(data, -`Exp IDs (recording)`)\n if (!\"nr_exps\" %in% input$data_table_options)\n data <- select(data, -`Exp IDs (not recording)`)\n return(data)\n }\n\n # live info 
table =====\n output$info_table <- renderTable({\n info <- get_cloud_info()\n validate(need(nrow(info) > 0, \"No device information available.\"))\n module_message(ns, \"debug\", \"rendering cloud info table\")\n info %>%\n # only show db-registered devices' cloud info\n filter(registered) %>%\n arrange(device_name) %>%\n mutate(last_heard = format(last_heard)) %>%\n select(Name = device_name, `Last heard from` = last_heard, Connected = connected, Status = status, Firmware = system_firmware_version)\n }, striped = TRUE, spacing = 'xs', width = '100%', align = NULL)\n\n # return functions\n list(\n trigger_live_data_table_options = reactive(input$data_table_options),\n apply_live_data_table_options = apply_live_data_table_options\n )\n\n}\n\ndeviceLogsUI <- function(id, width = 12) {\n\n ns <- NS(id)\n\n tagList(\n\n default_box(\n title = \"Device State Logs\", width = width,\n style = paste0(\"min-height: 300px;\"),\n DT::dataTableOutput(ns(\"logs_table\")) %>% withSpinner(type = 5, proxy.height = \"300px\"),\n footer = div(\n tooltipInput(actionButton, ns(\"fetch_logs\"), \"Fetch Logs\", icon = icon(\"cloud-download\"),\n tooltip = \"Fetch the most recent state logs from the data base.\")\n )\n )\n\n )\n\n}\n\n\ndeviceInfoUI <- function(id, width = 12) {\n\n ns <- NS(id)\n\n tagList(\n\n # live info\n default_box(\n title = \"Live Device Info\", width = width,\n style = paste0(\"min-height: 130px;\"),\n tableOutput(ns(\"info_table\")) %>% withSpinner(type = 5, proxy.height = \"130px\"),\n footer = div(\n tooltipInput(actionButton, ns(\"fetch_info\"), \"Fetch Info\", icon = icon(\"cloud-download\"),\n tooltip = \"Fetch the most recent device information from the cloud.\")\n )\n )\n\n )\n\n}\n\ndeviceStateUI <- function(id, width = 12) {\n ns <- NS(id)\n\n tagList(\n\n # live state\n default_box(\n style = paste0(\"min-height: 130px;\"),\n title = \"Live Device State\", width = width,\n tableOutput(ns(\"state_table\")) %>% withSpinner(type = 5, proxy.height = \"130px\"),\n footer =\n div(\n tooltipInput(actionButton, ns(\"fetch_state\"), \"Fetch State\", icon = icon(\"cloud-download\"),\n tooltip = \"Fetch the most recent state information from the cloud.\")\n )\n )\n )\n}\n\n# allow output as parameter to re-use this function for the experiment device data links\ndeviceDataUI <- function(\n id, width = 12, selected_options = c(\"r_exps\", \"serial\"),\n ns = NS(id),\n title = \"Live Device Data\",\n output = withSpinner(tableOutput(ns(\"data_table\")), type = 5, proxy.height = \"130px\"),\n add_footer = tagList()) {\n\n tagList(\n # live data\n default_box(\n title = title, width = width,\n style = paste0(\"min-height: 130px;\"),\n checkboxGroupInput(ns(\"data_table_options\"), NULL,\n c(\"Experiment Links (recording)\" = \"r_exps\",\n \"Experiment Links (not recording)\" = \"nr_exps\",\n \"Raw Serial Data\" = \"serial\"),\n selected = selected_options,\n inline = TRUE),\n output,\n footer = div(\n tooltipInput(actionButton, ns(\"fetch_data\"), \"Fetch Data\", icon = icon(\"cloud-download\"),\n tooltip = \"Fetch the most recent live data and experiment links from the cloud.\"),\n spaces(1),\n add_footer\n )\n )\n )\n}\n\ndeviceFetchAllUI <- function(id) {\n ns <- NS(id)\n tooltipInput(actionButton, ns(\"fetch_data_all\"), \"Fetch All (Data, State, Info, Logs)\", icon = icon(\"cloud-download\"),\n tooltip = \"Fetch all device information from the cloud and database.\")\n}\n\n\n"
},
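A note on the "fetch all" observer in the module above: actionButtons rendered via `renderUI` report `NULL` until they first appear, which is why each counter is coalesced to `0L` before summing. A minimal standalone sketch of that idiom; the `count_or_zero` helper and the button ids are illustrative, not part of the package:

```r
library(shiny)

# illustrative helper: coalesce a possibly-NULL actionButton counter;
# buttons created inside renderUI are NULL until they first render
count_or_zero <- function(x) if (is.null(x)) 0L else x

ui <- fluidPage(actionButton("fetch_a", "A"), actionButton("fetch_b", "B"))

server <- function(input, output, session) {
  observe({
    fetches <- count_or_zero(input$fetch_a) + count_or_zero(input$fetch_b)
    req(fetches > 0) # skip the initial run before any click
    message("fetch buttons pressed ", fetches, " time(s) in total")
  })
}

# shinyApp(ui, server)
```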
{
"alpha_fraction": 0.6532033681869507,
"alphanum_fraction": 0.6545960903167725,
"avg_line_length": 32.78823471069336,
"blob_id": "fc8dea44182ad877cd9cdc52d6a8f298b4b07a70",
"content_id": "fdaa3d4d7a3878a37f4e8f21ca6ada46f13cc8c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2872,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 85,
"path": "/R/app_module_device_manager.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "deviceManagerServer <- function(input, output, session, dm_devices, dm_cloudinfo, dm_datalogs, access_token) {\n\n # namespace\n ns <- session$ns\n\n # add/register new device ===\n\n observeEvent(input$device_add, {\n module_message(ns, \"debug\", \"adding new devices is not implemented yet\")\n # # FIXME implement\n # # generate modal dialog with radio button selection of the device ypes\n # # make sure both devices and cloud info are up to date\n # dm_devices$refresh_devices()\n # dm_cloudinfo$refresh_cloud_info()\n # all_devices <- dm_cloudinfo$get_all_devices_cloud_info()\n # print(all_devices) # filter by !registered\n })\n\n observeEvent(input$device_inuse, {\n # FIXME implement\n module_message(ns, \"debug\", \"deactivating devices is not implemented yet\")\n })\n\n # devices selector ===\n\n devices <- callModule(\n deviceSelectorServer, \"devices\",\n get_devices = dm_devices$get_devices,\n get_selected_devices = dm_devices$get_selected_devices,\n refresh_devices = dm_devices$refresh_devices,\n select_devices = dm_devices$select_devices,\n access_token = access_token\n )\n\n # devices info ===\n\n deviceInfoServer <- callModule(\n deviceInfoServer, \"devices_info\",\n get_cloud_state = dm_cloudinfo$get_devices_cloud_state,\n refresh_cloud_state = dm_cloudinfo$refresh_cloud_state,\n get_cloud_data = dm_cloudinfo$get_devices_cloud_data,\n refresh_cloud_data = dm_cloudinfo$refresh_cloud_data,\n get_cloud_info = dm_cloudinfo$get_devices_cloud_info,\n refresh_cloud_info = dm_cloudinfo$refresh_cloud_info,\n get_device_ids = dm_devices$get_selected_devices,\n get_state_logs = dm_datalogs$get_devices_state_logs,\n refresh_state_logs = dm_datalogs$refresh_state_logs\n )\n\n}\n\n\n\ndeviceManagerUI <- function(id, width = 12) {\n\n ns <- NS(id)\n\n tagList(\n deviceSelectorUI(ns(\"devices\"), width = width,\n add_footer = tagList(\n spaces(1),\n tooltipInput(actionButton, ns(\"device_add\"), label = \"Add device\", icon = icon(\"plus-circle\"), tooltip = \"Register new device. NOT IMPLEMENETED YET\"),\n spaces(1),\n tooltipInput(actionButton, ns(\"device_unuse\"), label = \"Deactivate\", icon = icon(\"eye-slash\"),\n tooltip = \"Deactivate the selected device(s). Inactive devices do not record any data but are otherwise fully functional and can be reactivated using -Add device-. NOT IMPLEMENTED YET.\")\n )\n ),\n tabsetPanel(\n type = \"tabs\", # selected = \"data\",\n tabPanel(\n value = \"live\",\n \"Live Info\", br(),\n # fetch all is a bit confusing...\n deviceDataUI(ns(\"devices_info\"), selected_options = c(\"r_exps\")),\n deviceStateUI(ns(\"devices_info\")),\n deviceInfoUI(ns(\"devices_info\"))\n ),\n tabPanel(\n value = \"logs\",\n \"Logs\", br(),\n deviceLogsUI(ns(\"devices_info\"))\n )\n )\n )\n}\n"
},
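The `FIXME` in `deviceManagerServer` describes the planned add-device flow: a modal dialog with a radio-button selection of device types. A hypothetical sketch of what such a dialog could look like; the helper name and all input ids below are invented for illustration, and `device_types` would come from the `device_types` table or the cloud info:

```r
library(shiny)

# hypothetical sketch of the planned "Add device" dialog
show_add_device_modal <- function(session, device_types) {
  ns <- session$ns
  showModal(modalDialog(
    title = "Register new device",
    # radio-button selection of the device types, as the FIXME describes
    radioButtons(ns("new_device_type"), "Device type", choices = device_types),
    textInput(ns("new_device_name"), "Device name"),
    footer = tagList(
      modalButton("Cancel"),
      actionButton(ns("new_device_confirm"), "Add")
    )
  ))
}
```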
{
"alpha_fraction": 0.7108266949653625,
"alphanum_fraction": 0.7267381548881531,
"avg_line_length": 29.592592239379883,
"blob_id": "4d52c51fbff7e3a83e3beeb7e1febfff0840dd5d",
"content_id": "923fa6a2546e71f033ce3fcead1998c36b8d772a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 5782,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 189,
"path": "/inst/db/postgres_setup.sql",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# Data base setup\n\n-- Database: chemostat-db1\n-- Note: all timestamps must be WITH timezone, otherwise R has trouble\n\n-- DROP DATABASE \"chemostat-db1\";\n\nCREATE DATABASE \"chemostat-db1\"\n WITH\n OWNER = root\n ENCODING = 'UTF8'\n LC_COLLATE = 'en_US.UTF-8'\n LC_CTYPE = 'en_US.UTF-8'\n TABLESPACE = pg_default\n CONNECTION LIMIT = -1;\n\n-- Table: public.camera\n\nDROP TABLE IF EXISTS cameras;\n\nCREATE TABLE cameras\n(\n name character varying(30) PRIMARY KEY,\n label character varying(100) NULL,\n address character varying(20) NOT NULL,\n active boolean DEFAULT true\n);\n\nINSERT INTO cameras(name, label, address, active)\n\tVALUES ('Cam #1', 'Test Camera', '128.138.152.159:8081', true);\n\nSELECT * FROM cameras;\n\n-- Table: public.settings\n\nDROP TABLE IF EXISTS settings;\n\nCREATE TABLE settings\n(\n setting_id character varying(50) PRIMARY KEY,\n setting_value text\n);\n\nINSERT INTO settings(setting_id, setting_value)\n VALUES('particle_access_token', '');\n\nSELECT * FROM settings;\n\n-- Table: device_types\n\nDROP TABLE IF EXISTS device_types;\n\nCREATE TABLE device_types\n(\n device_type_id character varying(20) PRIMARY KEY,\n device_type_desc character varying(255) NULL\n);\n\nINSERT INTO device_types(device_type_id, device_type_desc)\n\tVALUES ('undefined', 'undefined device types'),\n ('M800', 'Mettler Toledo multi-channel amplifier'),\n ('scale', 'Scale'),\n ('mfc', 'Mass Flow Controller'),\n ('pump', 'Peristaltic Pump');\n\n-- Table: groups\n\nDROP TABLE IF EXISTS groups;\n\nCREATE TABLE groups\n(\n group_id character varying(20) PRIMARY KEY,\n group_desc character varying(255) NULL\n);\n\nINSERT INTO groups(group_id, group_desc)\n\tVALUES ('testing', 'testing account');\n\n-- Table: devices\n\nDROP TABLE IF EXISTS devices CASCADE;\n\nCREATE TABLE devices\n(\n device_id SERIAL PRIMARY KEY,\n device_name character varying(20) NOT NULL,\n particle_id character varying(50) NULL,\n group_id character varying(20) NOT NULL references groups(group_id),\n device_type_id character varying(20) NOT NULL references device_types(device_type_id),\n device_desc character varying(255) NULL,\n in_use boolean NOT NULL DEFAULT true,\n unique (group_id, device_name)\n);\n\nINSERT INTO devices(device_name, group_id, device_type_id, device_desc)\n\tVALUES ('testing', 'testing', 'undefined', 'testing device');\n\n-- Table: experiments\n\nDROP TABLE IF EXISTS experiments;\n\nCREATE TABLE experiments\n(\n exp_id character varying(20) PRIMARY KEY,\n group_id character varying(20) NOT NULL references groups(group_id),\n exp_desc character varying(255) NULL,\n recording boolean NOT NULL DEFAULT false,\n last_recording_change timestamp with time zone default NOW(),\n archived boolean NOT NULL DEFAULT false\n);\n\nINSERT INTO experiments(exp_id, group_id, exp_desc, recording)\n VALUES('TEST', 'testing', 'Test Exp', true);\nINSERT INTO experiments(exp_id, group_id, exp_desc, recording)\n VALUES('TEST2', 'testing', 'Test Exp', true);\nINSERT INTO experiments(exp_id, group_id, exp_desc, recording)\n VALUES('TEST3', 'testing', 'Inactive Test Exp', false);\n\n-- Table: experiment_device_data\n\nDROP TABLE IF EXISTS experiment_device_data;\n\nCREATE TABLE experiment_device_data (\n exp_device_data_id SERIAL PRIMARY KEY,\n exp_id character varying(20) NOT NULL references experiments(exp_id) ON UPDATE CASCADE,\n -- only used for mapping during log processing\n device_id integer NOT NULL references devices(device_id),\n data_idx integer NOT NULL,\n -- whether this is an active experiment 
device\n active boolean NOT NULL DEFAULT True,\n -- constraint\n unique (exp_id, device_id, data_idx)\n);\n\nINSERT INTO experiment_device_data(exp_id, device_id, data_idx)\n SELECT 'TEST', d.device_id, 1 FROM devices d WHERE d.device_name = 'testing' AND d.group_id = 'testing';\nINSERT INTO experiment_device_data(exp_id, device_id, data_idx)\n SELECT 'TEST', d.device_id, 2 FROM devices d WHERE d.device_name = 'testing' AND d.group_id = 'testing';\nINSERT INTO experiment_device_data(exp_id, device_id, data_idx)\n SELECT 'TEST2', d.device_id, 1 FROM devices d WHERE d.device_name = 'testing' AND d.group_id = 'testing';\nINSERT INTO experiment_device_data(exp_id, device_id, data_idx)\n SELECT 'TEST3', d.device_id, 1 FROM devices d WHERE d.device_name = 'testing' AND d.group_id = 'testing';\n\n\n-- Table: device_raw_logs\n\nDROP TABLE IF EXISTS device_raw_logs CASCADE;\n\nCREATE TABLE device_raw_logs(\n device_raw_log_id SERIAL PRIMARY KEY,\n created_datetime timestamp with time zone NOT NULL,\n raw_data text NULL\n);\n\n-- Table: device_state_logs\n\nDROP TABLE IF EXISTS device_state_logs;\n\nCREATE TABLE device_state_logs (\n device_state_log_id SERIAL PRIMARY KEY,\n device_raw_log_id integer NOT NULL references device_raw_logs(device_raw_log_id),\n device_id integer NOT NULL references devices(device_id),\n log_datetime timestamp with time zone,\n log_type character varying(50) NOT NULL,\n log_message character varying(255) NULL,\n state_key character varying(50) NULL,\n state_value character varying(100) NULL,\n state_units character varying(20) NULL,\n notes character varying(255) NULL\n);\n\n-- Table: device_data_logs\n\nDROP TABLE IF EXISTS device_data_logs;\n\nCREATE TABLE device_data_logs (\n device_data_log_id SERIAL PRIMARY KEY,\n device_raw_log_id integer NOT NULL references device_raw_logs(device_raw_log_id),\n device_id integer NOT NULL references devices(device_id),\n exp_device_data_id integer NOT NULL references experiment_device_data(exp_device_data_id),\n log_datetime timestamp with time zone,\n log_time_offset real default 0.0,\n data_idx integer NOT NULL,\n data_key character varying(50) NOT NULL,\n data_value double precision NULL,\n data_sd double precision NULL,\n data_units character varying(20) NULL,\n data_n integer NULL\n);\n"
},
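To illustrate the schema above from the R side, here is a sketch of querying the most recent state log entries for one device with DBI and RPostgreSQL (host and credentials are placeholders):

```r
library(DBI)
library(RPostgreSQL)

# connect to the database created by the setup script (placeholder credentials)
con <- dbConnect(PostgreSQL(), host = "localhost", dbname = "chemostat-db1",
                 user = "root", password = "...")

# most recent state log entries for one device of a group
dbGetQuery(con, "
  SELECT l.log_datetime, l.log_type, l.state_key, l.state_value, l.state_units
  FROM device_state_logs l
  JOIN devices d ON d.device_id = l.device_id
  WHERE d.group_id = 'testing' AND d.device_name = 'testing'
  ORDER BY l.log_datetime DESC
  LIMIT 10")

dbDisconnect(con)
```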
{
"alpha_fraction": 0.599449872970581,
"alphanum_fraction": 0.6025442481040955,
"avg_line_length": 44.80315017700195,
"blob_id": "dc0b6241ebd66f93a49338c6b544e72a55b16372",
"content_id": "afad2657091174566ed7674dc60dec99d1abc970",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5817,
"license_type": "no_license",
"max_line_length": 291,
"num_lines": 127,
"path": "/inst/lambda/service.py",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport logging\nimport pg8000\n\n# logger\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# db connection\ntry:\n conn = pg8000.connect(user=os.environ['db_user'], host=os.environ['db_host'], database=os.environ['db_name'], password=os.environ['db_pwd'])\nexcept:\n logger.error(\"unexpected error - could not connect to database.\")\n sys.exit()\nlogger.info(\"connection to database succeeded\")\n\n# parameter checks\ndef get_parameter(event, param_name):\n param = event.get(param_name)\n if param is None:\n logger.error(\"no '{}' provided in event {}\".format(param_name, event))\n sys.exit()\n return(param)\n\n# create raw log\ndef log_event(event):\n # event raw log\n cur = conn.cursor()\n cur.execute(\"INSERT INTO device_raw_logs (created_datetime, raw_data) VALUES (current_timestamp, %s) RETURNING device_raw_log_id\", ('{}'.format(event),));\n device_raw_log_id = cur.fetchone()[0]\n conn.commit()\n return(device_raw_log_id)\n\n# handler\ndef handler(event, context):\n # event log\n device_raw_log_id = log_event(event)\n\n # safety checks\n group_id = get_parameter(event, 'group_id')\n particle_id = get_parameter(event, 'particle_id')\n published_at = get_parameter(event, 'published_at')\n payload = get_parameter(event, 'payload')\n device_name = get_parameter(payload, 'id')\n log_type = get_parameter(event, 'log')\n\n logger.info(\"processing '{}' log for device '{}' (group_id={}, particle_id={}, device_raw_log_id={}): {}\".format(\n log_type, device_name, group_id, particle_id, device_raw_log_id, event))\n\n # valid log_type?\n if log_type != \"state\" and log_type != \"data\":\n logger.info(\"invalid log type\");\n return(\"Log type not supported.\");\n\n # session\n cur = conn.cursor()\n\n # get device\n cur.execute(\"SELECT device_id, particle_id, in_use FROM devices WHERE device_name = (%s) AND group_id = (%s)\", (device_name, group_id,))\n device = cur.fetchone()\n\n # no device\n if device is None:\n logger.info(\"device/group pair not listed in base\")\n return(\"Device does not exist for group.\")\n\n [device_id, known_particle_id, in_use] = device\n # device in use?\n if in_use != True:\n logger.info(\"device not in use, discarding log entries\")\n return(\"Device not in use.\")\n\n # update particle_id?\n if particle_id != known_particle_id:\n logger.info(\"updating device particle_id from {} to {}\".format(known_particle_id, particle_id))\n cur.execute(\"UPDATE devices SET particle_id = %s WHERE device_id = %s\",\n (particle_id, device_id, ))\n conn.commit()\n\n # process state logs\n if log_type == \"state\":\n device_state_log_ids = []\n for state in payload.get('s'):\n cur.execute(\"INSERT INTO device_state_logs (device_raw_log_id, device_id, log_datetime, log_type, log_message, state_key, state_value, state_units, notes) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING device_state_log_id\",\n (device_raw_log_id, device_id, published_at, payload.get('t'), payload.get('m'), state.get('k'), state.get('v'), state.get('u'), payload.get('n')))\n device_state_log_ids = device_state_log_ids + cur.fetchone()\n conn.commit()\n logger.info(\"device in use, created {} log entries (IDs: {})\".format(len(device_state_log_ids), ', '.join(map(str, device_state_log_ids))))\n return(\"Device in use ({} state log entries created).\".format(len(device_state_log_ids)))\n\n # process data logs\n elif log_type == \"data\":\n\n # get device data logs\n cur.execute(\"SELECT data_idx, 
exp_device_data_id, experiments.exp_id FROM experiments, experiment_device_data WHERE experiments.exp_id = experiment_device_data.exp_id AND device_id = (%s) AND group_id = (%s) AND recording = true AND active = true\",\n (device_id, group_id,))\n exp_device_idxs = cur.fetchall()\n\n # loop through data and create log entries as needed\n device_data_log_ids = []\n exp_ids = []\n global_to = payload.get('to')\n for data in payload.get('d'):\n if(not(data.get('v') is None)):\n for exp_device_idx in exp_device_idxs:\n if exp_device_idx[0] == data.get('i'):\n exp_ids.append(exp_device_idx[2])\n # figure out global or local time offset\n local_to = data.get('to')\n if (local_to is None):\n local_to = global_to\n # generate a record for the exp_device_data_id\n cur.execute(\"INSERT INTO device_data_logs (device_raw_log_id, device_id, exp_device_data_id, log_datetime, log_time_offset, data_idx, data_key, data_value, data_sd, data_units, data_n) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING device_data_log_id\",\n (device_raw_log_id, device_id, exp_device_idx[1], published_at, local_to/1000., data.get('i'), data.get('k'), data.get('v'), data.get('s'), data.get('u'), data.get('n')))\n device_data_log_ids.append(cur.fetchone())\n conn.commit()\n exp_ids = list(set(exp_ids))\n logger.info(\"device in use, created {} log entries (IDs: {}) for {} experiments (IDs: {})\".format(\n len(device_data_log_ids), ', '.join(map(str, device_data_log_ids)), len(exp_ids), ', '.join(map(str, exp_ids))))\n if (len(device_data_log_ids) > 0):\n return(\"Device in use ({} log entries created for {} experiments).\".format(len(device_data_log_ids), len(exp_ids)))\n else:\n return(\"Device in use but no log entries created (no active experiments).\")\n"
},
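The handler above expects a specific event shape: `group_id`, `particle_id`, `published_at`, `log`, and a `payload` carrying the device `id` plus `s` (state) or `d` (data) entries. A sketch of generating a matching `event.json` for `make invoke` from R with jsonlite; the field names mirror the parsing in `handler()`, while all concrete values below are made up:

```r
library(jsonlite)

# hypothetical test event mirroring what handler() parses
event <- list(
  group_id = "testing",
  particle_id = "0123456789abcdef",
  published_at = "2019-01-01T12:00:00Z",
  log = "data",
  payload = list(
    id = "testing", # device_name in the database
    to = 500,       # global time offset in ms
    d = list(list(i = 1, k = "temperature", v = 25.3, s = 0.1, u = "C", n = 10))
  )
)
writeLines(toJSON(event, auto_unbox = TRUE, pretty = TRUE), "event.json")
```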
{
"alpha_fraction": 0.6850393414497375,
"alphanum_fraction": 0.6905511617660522,
"avg_line_length": 47.846153259277344,
"blob_id": "b27bbb0890e530ae696d63ecefc257d98e9940d9",
"content_id": "7c5290a167772743167e276c7a30a8d9d43d32d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Ruby",
"length_bytes": 1270,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 26,
"path": "/Guardfile",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# Ruby guard file (need ruby >2.2 and bundler installed: 'gem install bundler')\n# To make sure all the gems are installed, run 'bundle install' once in terminal\n# Run lablogger::ll_setup_gui('dev') and fill out the resulting dev/credentials.R file\n# Then you can use the Makefile target 'make gui_dev' to start the GUI in development mode\n# For browser livereload to work, need the browser extension: http://livereload.com/extensions/#installing-sections\n# If the delay is too short for relaunching the app, increase the grace_period\n\nport = 5000\n\nguard 'process', name: 'Shiny', command: ['R', '-e', \" \\\nsource('dev/credentials.R'); \\\nmessage('INFO: Connection to database... ', appendLF = FALSE); \\\npool <- pool::dbPool(drv = RPostgreSQL::PostgreSQL(), host = db_host, dbname = db_name, user = db_user, password = db_pwd); \\\nmessage('successful.'); \\\nshiny::onStop(function() { pool::poolClose(pool) }); \\\ndevtools::load_all('.'); \\\nlablogger:::turn_debug_on(); \\\nlablogger::ll_run_gui(group_id = group_id, access_token = access_token, pool = pool, app_title = 'DEV', app_pwd = NULL, port = #{port}, launch = TRUE)\"] do\n watch(%r{NAMESPACE})\n watch(%r{R/.+\\.R$})\nend\n\nguard 'livereload', grace_period: 5 do\n watch(%r{NAMESPACE})\n watch(%r{R/.+\\.R$})\nend\n"
},
{
"alpha_fraction": 0.6366373896598816,
"alphanum_fraction": 0.6373901963233948,
"avg_line_length": 35.89814758300781,
"blob_id": "920cc3ed52b97018396af772c27914930e8f0490",
"content_id": "fa270e0b75044bdb1da9b0cd0e4a45a7c9a78caa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3985,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 108,
"path": "/R/app_server.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Chemostat Control Center Server\n#'\n#' Generates the server part of the isoviewer app\napp_server <- function(group_id, access_token, pool, app_pwd, timezone, start_screen = \"experiments\") {\n shinyServer(function(input, output, session) {\n\n message(\"\\n\\nINFO: Loading GUI instance ...\")\n\n # DATA MANAGERS =====\n dm_links <- callModule(experimentDeviceLinksDataServer, \"dm_links\", group_id, access_token, pool, timezone)\n dm_experiments <- callModule(experimentsDataServer, \"dm_experiments\", dm_links, group_id, access_token, pool, timezone)\n dm_devices <- callModule(devicesDataServer, \"dm_devices\", dm_links, group_id, access_token, pool, timezone)\n dm_datalogs <- callModule(datalogsDataServer, \"dm_datalogs\", dm_experiments, dm_devices, group_id, access_token, pool, timezone)\n dm_cloudinfo <- callModule(cloudInfoDataServer, \"dm_cloudinfo\", dm_experiments, dm_devices, dm_links, group_id, access_token, pool, timezone)\n\n # LOGIN SCREEN =====\n login_manager <- callModule(loginServer, \"login\", app_pwd = app_pwd, group = group_id, timezone = timezone)\n observeEvent(input$menu, {\n if (!login_manager$is_logged_in()) {\n module_message(NULL, \"debug\", \"not logged in yet, jumping back to login screen\")\n updateTabItems(session, \"menu\", selected = \"login\")\n }\n })\n observeEvent(login_manager$is_logged_in(), {\n if (login_manager$is_logged_in())\n updateTabItems(session, \"menu\", start_screen)\n })\n\n # SCREEN LOADING ====\n observeEvent(input$menu, {\n if (input$menu %in% c(\"experiments\", \"data\")) dm_experiments$init_experiments()\n else if (input$menu == \"devices\") dm_devices$init_devices()\n })\n\n # DATA SCREEN ====\n callModule(experimentSelectorServer, \"data_exps\", dm_experiments)\n data_plot <- callModule(\n dataPlotServer, \"data_plot\", timezone = timezone,\n get_experiments = dm_experiments$get_selected_experiments,\n get_data_logs = dm_datalogs$get_experiments_data_logs,\n refresh_data_logs = dm_datalogs$refresh_data_logs,\n reset_plot = eventReactive(length(dm_experiments$get_selected_experiments()), runif(1))\n )\n output$data <- renderUI({\n if (!login_manager$is_logged_in()) return(NULL)\n isolate({\n message(\"INFO: Generating 'data' screen\")\n tagList(\n experimentSelectorUI(\"data_exps\"),\n dataPlotUI(\"data_plot\")\n )\n })\n })\n\n # EXPERIMENTS SCREEN ====\n\n callModule(\n experimentManagerServer, \"experiments\",\n dm_links = dm_links,\n dm_experiments = dm_experiments,\n dm_devices = dm_devices,\n dm_cloudinfo = dm_cloudinfo,\n dm_datalogs = dm_datalogs,\n timezone = timezone,\n access_token = access_token\n )\n\n output$experiments <- renderUI({\n if (!login_manager$is_logged_in()) return(NULL)\n message(\"INFO: Generating 'experiments' screen\")\n experimentManagerUI(\"experiments\")\n })\n\n # DEVICES SCREEN ====\n\n callModule(\n deviceManagerServer, \"devices\",\n dm_devices = dm_devices,\n dm_cloudinfo = dm_cloudinfo,\n dm_datalogs = dm_datalogs,\n access_token = access_token\n )\n\n output$devices <- renderUI({\n if (!login_manager$is_logged_in()) return(NULL)\n message(\"INFO: Generating 'devices' screen\")\n deviceManagerUI(\"devices\")\n })\n\n # WEBCAMS SCREEN ====\n output$live <- renderUI({\n if (!login_manager$is_logged_in()) return(NULL)\n message(\"INFO: Generating 'webcams' screen\")\n tagList(h3(\"Coming soon...\"))\n })\n\n # HELP LINK ====\n output$help <- renderUI({\n link <- \"https://github.com/KopfLab/lablogger/wiki\"\n links <- c(experiments = \"Experiments\", devices = \"Devices\", data = 
\"All-Data\")\n if (input$menu %in% names(links)) link <- paste0(link, \"/\", links[input$menu])\n a(tags$i(class=\"fa fa-question-circle\"), \"Help\", href = link, target = \"_blank\") %>%\n as.character() %>%\n HTML()\n })\n\n })\n}\n"
},
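Each screen output in `app_server` returns `NULL` until `login_manager$is_logged_in()` is `TRUE`, so protected UI is simply never rendered for unauthenticated sessions. A minimal standalone sketch of that gating pattern, with a plain `reactiveVal` standing in for the login module:

```r
library(shiny)

server <- function(input, output, session) {
  # stand-in for login_manager$is_logged_in()
  is_logged_in <- reactiveVal(FALSE)
  observeEvent(input$login, is_logged_in(TRUE))

  output$screen <- renderUI({
    if (!is_logged_in()) return(NULL) # render nothing until authenticated
    h3("protected content")
  })
}

ui <- fluidPage(actionButton("login", "Log in"), uiOutput("screen"))
# shinyApp(ui, server)
```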
{
"alpha_fraction": 0.619526207447052,
"alphanum_fraction": 0.6223977208137512,
"avg_line_length": 41.181819915771484,
"blob_id": "2dd302f9351cc82d4a2e540b5e2c3b1aec9926a6",
"content_id": "41cd30e3ecdf4eb8c2b1b9cb3ea85e4f246e54da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1393,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 33,
"path": "/R/nse.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# resolve default cols in a list of quos\nresolve_defaults <- function(quos) {\n resolve_default <- function(x) if (rlang::quo_is_call(x) && rlang::call_name(x) == sym(\"default\")) eval_tidy(x) else x\n if (is_quosure(quos)) return(resolve_default(quos))\n else map(quos, resolve_default)\n}\n\n# Convert quo to text accounting for plain text and symbol quos\nquos_to_text <- function(lquos, check_for_validity = TRUE, variable = \"variable\") {\n single_quo <- is_quosure(lquos)\n lquos <- if(single_quo) quos(!!lquos) else quos(!!!lquos)\n are_text_quos <- map_lgl(lquos, ~is.character(quo_squash(.x)))\n are_symbol_quos <- map_lgl(lquos, quo_is_symbol)\n\n # check for validity\n if (check_for_validity && !all(ok <- are_text_quos | are_symbol_quos)) {\n params <-\n str_c(names(lquos)[!ok] %>% { ifelse(nchar(.) > 0, str_c(., \" = \"), .) },\n map_chr(lquos[!ok], quo_text)) %>%\n glue::glue_collapse(\"', '\", last = \"' and '\")\n if (sum(!ok) > 1)\n glue(\"parameters '{params}' do not refer to valid {variable} names\") %>% stop(call. = FALSE)\n else\n glue(\"parameter '{params}' does not refer to a valid {variable} name\") %>% stop(call. = FALSE)\n }\n\n text_quos <-\n map2_chr(lquos, are_text_quos, function(lquo, is_text)\n if(is_text) quo_squash(lquo) else quo_text(lquo)) %>%\n as.list()\n if (single_quo) return(text_quos[[1]])\n else return(text_quos)\n}\n\n"
},
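To see why `quos_to_text()` branches on text vs. symbol quosures, here are the rlang building blocks it relies on, evaluated on a mixed quosure list (illustrative only; the function itself is internal to the package):

```r
library(rlang)
library(purrr)

# a mixed list: one bare symbol, one character string
lquos <- quos(device_name, "exp_id")

map_lgl(lquos, ~ is.character(quo_squash(.x))) # FALSE  TRUE  -> text quos
map_lgl(lquos, quo_is_symbol)                  # TRUE   FALSE -> symbol quos

# quo_text() keeps the quotes around a string quo ("\"exp_id\""),
# which is why text quos are squashed instead of deparsed
map_chr(lquos, quo_text)
```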
{
"alpha_fraction": 0.756303608417511,
"alphanum_fraction": 0.7581518292427063,
"avg_line_length": 81.33695983886719,
"blob_id": "d94640403924ff3a533f9357a324231835b1da9d",
"content_id": "e23f9a0a117f6f506165bf00257f6fc2dbfdd776",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 7575,
"license_type": "no_license",
"max_line_length": 399,
"num_lines": 92,
"path": "/inst/lambda/README.md",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# Lambda function development\n\n## Installation\n\n### python / pip\n\n- easiest is just to install anaconda: https://www.anaconda.com/what-is-anaconda/\n- make sure python and pip exist (`python -V` and `pip -V`)\n\n### virtual environments\n\n- install virtualenv `pip install virtualenv` (virtual environment to develop lambda functions in, [doc](http://docs.python-guide.org/en/latest/dev/virtualenvs/#lower-level-virtualenv))\n - test that it was installed properly `virtualenv --version`\n - if it does give a version number, it might not be in the path (possibly already installed), easiest solution: `pip uninstall virtualenv`, then re-installed\n- Optional: for general work with virtualenvs, use the wrapper: `pip install virtualenvwrapper` ([doc](http://docs.python-guide.org/en/latest/dev/virtualenvs/#virtualenvwrapper))\n\n### AWS CLI\n\n- install AWS command line interface: `conda install -c conda-forge awscli` (via anaconda, easier than with pip and then defining the env. variables)\n - to check: `aws --version`\n - to configure: `aws configure` ([doc](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html))\n - test: `aws rds describe-db-instances`\n\n## Setup\n\n- create a new project folder (best in atom terminal - extension `platformio-ide-terminal` to develop python there):\n- run `make install`, which does the following:\n - creates virtual environment: `virtualenv .`\n - activates virtual environment: `source bin/activate`\n - Note: from the make file it does not translate into the shell but if run manually, terminal should show `(folder name)` at the beginning of the console line\n - installs python lambda for the virtual environment: `pip install python-lambda` ([doc](https://github.com/nficano/python-lambda))\n - optional (if needed): installs postgresql db driver in the virtual env.: `pip install pg8000` ([doc](https://github.com/mfenniak/pg8000))\n - Note: to make successful database connections, make sure to have permissions set up for your IP to connect to the database\n- run `make init`, which does the following:\n - bootstraps lambda project if it does not exist yet via `lambda init`\n - makes local copies of the config and webhook template files (automatically git-ignored)\n\n## Development\n\n- develop the lambda function in the `service.py` file and connected scipts\n- run `make invoke` to run lambda function with the information in `event.json`\n - uses `lambda invoke -v` in the virtual environment\n\n## Deployment\n\n - if no `config.yaml` exists yet (should have been created during `make init`), run `make config.yaml`\n - fill out the settings in `config.yaml`, see **Integration** section for details (no worries if not everything correct right away, deploy will throw errors)\n - run `make deploy` to deploy the lambda function:\n - uses `lambda deploy` in the virtual environment\n - Note: for large project dependencies, consider using S3 bucket for the labmda function, see [docs](https://github.com/nficano/python-lambda#uploading-to-s3) for details\n - Note: for the lambda function to work properly, it needs to be well integrated into other services (see below for details)\n\n## Integration\n\nOnce the Lambda function is deployed it can be viewed and tested in the Lambda functions console https://console.aws.amazon.com/lambda. Detailed log messages and errors are available at the Cloudwatch console https://console.aws.amazon.com/cloudwatch. 
For the lambda function to be useful though, it usually needs to be setup with the right IAM role, VPC access and usually some sort of API gateway.\n\n### IAM (database permissions)\n\n- IAM administration: https://console.aws.amazon.com/iam\n - It is useful to create an IAM role for the lambda function, a useful default policy is `AWSLambdaVPCAccessExecutionRole` (see [setup docs](https://docs.aws.amazon.com/lambda/latest/dg/vpc-rds-create-iam-role.html) for details)\n - Add the name of the role in the `role` setting of the `config.yaml`\n- Additionally, to get access to the an RDS database running in a Virtual Private Container (VPC), the lambda function needs to be added to the VPC the database is in:\n - Subnets:\n - find the subnet IDs for your primary VPC in the **Subnets** menu at https://console.aws.amazon.com/vpc\n - add at least one (might as well add all though) in the `subnet_ids` setting in `config.yaml` - format: `[subnet-1,subnet-2,...]`\n - Security group(s):\n - if none exists yet, make a simple security group for lambda functions in the **Security Groups** menu at https://console.aws.amazon.com/vpc (doesn't need any custom inbound or outbound definitions) and note the Group ID\n - add at least one (usually one is enough) in the `security_group_ids` setting in `config.yaml` - format: `[sg-1,sg-2,...]`\n - make sure to add the assigned security group to the allowed **Inbound** traffic rules of the RDS databases' security group (enter the lambda security group ID under **Source**)\n - redeploy the lambda function (`make deploy`), these settings should now all be listed in the details for the lambda function at https://console.aws.amazon.com/lambda\n - troubleshooting: if the labmda function times out with a test JSON, most likely the security settings aren't quite right, sometimes the security group and subnets don't get saved properly by the `lambda deploy`, simply add them manually in the console at https://console.aws.amazon.com/lambda for the new lambda function (under the **Network** section)\n\n### API gateway\n\nFor the lambda function to be easily accessible via web API (be it http or something else), an API gateway needs to be set up, see [this link for a good example](https://docs.aws.amazon.com/apigateway/latest/developerguide/getting-started-lambda-non-proxy-integration.html#getting-started-new-lambda) and the API gateway administration: https://console.aws.amazon.com/apigateway\n\nNotes:\n- API gateway configuration is _completely_ independent of the lambda function deployment and must be configured separately (i.e. 
no setting in the `config.yaml` that plays a role here)\n- if you want to make the API secure, good to include an API key requirement which requires creating an API key and setting up a usage plan that allows the API key for a specific API gateway and deployed stage (this stage's method must have the `API Key Required` setting in the `Method->Method Request` set to `true`).\n- API development can be very tricky, [POSTMAN](https://www.getpostman.com/) helps with testing APIs:\n\n### Webhooks\n\nLastly, the particle photons need to actually send data to the API, this is done via [Webhooks](https://docs.particle.io/reference/webhooks/).\n\n- if no `webhook_data_log.json` and `webhook_state_log.json` files exist yet (should have been created during `make init`), run `make webhooks`\n- replace the placeholders in both webhook files:\n - `<<ACCESS_POINT>>`, `<<STAGE>>` and `<<RESOURCE>>` are parameters from the API gateway\n - `<<API-KEY>>` is the access key from the API gateway (if configured to need one, see details above)\n - `<<OWNER>>` is the name/id of the owner record in the database (depends on what is set up in your database)\n- create the webhooks via `particle webhook create webhook_data_log.json` and `particle webhook create webhook_state_log.json` (need to be logged into the particle account via `particle login`)\n- note that creating the webhooks multiple times will NOT throw an error but lead to multiple trigger events, make sure to remove old webhooks via `particle webhook list` and `particle webhook delete` before re-creating updated versions\n"
},
{
"alpha_fraction": 0.6996644139289856,
"alphanum_fraction": 0.7000839114189148,
"avg_line_length": 29.96103858947754,
"blob_id": "f1b778e4f9b15ebfa0c183a1c97d7e2ea5bdcb84",
"content_id": "3e176f6ff2f8b415cb2c5006d9ec348e10a3acd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2384,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 77,
"path": "/R/settings.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# retrieve package settings, internal function, not exported\ndefault <- function(name, allow_null = FALSE) {\n name <- enquo(name) %>% quos_to_text(variable = \"setting\")\n value <- getOption(str_c(\"ll.\", name))\n if (!allow_null && is.null(value)) stop(\"ll setting '\", name, \"' does not exist, make sure to set the default first\", call. = FALSE)\n return(value)\n}\n\n# set package setting, internal function, not exported\nset_default <- function(name, value, overwrite = TRUE) {\n if (overwrite || !str_c(\"ll.\", name) %in% names(options()))\n options(list(value) %>% setNames(str_c(\"ll.\", name)))\n return(invisible(value))\n}\n\n# turn debug on\nturn_debug_on <- function() {\n set_default(\"debug\", TRUE)\n}\n\n#' Set default database connection or pool\n#' @param con database connection or pool object\n#' @export\nll_set_db_con <- function(con) {\n set_default(\"con\", enquo(con))\n}\n\n#' Set default access token for particle API requests\n#' @param token the particle acocunt access token, keep secret!!\n#' @export\nll_set_access_token <- function(token) {\n set_default(\"access_token\", token)\n}\n\n#' Set default group ID\n#'\n#' Note that this is not checked wheter it exists, simply used as the default for other functions.\n#'\n#' @param group_id group\n#' @export\nll_set_group_id <- function(group_id) {\n set_default(\"group_id\", group_id)\n}\n\n#' Set default request timeout\n#'\n#' @param group_id group\n#' @export\nll_set_request_timeout <- function(timeout) {\n set_default(\"request_timeout\", timeout)\n}\n\n\n#' Turn information messages on/off\n#'\n#' These functions turn information messages on/off in all subsequent function calls by changing the global settings for the \\code{quiet} parameter of c3 functions.\n#' These functions can be called stand alone or within a pipeline to turn messages on/off at a certain point during the pipeline.\n#'\n#' @param data a data frame - returned invisibly as is if provided (e.g. in the middle of a pipeline)\n#' @name ll_info_messages\nNULL\n\n#' @rdname ll_info_messages\n#' @family settings functions\n#' @export\nll_turn_info_messages_on <- function(data = NULL) {\n set_default(\"quiet\", FALSE)\n message(\"Info: information messages turned on\")\n if (!missing(data)) return(invisible(data))\n}\n\n#' @rdname ll_info_messages\n#' @export\nll_turn_info_messages_off <- function(data = NULL) {\n set_default(\"quiet\", TRUE)\n if (!missing(data)) return(invisible(data))\n}\n"
},
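A short usage sketch of the setters above; internally they all map onto R options with an `ll.` prefix. The token value is a placeholder, read from the environment to keep secrets out of code:

```r
library(lablogger)

# typical session setup via the exported setters
ll_set_group_id("testing")
ll_set_access_token(Sys.getenv("PARTICLE_ACCESS_TOKEN"))
ll_set_request_timeout(30)

# each setter stores a plain R option with an "ll." prefix
getOption("ll.group_id") # "testing"
```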
{
"alpha_fraction": 0.5604768991470337,
"alphanum_fraction": 0.565706193447113,
"avg_line_length": 37.16766357421875,
"blob_id": "6f15f42a9e66e7928bfbef4d827c089b86e5f1d4",
"content_id": "4e91b37a47896191d99f51fe1e7437d4bdb736e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 19123,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 501,
"path": "/R/app_module_data_plot.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "\ndataPlotServer <- function(input, output, session, timezone, get_experiments, get_data_logs, refresh_data_logs, reset_plot) {\n\n # namespace\n ns <- session$ns\n\n # reactive values ====\n zoom_factor <- 2 # zoom in and out factor with each click\n zoom_move <- 0.5 # sideways move interval\n values <- reactiveValues(\n valid_fetch = FALSE,\n valid_plot = FALSE,\n selected_traces = c(),\n refresh_data_plot = NULL,\n zoom_stack = list(list(zoom = NULL, x_min = NULL, x_max = NULL))\n )\n\n # experiment selected\n is_experiment_selected <- reactive(length(get_experiments()) > 0)\n\n # reset =====\n observeEvent(reset_plot(), {\n module_message(ns, \"debug\", \"resetting data plot - exp selected? \", is_experiment_selected())\n values$valid_fetch <- FALSE\n values$valid_plot <- FALSE\n values$zoom_stack <- list(list(zoom = NULL, x_min = NULL, x_max = NULL))\n toggle(\"data_plot_div\", condition = FALSE)\n toggle(\"traces_box\", condition = FALSE)\n toggle(\"groups_box\", condition = FALSE)\n toggle(\"options_box\", condition = FALSE)\n })\n\n observeEvent(values$valid_plot, {\n toggle(\"summary_box\", condition = values$valid_plot)\n toggle(\"data_box\", condition = values$valid_plot)\n })\n\n # plot buttons ====\n observeEvent(is_experiment_selected(), {\n toggleState(\"fetch_data\", condition = is_experiment_selected())\n toggleState(\"reset_cache\", condition = is_experiment_selected())\n })\n\n observeEvent(values$valid_plot, {\n toggleState(\"zoom_all\", condition = values$valid_plot)\n toggleState(\"zoom_move_left\", condition = values$valid_plot)\n toggleState(\"zoom_move_right\", condition = values$valid_plot)\n toggleState(\"zoom_back\", condition = values$valid_plot)\n toggleState(\"plot_download-download_dialog\", condition = values$valid_plot)\n toggleState(\"data_download-download_dialog\", condition = values$valid_plot)\n })\n\n observe({\n refresh_available <- values$valid_fetch && length(traces_selector$get_selected()) > 0\n toggleState(\"plot_refresh\", condition = refresh_available)\n toggleState(\"traces_refresh\", condition = refresh_available)\n toggleState(\"groups_refresh\", condition = refresh_available)\n toggleState(\"options_refresh\", condition = refresh_available)\n })\n\n # plot messages ====\n output$data_plot_message <- renderUI({\n # cannot use validate here because it doesn't allow HTML messages\n msg <-\n if (!is_experiment_selected()) \"Please select one or multiple experiments.\"\n else if (is.null(values$valid_fetch) || !values$valid_fetch)\n \"Please press the fetch data button (<i class='fa fa-cloud-download'></i>) to query the database.\"\n else if (is.null(traces_selector$get_selected()))\n \"Please select at least one data trace.\"\n else if (is.null(values$valid_plot) || !values$valid_plot)\n \"Please press any re-plot button (<i class='fa fa-refresh'></i>) to render the plot.\"\n else\n NULL\n return(HTML(msg))\n })\n\n # traces ====\n\n # selector\n traces_selector <- callModule(\n selectorTableServer, \"traces_selector\",\n id_column = \"data_trace\", column_select = c(`# data points` = n),\n dom = \"tlp\"\n )\n\n # update data\n observe({\n df <- get_data_logs() %>% prepare_data_for_plotting()\n isolate({\n if (nrow(df) > 0) {\n traces_selector$set_table(df %>% dplyr::count(data_trace) %>% arrange(data_trace))\n } else {\n traces_selector$set_table(data_frame(data_trace = character(0), n = integer(0)))\n }\n })\n })\n\n # zooming ====\n\n get_data_logs_in_time_interval <- function(logs, from, to) {\n filter(logs, between(datetime, 
as_datetime(from, tz = timezone), as_datetime(to, tz = timezone)))\n }\n\n # add to zoom stack\n add_to_zoom_stack <- function(zoom, x_min, x_max, update = TRUE, only_add_if_new = TRUE) {\n if (missing(zoom)) zoom <- get_last_zoom()$zoom\n if (missing(x_min)) x_min <- get_last_zoom()$x_min\n if (missing(x_max)) x_max <- get_last_zoom()$x_max\n new_zoom <- list(zoom = zoom, x_min = x_min, x_max = x_max)\n if (only_add_if_new && identical(get_last_zoom(), new_zoom)) return(NULL)\n module_message(ns, \"debug\", \"adding to zoom stack: \", zoom, \" time: \", x_min, \" to \", x_max)\n values$zoom_stack <- c(values$zoom_stack, list(new_zoom))\n if (update) refresh_data_plot()\n }\n\n # load last zoom\n load_last_zoom <- function(update = TRUE) {\n last_element <- length(values$zoom_stack)\n if (last_element > 1) values$zoom_stack[last_element] <- NULL\n if (update) refresh_data_plot()\n }\n\n # get current zoom\n get_last_zoom <- function() {\n values$zoom_stack[[length(values$zoom_stack)]]\n }\n\n # zoom back\n observeEvent(input$zoom_back, load_last_zoom())\n observeEvent(input$data_plot_dblclick, load_last_zoom())\n # zoom whole data set\n observeEvent(input$zoom_all, {\n add_to_zoom_stack(zoom = NULL, x_min = NULL, x_max = NULL)\n })\n # # zoom fit\n # observeEvent(input$zoom_fit, {\n # add_to_zoom_stack(zoom = NULL)\n # })\n # # zoom in\n # observeEvent(input$zoom_in, {\n # if (is.null(get_last_zoom()$zoom)) add_to_zoom_stack(zoom = zoom_factor)\n # else add_to_zoom_stack(zoom = get_last_zoom()$zoom * zoom_factor)\n # })\n # # zoom out\n # observeEvent(input$zoom_out, {\n # if (is.null(get_last_zoom()$zoom)) add_to_zoom_stack(zoom = 1/zoom_factor)\n # else add_to_zoom_stack(zoom = get_last_zoom()$zoom/zoom_factor)\n # })\n # time zoom\n observeEvent(input$data_plot_brush, {\n brush <- input$data_plot_brush\n if (!is.null(brush$xmin) && !is.null(brush$xmax)) {\n # convert to seconds\n add_to_zoom_stack(x_min = brush$xmin, x_max = brush$xmax)\n }\n })\n # left right movening\n move_zoom <- function(direction) {\n if ( !is.null(get_last_zoom()$x_min) && !is.null(get_last_zoom()$x_max) ) {\n add_to_zoom_stack(\n x_min = get_last_zoom()$x_min + direction * zoom_move * (get_last_zoom()$x_max - get_last_zoom()$x_min),\n x_max = get_last_zoom()$x_max + direction * zoom_move * (get_last_zoom()$x_max - get_last_zoom()$x_min)\n )\n }\n }\n observeEvent(input$zoom_move_left, move_zoom(-1))\n observeEvent(input$zoom_move_right, move_zoom(+1))\n\n # fetch data ====\n observeEvent(input$fetch_data, {\n values$valid_fetch <- TRUE\n refresh_data_logs()\n get_data_logs()\n toggle(\"traces_box\", condition = TRUE)\n toggle(\"groups_box\", condition = TRUE)\n toggle(\"options_box\", condition = TRUE)\n\n # refresh existing plot\n if (values$valid_plot) {\n refresh_data_plot()\n }\n })\n\n # reset cache ====\n\n observeEvent(input$reset_cache, {\n values$valid_fetch <- FALSE\n\n withProgress(\n message = 'Resetting data logs cache', detail = \"Accessing file system...\", value = 0.5,\n ll_reset_exp_device_data_logs_cache(get_experiments())\n )\n })\n\n # generate data plot ====\n\n refresh_data_plot <- function() {\n if (is.null(values$refresh_data_plot)) values$refresh_data_plot <- 1\n else values$refresh_data_plot <- values$refresh_data_plot + 1\n toggle(\"data_plot_div\", condition = TRUE)\n }\n\n observeEvent(input$plot_refresh, refresh_data_plot())\n observeEvent(input$traces_refresh, refresh_data_plot())\n observeEvent(input$groups_refresh, refresh_data_plot())\n observeEvent(input$options_refresh, 
refresh_data_plot())\n\n get_plot_data_logs <- reactive({\n logs <- get_data_logs() %>% prepare_data_for_plotting()\n\n # zoom\n if (!is.null(get_last_zoom()$x_min) && !is.null(get_last_zoom()$x_max)) {\n logs <- get_data_logs_in_time_interval(logs, get_last_zoom()$x_min, get_last_zoom()$x_max)\n }\n\n # traces and groups filter\n traces <- traces_selector$get_selected()\n logs %>% filter(data_trace %in% traces)\n })\n\n generate_data_plot <- eventReactive(values$refresh_data_plot, {\n\n # logs\n logs <- get_plot_data_logs()\n\n # plot\n if (nrow(logs) == 0) {\n p <- ggplot() + annotate(\n \"text\", x = 0, y = 0,\n label = glue(\"no data available for the\\nselected filters and time interval\\n\",\n \"experiment(s): {paste(get_experiments(), collapse = ', ')}\\n\",\n \"trace(s): {paste(traces_selector$get_selected(), collapse = ', ')}\"),\n vjust = 0.5, hjust = 0.5, size = 10) + theme_void()\n } else {\n\n # datetime vs. duration\n plot_duration <- !is.null(input$time_axis) && input$time_axis == \"duration\"\n\n # interval number\n interval_number <- NULL\n if (!is.null(input$time_intervals_number) && !is.na(as.numeric(input$time_intervals_number)) && as.numeric(input$time_intervals_number) > 0) {\n interval_number <- as.numeric(input$time_intervals_number)\n if (!plot_duration) interval_number <- ceiling(interval_number) # only full units allowed\n }\n\n # interval unit\n interval_unit <- input$time_intervals_unit\n if (plot_duration && interval_unit == \"default\") interval_unit <- \"days\"\n else if (!plot_duration && interval_unit == \"default\") interval_unit <- NULL\n\n # setting breaks\n if (!is.null(interval_number) && !is.null(interval_unit))\n time_breaks <- paste(interval_number, interval_unit)\n else if (!is.null(interval_unit))\n time_breaks <- interval_unit\n else\n time_breaks <- NULL\n\n # duration vs. 
date\n if (plot_duration) {\n duration_breaks <- time_breaks\n date_breaks <- NULL\n } else {\n duration_breaks <- NULL\n date_breaks <- time_breaks\n }\n\n p <- ll_plot_device_data_logs(\n logs,\n duration_breaks = duration_breaks, date_breaks = time_breaks,\n show_error_range = input$show_errors,\n exclude_outliers = !input$show_outliers,\n include_device_info= input$show_device_info,\n overlay_experiments = input$overlay_exps)\n\n # legend position\n if (input$legend_position == \"bottom\") {\n p <- p + theme(legend.position = \"bottom\", legend.direction=\"vertical\")\n } else if (input$legend_position == \"hide\") {\n p <- p + theme(legend.position = \"none\")\n }\n\n # font size\n if (!is.null(input$font_size) && input$font_size > 0)\n p <- p + theme(text = element_text(size = input$font_size))\n\n }\n\n values$valid_plot <- TRUE\n return(p)\n })\n\n # generate data table & summary =====\n\n generate_data_summary <- eventReactive(values$refresh_data_plot, {\n logs <- get_plot_data_logs()\n if (nrow(logs) > 0) {\n logs <- logs %>% ll_summarize_data_logs(slope_denom_units = \"day\", exclude_outliers = !input$show_outliers)\n }\n return(logs)\n })\n\n generate_data_table <- eventReactive(values$refresh_data_plot, {\n logs <- get_plot_data_logs() %>%\n select(datetime, exp_id, device_name, data_key, data_units, data_value, data_sd, data_n) %>%\n mutate(datetime = format(datetime, \"%Y-%m-%d %H:%M:%S\"))\n return(logs)\n })\n\n # data plot output ====\n\n output$data_plot <- renderPlot(generate_data_plot(), height = eventReactive(values$refresh_data_plot, input$plot_height))\n\n # summary table output ====\n\n output$summary_table <- renderTable({\n req(!is.null(input$digits) && is.numeric(input$digits))\n summary <- generate_data_summary()\n module_message(ns, \"debug\", \"rendering plot data summary table\")\n if (nrow(summary) > 0) summary\n else data_frame(` ` = \"No data.\")\n }, striped = TRUE, spacing = 'xs', width = '100%', align = NULL, digits = reactive(input$digits))\n\n # data table output =====\n\n output$data_table <- DT::renderDataTable({\n DT::datatable(\n generate_data_table(),\n options = list(orderClasses = TRUE, order = list(1, \"desc\")),\n filter = \"bottom\"\n )\n })\n\n # plot download ====\n download_handler <- callModule(\n plotDownloadServer, \"plot_download\",\n plot_func = generate_data_plot,\n filename_func = reactive({\n exps <- get_data_logs()$exp_id %>% unique()\n glue(\"{format(now(), '%Y_%m_%d')}-\",\n \"{glue::glue_collapse(exps, sep = '_')}\",\n \".pdf\")\n }))\n\n # data download ====\n data_handler <- callModule(\n dataDownloadServer, \"data_download\",\n data_func = get_plot_data_logs,\n filename_func = reactive({\n logs <- get_data_logs()\n exps <- logs$exp_id %>% unique()\n traces <- logs$data_key %>% unique()\n glue(\"{format(now(), '%Y_%m_%d')}-\",\n \"{paste(exps, collapse = '_')}-\",\n \"{paste(traces, collapse = '_')}\",\n \".zip\")\n }))\n\n}\n\n\ndataPlotUI <- function(id, plot_height = 650) {\n ns <- NS(id)\n\n tagList(\n # plot box ------\n default_box(\n title = \"Data Plot\", width = 8,\n div(style = paste0(\"min-height: \", plot_height, \"px;\"),\n div(id = ns(\"data_plot_actions\"),\n fluidRow(\n column(width = 4,\n tooltipInput(actionButton, ns(\"fetch_data\"), NULL, icon = icon(\"cloud-download\"),\n tooltip = \"Fetch the most recent data from the data base.\") %>% disabled(),\n spaces(1),\n tooltipInput(actionButton, ns(\"reset_cache\"), NULL, icon = icon(\"unlink\"),\n tooltip = \"Reset local cache (only use if experiment 
configuration changed).\") %>% disabled()\n ),\n column(width = 4, align = \"center\",\n tooltipInput(actionButton, ns(\"zoom_all\"), \"\", icon = icon(\"resize-full\", lib = \"glyphicon\"),\n tooltip = \"Show all data\") %>% disabled(),\n # tooltipInput(actionButton, ns(\"zoom_in\"), \"\", icon = icon(\"plus\"),\n # tooltip = \"Zoom in\"),\n # tooltipInput(actionButton, ns(\"zoom_out\"), \"\", icon = icon(\"minus\"),\n # tooltip = \"Zoom out\"),\n # tooltipInput(actionButton, ns(\"zoom_fit\"), \"\", icon = icon(\"resize-vertical\", lib = \"glyphicon\"),\n # type = \"toggle\", tooltip = \"Switch to optimal zoom<br/>for visible peaks\"),\n tooltipInput(actionButton, ns(\"zoom_move_left\"), \"\", icon = icon(\"arrow-left\"),\n tooltip = \"Move back in time\") %>% disabled(),\n tooltipInput(actionButton, ns(\"zoom_move_right\"), \"\", icon = icon(\"arrow-right\"),\n tooltip = \"Move forward in time\") %>% disabled(),\n tooltipInput(actionButton, ns(\"zoom_back\"), \"\", icon = icon(\"rotate-left\"),\n tooltip = \"Revert to previous view\") %>% disabled()\n ),\n column(width = 4, align = \"right\",\n tooltipInput(actionButton, ns(\"plot_refresh\"), NULL, icon = icon(\"refresh\"),\n tooltip = \"Refresh the plot with the selected filters and plot options.\") %>% disabled(),\n spaces(1),\n plotDownloadLink(ns(\"plot_download\"), label = NULL) %>% disabled(),\n spaces(1),\n dataDownloadLink(ns(\"data_download\"), label = NULL) %>% disabled()\n )\n )\n ),\n div(id = ns(\"data_plot_messages\"), h3(htmlOutput(ns(\"data_plot_message\")))),\n div(id = ns(\"data_plot_div\"),\n plotOutput(ns(\"data_plot\"), height = \"100%\",\n dblclick = ns(\"data_plot_dblclick\"),\n brush = brushOpts(\n id = ns(\"data_plot_brush\"),\n delayType = \"debounce\",\n direction = \"x\",\n resetOnNew = TRUE\n )) %>%\n withSpinner(type = 5, proxy.height = paste0(plot_height - 50, \"px\"))\n )\n )\n ),\n\n # traces box ----\n div(id = ns(\"traces_box\"),\n default_box(\n title = \"Data Traces\", width = 4,\n selectorTableUI(ns(\"traces_selector\")),\n footer = div(\n tooltipInput(actionButton, ns(\"traces_refresh\"), label = \"Re-plot\", icon = icon(\"refresh\"),\n tooltip = \"Refresh plot with new data trace selection.\"),\n spaces(1),\n selectorTableButtons(ns(\"traces_selector\"))\n )\n )\n ) %>% hidden(),\n\n # options box -----\n div(id = ns(\"options_box\"),\n default_box(\n title = \"Plot Options\", width = 4,\n fluidRow(\n h4(\"Errors:\") %>% column(width = 4),\n checkboxInput(ns(\"show_errors\"), NULL, value = FALSE) %>%\n column(width = 2),\n h4(\"Outliers:\") %>% column(width = 4),\n checkboxInput(ns(\"show_outliers\"), NULL, value = TRUE) %>%\n column(width = 2)\n ),\n fluidRow(\n h4(\"Device Info:\") %>% column(width = 4),\n checkboxInput(ns(\"show_device_info\"), NULL, value = FALSE) %>%\n column(width = 2),\n h4(\"Overlay Exps:\") %>% column(width = 4),\n checkboxInput(ns(\"overlay_exps\"), NULL, value = FALSE) %>%\n column(width = 2)\n ),\n fluidRow(\n h4(\"Time axis:\") %>% column(width = 4),\n radioButtons(ns(\"time_axis\"), NULL, choices = c(\"date & time\", \"duration\"), selected = \"date & time\", inline = TRUE) %>% column(width = 8)\n ),\n fluidRow(\n h4(\"Time intervals:\") %>% column(width = 4),\n numericInput(ns(\"time_intervals_number\"), NULL, value = NA, min = 1, step = 1) %>% column(width = 3),\n selectInput(ns(\"time_intervals_unit\"), NULL, choices = c(\"default\", \"mins\", \"hours\", \"days\"), selected = \"default\") %>% column(width = 5)\n ),\n fluidRow(\n h4(\"Plot height:\") %>% 
column(width = 4),\n numericInput(ns(\"plot_height\"), NULL, value = plot_height, min = 100, step = 50) %>%\n column(width = 8)),\n fluidRow(\n h4(\"Legend:\") %>% column(width = 4),\n selectInput(ns(\"legend_position\"), NULL, choices = c(\"right\", \"bottom\", \"hide\"), selected = \"right\") %>% column(width = 8)\n ),\n fluidRow(\n h4(\"Font Size:\") %>% column(width = 4),\n numericInput(ns(\"font_size\"), NULL, value = 18, min = 6, step = 1) %>%\n column(width = 8)\n ),\n footer = tooltipInput(actionButton, ns(\"options_refresh\"), label = \"Re-plot\",\n icon = icon(\"refresh\"),\n tooltip = \"Refresh plot with new plot settings.\") %>% disabled()\n )\n ) %>% hidden(),\n\n # summary box -----\n div(id = ns(\"summary_box\"),\n default_box(\n title = \"Summary of Plotted Data\", width = 12,\n tooltipInput(numericInput, ns(\"digits\"), label = NULL, value = 2, step = 1, tooltip = \"Enter number of digits to display.\"),\n tableOutput(ns(\"summary_table\"))\n )\n ) %>% hidden(),\n\n # data box ----\n\n div(id = ns(\"data_box\"),\n default_box(\n title = \"All Plotted Data\", width = 12,\n DT::dataTableOutput(ns(\"data_table\"))\n )\n ) %>% hidden()\n\n )\n\n}\n"
},
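The brush/double-click zoom in `dataPlotServer` keeps a stack of previous views; the sketch below strips that down to a single zoom level to show the core brush-to-zoom, double-click-to-restore pattern. A standalone app with illustrative names and a built-in dataset standing in for the data logs:

```r
library(shiny)
library(ggplot2)

server <- function(input, output, session) {
  xlim <- reactiveVal(NULL) # NULL means full range

  # brushing along x sets the zoom window; double-click restores the full view
  observeEvent(input$plot_brush,
               xlim(c(input$plot_brush$xmin, input$plot_brush$xmax)))
  observeEvent(input$plot_dblclick, xlim(NULL))

  output$plot <- renderPlot({
    ggplot(mtcars, aes(wt, mpg)) + geom_point() +
      coord_cartesian(xlim = xlim())
  })
}

ui <- fluidPage(
  plotOutput("plot",
    dblclick = "plot_dblclick",
    brush = brushOpts(id = "plot_brush", direction = "x", resetOnNew = TRUE))
)
# shinyApp(ui, server)
```

Extending this to the module's behavior is a matter of pushing each new window onto a list and popping it on double-click, as `add_to_zoom_stack()` and `load_last_zoom()` do above.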
{
"alpha_fraction": 0.6900129914283752,
"alphanum_fraction": 0.6900129914283752,
"avg_line_length": 46.20408248901367,
"blob_id": "8f448fdce5c7918bd6c4a49e1fecaafd9d58b3a9",
"content_id": "5cc864780ab9e626c71ca3ede46a9ef213cede7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2313,
"license_type": "no_license",
"max_line_length": 512,
"num_lines": 49,
"path": "/R/app_setup.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Setup the user interface app\n#'\n#' Sets up the necessary files for launching the user interface in the target folder. Keeps sensitive authentication information separate from the app launch script and allows for easy running of the gui via \\code{\\link[shiny]{runApp}} or deployment as a server application (e.g. on shinyapps.io). IMPORTANT: never add the created \\code{credentials.R} file to any publically accessible shared folder or github repository - the database and particle account information you have stored there could be compromised!\n#' @param dir the directory where to create the launch scripts (absolute path or relative to your current working directory)\n#' @param overwrite whether to overwrite existing launch files (default is FALSE)\n#' @export\nll_setup_gui <- function(dir, overwrite = FALSE) {\n\n if(!dir.exists(dir)) dir.create(dir, recursive = TRUE)\n\n glue::glue(\"Info: setting up lablogger launch files in folder '{dir}'...\") %>% message()\n\n # copy app.R template file\n message(\" - creating 'app.R' launch script... \", appendLF = FALSE)\n file_exists <- file.exists(file.path(dir, \"app.R\"))\n if (!file_exists || overwrite) {\n file.copy(\n from = system.file(package = \"lablogger\", \"templates\", \"app.R\"),\n to = file.path(dir, \"app.R\"),\n overwrite = TRUE\n )\n }\n glue::glue(\n if (file_exists && overwrite) \"existing file overwritten.\"\n else if (file_exists) \"failed (file already exists, use overwrite = TRUE)\"\n else \"complete\"\n ) %>% message()\n\n # copy credentials.R template file\n message(\" - creating 'credentials.R' authentication file... \", appendLF = FALSE)\n file_exists <- file.exists(file.path(dir, \"credentials.R\"))\n if (!file_exists || overwrite) {\n file.copy(\n from = system.file(package = \"lablogger\", \"templates\", \"credentials.R\"),\n to = file.path(dir, \"credentials.R\"),\n overwrite = TRUE\n )\n }\n glue::glue(\n if (file_exists && overwrite) \"existing file overwritten.\"\n else if (file_exists) \"failed (file already exists, use overwrite = TRUE)\"\n else \"complete\"\n ) %>% message()\n\n # add to gitignore\n message(\" - adding authentication file to .gitignore... \", appendLF = FALSE)\n cat(\"# exclude credentials\\ncredentials.R\\n\", file = file.path(dir, \".gitignore\"), append = TRUE)\n message(\"complete.\")\n}\n"
},
{
"alpha_fraction": 0.6491745114326477,
"alphanum_fraction": 0.6491745114326477,
"avg_line_length": 47.42856979370117,
"blob_id": "76e9e50ab40fe50665466c8b39bcc34f9f279c86",
"content_id": "ed4091690aa54bc8da13ac07f488652462908269",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1696,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 35,
"path": "/R/app_run.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Run the user interface\n#'\n#' This function runs the user interface for the lab logger.\n#'\n#' @param group_id which group to run for\n#' @param access_token access token for particle account\n#' @param pool ideally database connection pool, see \\link[pool]{dbPool} but can also be a single db connection (not recommended)\n#' @param password which password to require for login. If NULL, login will be automatic. (NOTE: maybe manage by data base at some point?)\n#' @param ... passed on to the \\code{\\link[shiny]{runApp}} call (only if \\code{launch = TRUE}), can include server-specific parameters such as host or port\n#' @param launch whether to launch the app (TRUE) or return a shiny app object (FALSE) that then can be launched via \\code{\\link[shiny]{runApp}}\n#' (note: if \\code{launch=FALSE}, \\code{...} gets ignored)\n#' @inheritParams app_ui\n#' @export\nll_run_gui <- function(group_id, access_token, pool, timezone = Sys.timezone(), app_pwd = NULL, app_title = group_id, app_color = \"red\", ..., launch = FALSE) {\n\n glue(\"\\n\\n***************************************************************\",\n \"\\nINFO: Launching lab logger GUI for group '{group_id}' in timezone {timezone}...\",\n \"{if (default(debug)) {'\\nINFO: debug mode ON'} else {''}}\") %>%\n message()\n\n # make sure shinyBS on attach runs\n shinyBS:::.onAttach()\n\n # generate app\n app <- shinyApp(\n ui = app_ui(app_title = app_title, app_color = app_color, timezone = timezone),\n server = app_server(group_id = group_id, access_token = access_token, pool = pool, app_pwd = app_pwd, timezone = timezone)\n )\n\n # launch or return\n if (launch)\n runApp(app, display.mode = \"normal\", ...)\n else\n return(app)\n}\n\n"
},
{
"alpha_fraction": 0.5369359254837036,
"alphanum_fraction": 0.5507329702377319,
"avg_line_length": 37.230770111083984,
"blob_id": "265221f8ad7384961a2fa8b65cc8d40a746f0bb4",
"content_id": "70ffafd04c66680fb54cd21fe48a99080bc82b29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3479,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 91,
"path": "/R/app_ui.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Lab Logger UI\n#'\n#' Generates the user interface part of the isoviewer app\n#'\n#' @param app_title the title of the application\n#' @param app_color the dashboard color, see \\link[shinydashboard]{dashboardPage} skin for available options\napp_ui <- function(app_title = \"Lab Logger\", app_color = \"red\", timezone = NULL) {\n\n #box_default <- \"#222d32\" # darker\n box_default <- \"#2c3b41\" # ligther\n\n # set spinner color\n options(spinner.color = app_color)\n\n dashboardPage(\n # SKIN ----\n skin = app_color,\n\n # HEADER ----\n dashboardHeader(title = app_title, titleWidth = 150),\n\n # SIDEBAR ---\n dashboardSidebar(\n\n width = 150,\n sidebarMenu(\n id = \"menu\",\n h5(a(\"Lab Logger\", href = \"https://github.com/KopfLab/lablogger\", target = \"_blank\"),\n as.character(packageVersion(\"lablogger\")), align = \"center\"),\n if (!is.null(timezone)) h5(timezone, align = \"center\"),\n \"login\" %>% menuItem(\"Login\", tabName = ., icon = icon(\"log-in\", lib = \"glyphicon\"), selected = TRUE),\n \"experiments\" %>% menuItem(\"Experiments\", tabName = ., icon = icon(\"flask\")),\n \"devices\" %>% menuItem(\"Devices\", tabName = ., icon = icon(\"cogs\")),\n \"data\" %>% menuItem(\"All Data\", tabName = ., icon = icon(\"line-chart\")),\n \"live\" %>% menuItem(\"Webcams\", tabName = ., icon = icon(\"camera\")),\n tags$li(a(uiOutput(\"help\", inline = TRUE)))\n ),\n\n # HEADER ----\n tags$head(\n tags$style(\n type=\"text/css\",\n HTML(str_c(\n # error validation output\n #\".shiny-output-error-validation { color: red; font-size: 16px; }\", # do we want this red?\n \".shiny-output-error-info { color: black; font-size: 20px; padding: 20px; }\",\n # adjust sidebar height\n #\".sidebar {height: 2000px}\", # FIXME: make this dynamically long enough\n # body top padding\n \".box-body {padding-top: 5px; padding-bottom: 0px}\",\n # pads on shiny items\n \".form-group, .selectize-control {margin-bottom: 0px;}\",\n # custom background box\n str_interp(\".box.box-solid.box-info>.box-header{color:#fff; background: ${col}; background-color: ${col};}\", list(col = box_default)),\n str_interp(\".box.box-solid.box-info{border:1px solid ${col};}\", list(col = box_default)),\n sep=\"\\n\"))\n )\n ),\n\n # USE SHINY JS AND EXTENSIONS ---\n useShinyjs()\n\n ),\n\n # BODY ====\n dashboardBody(\n\n div(class = \"row\",\n tabItems(\n # login ====\n tabItem(\"login\", loginUI(\"login\", title = app_title)),\n\n # all other tabs ====\n tabItem(\"data\", div(id = \"data-panel\", column(width = 12, uiOutput(\"data\") %>% withSpinner(type = 5, proxy.height = \"450px\")))),\n tabItem(\"devices\", div(id = \"devices-panel\", column(width = 12, uiOutput(\"devices\")))),\n tabItem(\"experiments\", div(id = \"experiments-panel\", column(width = 12, uiOutput(\"experiments\")))),\n tabItem(\"live\", div(id = \"live-panel\", column(width = 12, uiOutput(\"live\"))))\n ## old live\n # h2(\n # actionLink(\"refresh_cams\", \"Reload cameras\", icon = icon(\"gear\")),\n # bsTooltip(\"refresh_cams\", \"Reload the cameras\"),\n # align = \"center\"\n # ),\n # uiOutput(\"raspicams\") %>% withSpinner(type = 5, proxy.height = \"480px\")\n\n )\n )\n )\n )\n\n}\n"
},
{
"alpha_fraction": 0.6382592916488647,
"alphanum_fraction": 0.6391659379005432,
"avg_line_length": 34.015872955322266,
"blob_id": "62537b33b3696115a4c82d17f896a503f975585b",
"content_id": "81ede0e32548e12ff7f725c305afcc2c3ba1783f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2206,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 63,
"path": "/R/app_utils.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "`%then%` <- shiny:::`%OR%`\n\n# display module message\n# @param type if this is an info meessage or debug (debug only shows if in debug mode)\nmodule_message <- function(ns, type = c(\"info\", \"debug\"), ...) {\n if (type == \"debug\" && !default(debug)) return()\n prefix <- if(type == \"info\") \"INFO: \" else if (type == \"debug\") \"DEBUG: \" else stop(\"don't know message type\", type)\n prefix <- paste(as.character(Sys.time()), prefix)\n cat(file=stderr(), prefix, ..., \" (NS: \", ns(NULL),\")\\n\", sep = \"\")\n}\n\n# convenience function for adding spaces (not the most elegant way but works)\nspaces <- function(n) {\n HTML(rep(\" \", n))\n}\n\n# convenience function for adding input with tooltip with default parameters\ntooltipInput <- function(input, inputId, ..., tooltip = NULL, hidden = FALSE, disabled = FALSE) {\n input_tag <- do.call(input, args = c(list(inputId = inputId), list(...)))\n if (hidden) input_tag <- shinyjs::hidden(input_tag)\n if (disabled) input_tag <- shinyjs::disabled(input_tag)\n tagList(\n input_tag,\n if (!is.null(tooltip)) bsTooltip(inputId, tooltip)\n )\n}\n\n# convenience function for adding output with tooltip with default parameters\ntooltipOutput <- function(input, outputId, ..., tooltip = NULL) {\n tagList(\n do.call(input, args = c(list(outputId = outputId), list(...))),\n if (!is.null(tooltip)) bsTooltip(outputId, tooltip)\n )\n}\n\n# default box\ndefault_box <- function(..., status = \"info\", solidHeader = TRUE, collapsible = TRUE) {\n box(..., status = status, solidHeader = solidHeader, collapsible = collapsible)\n}\n\n# success modal\nsuccess_modal <- function(..., title = \"Success\", show = TRUE) {\n modal <- modalDialog(\n title = h3(title, align = \"center\"),\n fade = FALSE, easyClose = TRUE, size = \"m\",\n span(...),\n footer = modalButton(\"Close\")\n )\n if (show) showModal(modal)\n else return(modal)\n}\n\n# error modal\nerror_modal <- function(..., title = \"A problem occurred\", show = TRUE) {\n modal <- modalDialog(\n title = h3(title, align = \"center\", style = \"color: red;\"),\n fade = FALSE, easyClose = TRUE, size = \"m\",\n span(..., style = \"color: red;\"),\n footer = modalButton(\"Close\")\n )\n if (show) showModal(modal)\n else return(modal)\n}\n"
},
{
"alpha_fraction": 0.6410923004150391,
"alphanum_fraction": 0.6430429220199585,
"avg_line_length": 35.619049072265625,
"blob_id": "af7da5ef13b8b5ae33780d03d675855e8d8e941d",
"content_id": "c7be6456b331ec75d1461634544e427f5d016474",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1538,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 42,
"path": "/R/app_module_plot_download.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Plot Download Server\n#' @param plot_func reactive function generating the plot\n#' @param filename_func reactive function returning the default plot name\nplotDownloadServer <- function(input, output, session, plot_func, filename_func) {\n\n # namespace\n ns <- session$ns\n\n # save dialog\n save_dialog <- reactive({\n modalDialog(\n title = \"Save plot\", fade = FALSE, easyClose = TRUE, size = \"s\",\n textInput(ns(\"save_name\"), \"Filename:\", filename_func()),\n numericInput(ns(\"save_width\"), \"Width [inches]:\", 12),\n numericInput(ns(\"save_height\"), \"Height [inches]:\", 8),\n\n footer =\n tagList(\n downloadButton(ns(\"download\"), label = \"Download\", icon = icon(\"download\")),\n modalButton(\"Close\")\n )\n )})\n observeEvent(input$download_dialog, showModal(save_dialog()))\n\n # download handler\n output$download <- downloadHandler(\n filename = function() { isolate(input$save_name) },\n content = function(filename) {\n module_message(ns, \"debug\", \"saving plot \", input$save_name, \" (\", input$save_width, \" by \", input$save_height, \")\")\n ggsave(file = filename, plot = plot_func(), width = isolate(input$save_width), height = isolate(input$save_height), device = \"pdf\")\n })\n\n}\n\n\n#' Plot Download Link\n#' @param label Label for the download link\nplotDownloadLink <- function(id, label = \"Save\", tooltip = \"Save the plot as a PDF\") {\n ns <- NS(id)\n tooltipInput(actionButton, ns(\"download_dialog\"), label, icon = icon(\"file-pdf\"),\n tooltip = tooltip)\n}\n"
},
{
"alpha_fraction": 0.6664456129074097,
"alphanum_fraction": 0.6690981388092041,
"avg_line_length": 26.925926208496094,
"blob_id": "11c0169c5178d873298e16e19e82dca48d4ca5a6",
"content_id": "f12cb4d8951a4fd5cd218d6d943442928e6d2442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1508,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 54,
"path": "/R/app_module_experiment_selector.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# FIXME: replace data_manager with get_experiments, refresh_experiments, etc. functions\n# this is the multiple experiment selector\n# single experiment selector is implemented directly in app_module_experiment_overview\nexperimentSelectorServer <- function(input, output, session, data_manager) {\n\n # namespace\n ns <- session$ns\n\n # selector\n selector <- callModule(\n selectorTableServer, \"selector\",\n id_column = \"exp_id\",\n column_select = c(Description = exp_desc, Recording = recording)\n )\n\n # update data\n observe({\n req(df <- data_manager$get_experiments())\n isolate({\n if (nrow(df) > 0) {\n df <- select(df, exp_id, exp_desc, recording) %>%\n mutate(recording = ifelse(recording, \"yes\", \"no\"))\n selector$set_table(df)\n }\n })\n })\n\n # update selected\n observe({\n selected_exps <- data_manager$get_selected_experiments()\n selector$set_selected(selected_exps)\n })\n\n # trigger refresh\n observeEvent(input$experiment_refresh, data_manager$refresh_experiments())\n\n # trigger select\n observe(data_manager$select_experiments(selector$get_selected()))\n\n}\n\n\nexperimentSelectorUI <- function(id, width = 12) {\n ns <- NS(id)\n default_box(\n title = \"Experiments\", width = width,\n selectorTableUI(ns(\"selector\")),\n footer = div(\n tooltipInput(actionButton, ns(\"experiment_refresh\"), label = \"Refresh\", icon = icon(\"refresh\"), tooltip = \"Refresh experiments.\"),\n spaces(1),\n selectorTableButtons(ns(\"selector\"))\n )\n )\n}\n"
},
{
"alpha_fraction": 0.7097744345664978,
"alphanum_fraction": 0.7097744345664978,
"avg_line_length": 46.5,
"blob_id": "81611b6fc1bd8b0eb525c3c5ae3c311343a9e04f",
"content_id": "4733553447a252978f8d9667bdb2ecf3ac0f38ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 665,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 14,
"path": "/inst/templates/app.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "source('credentials.R')\nmessage('INFO: Connection to database... ', appendLF = FALSE)\npool <- pool::dbPool(drv = RPostgreSQL::PostgreSQL(), host = db_host, dbname = db_name, user = db_user, password = db_pwd)\nmessage('successful.')\nshiny::onStop(function() { pool::poolClose(pool) })\nlablogger::ll_run_gui(\n group_id = group_id, # from credentials\n access_token = access_token, # from credentials\n pool = pool, # from credentials\n timezone = timezone, # from credentials\n app_pwd = app_pwd, # from credentials\n app_title = group_id, # change to customize application title\n app_color = \"red\" # for options, see skin in ?shinydashboard::dashboardPage\n)\n"
},
{
"alpha_fraction": 0.6059757471084595,
"alphanum_fraction": 0.6115779876708984,
"avg_line_length": 32.81052780151367,
"blob_id": "e8d61981952da380306c93fa83e6b45c55bb859d",
"content_id": "b682d87607bbd89276383ad5fe2e410b25de2b9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 3213,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 95,
"path": "/R/app_module_device_commands.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "deviceCommandsServer <- function(input, output, session, get_devices, access_token) {\n\n # namespace\n ns <- session$ns\n\n # values\n values <- reactiveValues(\n selected_devices = data_frame()\n )\n\n # trigger control dialog\n observeEvent(input$device_control, showModal(control_dialog()))\n\n # dialog\n control_dialog <- reactive({\n modalDialog(\n title = h3(\"Send a command to one or multiple devices\", align = \"center\"),\n fade = FALSE, easyClose = TRUE, size = \"l\",\n fluidRow(\n column(6, selectorTableUI(ns(\"devices\"))),\n column(6, h4(\"Selected Devices\"), tableOutput(ns(\"selected_devices\")))\n ),\n fluidRow(\n column(6,\n h4(\"Command\"),\n h5(\"For a full list of available commands, follow \", a(\"this link\", href=\"https://github.com/KopfLab/labware_commands#devicecontroller-commands-cmd-options\", target = \"_blank\"), \".\"),\n textInput(ns(\"command\"), NULL)\n ),\n column(6,\n h4(\"Message for State Log\"),\n h5(\"To add an optional custom note to the devices' state log.\"),\n textInput(ns(\"message\"), NULL)\n )\n ),\n h4(\"CHECK: are you absolutely sure you want to send this command to the selected devices?\", style = \"color: red;\", align = \"center\"),\n footer =\n tagList(\n tooltipInput(actionButton, ns(\"send_command\"),\n label = \"Send\", icon = icon(\"paper-plane\"),\n tooltip = \"Send command.\",\n disabled = TRUE),\n modalButton(\"Close\")\n )\n )})\n\n # selection table\n selector <- callModule(\n selectorTableServer, \"devices\",\n id_column = \"device_id\",\n column_select = c(Name = device_name, Type = device_type_desc),\n initial_page_length = 10\n )\n\n # update devices table\n observe({\n req(df <- get_devices())\n if (nrow(df) > 0) selector$set_table(df)\n })\n\n # react to device selection\n observe({\n values$selected_devices <- selector$get_selected_items()\n shinyjs::toggleState(\"send_command\", condition = nrow(values$selected_devices) > 0)\n })\n\n # show summary table of selected devices\n output$selected_devices = renderTable({\n validate(need(nrow(values$selected_devices) > 0, \"None\"))\n if (\"return_message\" %in% names(values$selected_devices))\n select(values$selected_devices, Name = device_name, `Command Message` = return_message)\n else\n select(values$selected_devices, Name = device_name)\n })\n\n # send command\n observeEvent(input$send_command, {\n module_message(ns, \"debug\", \"sending device command '\", input$command, \"'...\")\n values$selected_devices <-\n withProgress(\n message = 'Sending command', detail = \"Contacting device cloud...\", value = 0.5,\n values$selected_devices %>%\n ll_send_devices_command(\n command = input$command,\n message = input$message,\n access_token = access_token\n )\n )\n })\n}\n\n# Device Control Button\ndeviceControlButton <- function(id, label = \"Control\", tooltip = \"Send device commands.\") {\n ns <- NS(id)\n tooltipInput(actionButton, ns(\"device_control\"), label = label, icon = icon(\"gamepad\"), tooltip = tooltip)\n}\n\n"
},
{
"alpha_fraction": 0.7770618796348572,
"alphanum_fraction": 0.780283510684967,
"avg_line_length": 52.517242431640625,
"blob_id": "8ef49c53199786e4299a0c6d41572a100aba1d1f",
"content_id": "e05dc48f314cbd5aabadf75aeb789cfa85c83fb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1552,
"license_type": "no_license",
"max_line_length": 223,
"num_lines": 29,
"path": "/R/package.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' @keywords internal\n\"_PACKAGE\"\n\n#' @importFrom rlang quo quos enquo !! !!! is_quosure quo_squash quo_text quo_is_null quo_is_symbol quo_is_call lang_head sym syms eval_tidy parse_expr\n#' @importFrom curl new_handle curl_fetch_memory handle_setopt\n#' @importFrom jsonlite fromJSON toJSON\n#' @importFrom glue glue collapse\n#' @importFrom tibble deframe tibble\n#' @importFrom dplyr select rename filter as_data_frame %>% tbl collect mutate data_frame everything starts_with ends_with left_join full_join semi_join arrange group_by ungroup do summarize bind_rows between desc case_when\n#' @importFrom tidyr gather nest unnest spread\n#' @importFrom broom tidy glance\n#' @importFrom ggplot2 ggplot aes geom_point geom_line geom_ribbon theme_bw theme_void facet_grid scale_x_datetime labs annotate %+% ggsave theme element_text\n#' @importFrom lubridate with_tz now ymd_hms as.duration as_datetime\n#' @importFrom purrr map map_lgl map_chr map_dbl map_int map2 map2_chr walk walk2 map2_int\n#' @importFrom stringr str_c str_replace fixed str_interp str_to_lower str_detect str_extract\n#' @importFrom readr write_rds read_rds\n#' @importFrom DBI dbExecute\n#' @importFrom pool dbPool\n#' @importFrom RPostgreSQL PostgreSQL\n#' @import shiny\n#' @import shinydashboard\n#' @importFrom shinyjs useShinyjs hidden show hide toggle disabled toggleState\n#' @importFrom shinyAce aceEditor\n#' @importFrom shinyBS bsTooltip\n#' @importFrom shinycssloaders withSpinner\n#' @importFrom stats setNames cor.test lm\n#' @importFrom utils packageVersion\n#' @importFrom methods is\nNULL\n"
},
{
"alpha_fraction": 0.6419991254806519,
"alphanum_fraction": 0.6441441178321838,
"avg_line_length": 38.17647171020508,
"blob_id": "377d30f6e87378be2a53a21ce8de379384fb17f6",
"content_id": "8f234147f99d4477bc21c7da0be526ca3cf03fc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4662,
"license_type": "no_license",
"max_line_length": 377,
"num_lines": 119,
"path": "/R/file_write.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Save device data logs\n#'\n#' Write device data logs to an .rds, .xlsx (excel), and/or .csv file and optionally compress them into a zip archive. Note that target files will be overwritten if they already exist.\n#'\n#' @param device_data_logs data logs to save\n#' @param file_path path to the .rds, .xlsx and/or .csv files (can be multiple). All folders must already exist.\n#' @param generate_rmd whether to generate an RMarkdown file for analysis. If both .rds and .xlsx file paths are provided, reads from the one listed first. Use \\code{generate_rmd = TRUE} to have the function infer the Rmd file name from the .rds/.xlsx file path, or alternatively provide a file path with a specific name for the Rmd file (\\code{generate_rmd = file.path(...)}).\n#' @param zip whether to zip the resulting file(s). Use \\code{zip = TRUE} to have the function infer the zip file name from the .rds/.xlsx file path, or alternatively provide a file path with a specific name for the zip file (\\code{zip = file.path(...)}).\n#' @return returns the logs invisibly for piping\n#' @family data logs functions\n#' @export\nll_write_device_data_logs_to_file <- function(device_data_logs, file_path, zip = FALSE, quiet = default(quiet)) {\n\n # write log files\n files <- write_logs_to_file(device_data_logs, file_path)\n zip_file <- if (is.character(zip)) zip else paste0(files$base_path[1], \".zip\")\n\n # info message\n if (!quiet) {\n glue::glue(\n \"Info: writing device data logs\",\n if (zip != FALSE) \" into zip archive ('{basename(zip_file)}')\" else \"\",\n \":\\n - {paste0(basename(files$file_path), collapse = '\\n - ')}\"\n ) %>% message()\n }\n\n # zip file\n if (zip != FALSE) {\n zip_files(files$file_path, zip_file = zip_file, cleanup_after_compression = TRUE)\n }\n\n return(invisible(device_data_logs))\n}\n\n\n#' Save device state logs\n#' @export\nll_write_device_state_logs_to_file <- function(device_state_logs, ...) {\n stop(\"sorry, not implemented yet\", call. = FALSE)\n}\n\n# utils =====\n\n# convenience function to write logs to file, returns the files data frame\nwrite_logs_to_file <- function(logs, file_path) {\n\n # safety checks\n if (missing(logs) || !is.data.frame(logs)) stop(\"no data logs provided\", call. = FALSE)\n if (missing(file_path) || length(file_path) == 0) stop(\"no file paths provided\", call. = FALSE)\n\n # file info\n supported <- c(\"rds\", \"xlsx\", \"csv\")\n files <- tibble(\n file_path = file_path,\n ext = stringr::str_match(file_path, \"\\\\.(\\\\w+)$\")[,2],\n base_path = stringr::str_replace(file_path, sprintf(\"\\\\.%s$\", ext), \"\"),\n folder = dirname(file_path),\n dir_ok = dir.exists(folder),\n ext_ok = ext %in% supported\n )\n\n # check supported extensions\n if (!all(files$ext_ok)) {\n glue::glue(\n \"unknown file extension(s): {paste0(filter(files, !ext_ok)$ext, collapse = ', ') %>% unique()}\",\n \" (supported: {paste0(supported, collapse = ', ')})\") %>%\n stop(call. = FALSE)\n }\n\n # check folders\n if (!all(files$dir_ok)) {\n glue::glue(\n \"missing folder(s) - please make sure all directories already exist: \",\n \"{paste0(filter(files, !dir_ok)$folder %>% unique(), collapse = ', ')}\") %>%\n stop(call. 
= FALSE)\n }\n\n # save files\n logs_wo_lists <- remove_list_columns(logs)\n save_func <- function(file_path, ext) {\n if (ext == \"rds\") {\n readr::write_rds(logs, file_path)\n } else if (ext == \"xlsx\") {\n openxlsx::write.xlsx(logs_wo_lists, file_path)\n } else if (ext == \"csv\") {\n readr::write_csv(logs_wo_lists, file_path)\n } else {\n stop(\"shouldn't get here\", call. = FALSE)\n }\n return(TRUE)\n }\n\n mutate(files, saved = purrr::map2_lgl(file_path, ext, save_func))\n}\n\n# helper function to zip up files with a simple file path\n# @param cleanup_after_compression whether to remove the original files\nzip_files <- function(file_path, zip_file = paste0(tempfile(), \".zip\"), cleanup_after_compression = FALSE) {\n files_exist <- file.exists(file_path)\n if (any(!files_exist)) {\n glue::glue(\n \"some files do not exist \",\n \"({paste(file_path[!files_exist], collapse = ', ')}) \",\n \"and will be exluded from the zip archive\") %>%\n warning(immediate. = TRUE, call. = FALSE)\n }\n zip_files <- file_path[files_exist]\n zip_files_in_wd <- basename(zip_files)\n file.copy(from = zip_files, to = zip_files_in_wd)\n zip_file_in_wd <- basename(zip_file)\n zip::zip(zip_file_in_wd, files = zip_files_in_wd)\n file.copy(from = zip_file_in_wd, to = zip_file)\n if (cleanup_after_compression) {\n unlink(unique(c(zip_files, zip_files_in_wd, zip_file_in_wd)))\n } else {\n unlink(unique(c(zip_files_in_wd, zip_file_in_wd)))\n }\n return(zip_file)\n}\n"
},
{
"alpha_fraction": 0.6054258346557617,
"alphanum_fraction": 0.6090707778930664,
"avg_line_length": 33.844051361083984,
"blob_id": "831da2b9ca4ce10e600fefeda3abf50233765d8b",
"content_id": "b0e5732d895d9b8b0b98fda085293a4e3f7dd71c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 21674,
"license_type": "no_license",
"max_line_length": 429,
"num_lines": 622,
"path": "/R/app_module_experiment_manager.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "\nexperimentManagerServer <- function(input, output, session, dm_links, dm_experiments, dm_devices, dm_cloudinfo, dm_datalogs, timezone, access_token) {\n\n # namespace\n ns <- session$ns\n\n # reactive values\n values <- reactiveValues(\n in_edit_exp_device_links = NULL\n )\n\n # new experiment =====\n observeEvent(input$experiment_new, showModal(new_dialog()))\n new_dialog <- reactive({\n modalDialog(\n title = h3(\"Create new experiment\", align = \"center\"),\n fade = FALSE, easyClose = TRUE, size = \"m\",\n h4(\"New Experiment ID\"),\n h5(\"This is your experiment's unique identifier, best to keep it as short as possible (4-8 characters). Initials and an experiment number are a popular choice, for example 'ABC01'. \", span(\"Keep in mind that you will not be able to change this ID afterwards.\", style = \"color: red;\"), \"However, you will be able to add an experiment description and experiment notes that you can continue to change/update as you see fit.\"),\n textInput(ns(\"new_experiment_id\"), label = NULL),\n footer =\n tagList(\n actionButton(ns(\"create_experiment\"), label = \"Create\", icon = icon(\"save\")) %>%\n shinyjs::disabled(),\n modalButton(\"Close\")\n )\n )\n })\n observe({ toggleState(\"create_experiment\", nchar(input$new_experiment_id) > 3) })\n observeEvent(input$create_experiment, {\n toggleState(\"create_experiment\", FALSE) # disable button\n new_id <- input$new_experiment_id\n module_message(ns, \"debug\", \"creating new experiment \", new_id)\n tryCatch(\n {\n dm_experiments$add_experiment(new_id)\n dm_experiments$refresh_experiments()\n load_experiment(new_id, loading_tab = \"info\")\n removeModal()\n },\n error = function(e) {\n removeModal()\n error_modal(\n title = glue::glue(\"Could not create experiment '{new_id}'\"),\n h4(e$message)\n )\n }\n )\n })\n\n # select experiment ====\n get_experiments_for_dropdown <- reactive({\n exps <- dm_experiments$get_experiments()\n if (nrow(exps) == 0) return (\"Choose an experiment\" = \"\")\n exps <- exps %>% mutate(label = sprintf(\"%s: %s\", exp_id, exp_desc))\n c(\"Choose an experiment\" = \"\",\n list(\n `Recording` = exps %>% filter(!archived, recording) %>% select(label, exp_id) %>% deframe(),\n `Not recording` = exps %>% filter(!archived, !recording) %>% select(label, exp_id) %>% deframe(),\n `Archived` = exps %>% filter(archived) %>% select(label, exp_id) %>% deframe()\n ))\n })\n\n # render dropdown\n output$experiment <- renderUI(selectInput(ns(\"experiment\"), label = NULL, choices = get_experiments_for_dropdown()))\n\n # trigger refresh\n observeEvent(input$experiment_refresh, {\n dm_experiments$refresh_experiments()\n load_links_table()\n })\n\n # update dropdown\n observe({\n updateSelectInput(session, \"experiment\", choices = get_experiments_for_dropdown(), selected = isolate(dm_experiments$get_loaded_experiment()))\n module_message(ns, \"debug\", \"updating experiments dropdown\")\n })\n\n # load experiment ===\n observeEvent(input$experiment, {\n req(input$experiment)\n if (is.null(dm_experiments$get_loaded_experiment()) || input$experiment != dm_experiments$get_loaded_experiment()) {\n load_experiment(input$experiment)\n }\n })\n\n load_experiment <- function(exp_id, loading_tab = NULL) {\n dm_experiments$load_experiment(exp_id)\n\n # archived\n if (dm_experiments$is_loaded_experiment_archived()) {\n\n # archived exp - FIXME: allow access to data here?\n hide(\"tabs\")\n show(\"archived_msg\")\n\n } else {\n\n # recording\n recording <- dm_experiments$is_loaded_experiment_recording()\n 
toggle(\"start_recording\", condition = !recording)\n toggle(\"stop_recording\", condition = recording)\n\n # info\n if (!is.na(dm_experiments$get_loaded_experiment_info()$exp_desc))\n updateTextInput(session, \"exp_desc\", value = dm_experiments$get_loaded_experiment_info()$exp_desc)\n else\n updateTextInput(session, \"exp_desc\", value = \"\", placeholder = \"Add a succinct description for this experiment.\")\n\n if (!is.na(dm_experiments$get_loaded_experiment_info()$exp_notes))\n updateTextInput(session, \"exp_notes\", value = dm_experiments$get_loaded_experiment_info()$exp_notes)\n else\n updateTextInput(session, \"exp_notes\", value = \"\", placeholder = \"Keep notes about this experiment.\")\n\n # reset cloud data\n dm_cloudinfo$reset_cloud_data()\n\n # select tab\n if (!is.null(loading_tab)) {\n module_message(ns, \"debug\", glue::glue(\"selecting exp. details tab '{loading_tab}'...\"))\n updateTabsetPanel(session, \"tabset\", selected = loading_tab)\n }\n\n # show tabs\n hide(\"archived_msg\")\n show(\"tabs\")\n }\n }\n\n # info tab\n output$exp_ID <- renderText({\n validate(need(dm_experiments$get_loaded_experiment(), \"no experiment selected\"))\n dm_experiments$get_loaded_experiment()\n })\n\n # save exp info ====\n\n observeEvent(input$save_info, {\n success <- dm_experiments$updated_loaded_experiment_info(exp_desc = input$exp_desc, exp_notes = input$exp_notes)\n if (success) {\n success_modal(\n h4(sprintf(\"Experiment '%s' was updated succesfully.\",\n dm_experiments$get_loaded_experiment()))\n )\n } else {\n error_modal(\n h4(sprintf(\"Something went wrong updating experiment '%s'.\",\n dm_experiments$get_loaded_experiment()))\n )\n }\n dm_experiments$refresh_experiments()\n })\n\n # start/stop recording ====\n\n observeEvent(input$start_recording, {\n result <- dm_experiments$start_experiment()\n if (result$success) {\n success_modal(\n h4(strong(sprintf(\"Experiment %s is now recording.\", dm_experiments$get_loaded_experiment()))),\n h4(\"Please note that only data from linked devices that have 'data-log' turned on will actually be recorded. 
Check the 'Live State' on the 'Devices' tab for an overview of your devices' status.\")\n )\n } else {\n error_modal(\n h4(sprintf(\"Experiment %s could not start recording.\", dm_experiments$get_loaded_experiment()))\n )\n }\n toggle(\"start_recording\", condition = FALSE)\n toggle(\"stop_recording\", condition = TRUE)\n })\n\n observeEvent(input$stop_recording, {\n result <- dm_experiments$stop_experiment()\n if (result$success) {\n success_modal(\n h4(sprintf(\"Experiment %s is now no longer recording.\", dm_experiments$get_loaded_experiment()))\n )\n } else {\n error_modal(\n h4(sprintf(\"Experiment %s could not stop recording.\", dm_experiments$get_loaded_experiment()))\n )\n }\n toggle(\"start_recording\", condition = TRUE)\n toggle(\"stop_recording\", condition = FALSE)\n })\n\n # experiment links =====\n\n # device links table\n device_links <- callModule(\n selectorTableServer, \"device_links\",\n id_column = \"..id..\", row_column = \"..row..\",\n initial_page_length = -1 # show All\n )\n\n # load experiment links table\n load_links_table <- function() {\n\n # retrieve links\n exp_links <- dm_experiments$get_loaded_experiment_device_links_all()\n\n # no links available\n if (is.null(exp_links) || nrow(exp_links) == 0) {\n module_message(ns, \"debug\", \"no device links available\")\n device_links$set_table(NULL)\n } else {\n\n if (dm_cloudinfo$has_cloud_data()) {\n # fetch cloud data\n module_message(ns, \"debug\", \"preparing device links data table with live cloud data\")\n data <- dm_cloudinfo$get_exp_devices_cloud_data()\n } else {\n # only links, no cloud data\n module_message(ns, \"debug\", \"preparing device links data table without cloud data\")\n data <-\n ll_summarize_cloud_data_experiment_links(\n cloud_data = tibble(), experiment_device_links = exp_links,\n linked = TRUE, unlinked = FALSE\n ) %>% mutate(error = \"please click <Fetch Data>\")\n }\n\n # format data\n data <- data %>%\n mutate(datetime = ifelse(!is.na(datetime), format(datetime), error)) %>%\n mutate(..row.. 
= row_number()) %>%\n select(\n ..id.., ..row..,\n Name = device_name, `Live data posted at` = datetime,\n `Exp IDs (recording)` = recording_exp_ids, `Exp IDs (not recording)` = non_recording_exp_ids,\n idx, key, value, units,\n raw_serial, raw_serial_errors\n )\n\n data <- devices_info$apply_live_data_table_options(data)\n device_links$set_table(data)\n }\n }\n\n # watch for new cloud data\n observe({\n\n })\n\n # enable/disable delete button\n observe({\n toggleState(\"device_links_delete\", length(device_links$get_selected()) > 0)\n })\n\n # add links =====\n observeEvent(input$device_links_add, showModal(add_dialog()))\n add_dialog <- reactive({\n modalDialog(\n title = h3(\"Add device links\", align = \"center\"),\n fade = FALSE, easyClose = TRUE, size = \"l\",\n div(id = ns(\"add_links_step1_div\"),\n h5(\n align = \"center\",\n tooltipInput(\n actionButton, ns(\"add_by_device\"),\n label = \"Add links by device\", icon = icon(\"cogs\"), tooltip = \"Select specific devices to link some or all of their data to the experiment.\"),\n tooltipInput(\n actionButton, ns(\"add_by_experiment\"),\n label = \"Copy links from experiment\", icon = icon(\"flask\"), tooltip = \"Copy some or all of the device data links from another experiment.\")\n ),\n div(id = ns(\"devices_div\"), selectorTableUI(ns(\"add_links_devices\"))) %>% shinyjs::hidden(),\n div(id = ns(\"experiments_div\"), selectorTableUI(ns(\"add_links_experiments\"))) %>% shinyjs::hidden()\n ),\n div(id = ns(\"add_links_step2_div\"),\n span(\"Select which links to add (only links not already part of your experiment are displayed).\", align = \"center\"),\n selectorTableUI(ns(\"add_links_links\"))\n ) %>% shinyjs::hidden(),\n footer =\n tagList(\n actionButton(ns(\"add_by_device_next\"), label = \"Next\", icon = icon(\"forward\")) %>%\n shinyjs::hidden() %>% shinyjs::disabled(),\n actionButton(ns(\"add_by_experiment_next\"), label = \"Next\", icon = icon(\"forward\")) %>%\n shinyjs::hidden() %>% shinyjs::disabled(),\n actionButton(ns(\"add_links_save\"), label = \"Add Links\", icon = icon(\"save\"))%>%\n shinyjs::hidden() %>% shinyjs::disabled(),\n modalButton(\"Close\")\n )\n )\n })\n observeEvent(input$add_by_device, {\n # make sure to initialize devices\n dm_devices$init_devices()\n shinyjs::hide(\"experiments_div\")\n shinyjs::hide(\"add_by_experiment_next\")\n shinyjs::show(\"devices_div\")\n shinyjs::show(\"add_by_device_next\")\n })\n observeEvent(input$add_by_experiment, {\n shinyjs::hide(\"devices_div\")\n shinyjs::hide(\"add_by_device_next\")\n shinyjs::show(\"experiments_div\")\n shinyjs::show(\"add_by_experiment_next\")\n })\n observe({\n toggleState(\"add_by_device_next\", length(add_links_devices$get_selected()) > 0)\n toggleState(\"add_by_experiment_next\", length(add_links_experiments$get_selected()) > 0)\n })\n\n # add links - device selection\n add_links_devices <- callModule(\n selectorTableServer, \"add_links_devices\",\n id_column = \"device_id\",\n column_select = c(Name = device_name, Type = device_type_desc),\n initial_page_length = 5\n )\n observe({\n req(df <- dm_devices$get_devices())\n req(dm_experiments$get_loaded_experiment())\n if (nrow(df) > 0) add_links_devices$set_table(df)\n })\n\n # add links - experiment selection\n add_links_experiments <- callModule(\n selectorTableServer, \"add_links_experiments\",\n id_column = \"exp_id\",\n column_select = c(Description = exp_desc, Recording = recording),\n initial_page_length = 5\n )\n observe({\n req(df <- dm_experiments$get_experiments())\n 
req(dm_experiments$get_loaded_experiment())\n if (nrow(df) > 0) {\n df <- df %>%\n dplyr::mutate(recording = ifelse(recording, \"yes\", \"no\")) %>%\n dplyr::filter(exp_id != dm_experiments$get_loaded_experiment())\n add_links_experiments$set_table(df)\n }\n })\n\n # select links to add =====\n add_links_links <- callModule(\n selectorTableServer, \"add_links_links\",\n id_column = \"..id..\", row_column = \"..row..\",\n initial_page_length = 10\n )\n set_add_links_table <- function(links_cloud_data) {\n # only links that are not part of the experiment yet\n exp_links <- dm_experiments$get_loaded_experiment_device_links()\n if (nrow(exp_links) > 0) {\n links_cloud_data <- links_cloud_data %>%\n filter(!..id.. %in% exp_links$..id..)\n }\n\n # table data\n table_data <- links_cloud_data %>%\n mutate(datetime = ifelse(!is.na(datetime), format(datetime), error)) %>%\n mutate(..row.. = row_number()) %>%\n select(\n ..id.., ..row..,\n Name = device_name, `Live data posted at` = datetime,\n `Exp IDs (recording)` = recording_exp_ids, `Exp IDs (not recording)` = non_recording_exp_ids,\n idx, key, value, units\n )\n add_links_links$set_table(table_data)\n }\n\n # links by device ======\n observeEvent(input$add_by_device_next, {\n # relevant device links\n links <- dm_links$get_links() %>%\n filter(device_id %in% add_links_devices$get_selected())\n\n # fetch cloud data\n links_cloud_data <-\n dm_cloudinfo$get_cloud_data(\n devices = add_links_devices$get_selected_items(),\n links = links,\n linked = TRUE,\n unlinked = TRUE\n )\n\n # set data\n set_add_links_table(links_cloud_data)\n\n # hide/show divs\n shinyjs::hide(\"add_links_step1_div\")\n shinyjs::hide(\"add_by_device_next\")\n shinyjs::show(\"add_links_step2_div\")\n shinyjs::show(\"add_links_save\")\n })\n\n # links by experiment ====\n observeEvent(input$add_by_experiment_next, {\n\n # relevant device links\n links <- dm_links$get_links() %>%\n filter(exp_id %in% add_links_experiments$get_selected())\n\n # devices\n devices <- links %>%\n select(device_id, device_name, particle_id, device_type_desc) %>%\n unique()\n\n # fetch cloud data\n links_cloud_data <-\n dm_cloudinfo$get_cloud_data(\n devices = devices,\n links = links,\n linked = TRUE,\n unlinked = FALSE\n )\n\n # set data\n set_add_links_table(links_cloud_data)\n\n # hide/show divs\n shinyjs::hide(\"add_links_step1_div\")\n shinyjs::hide(\"add_by_experiment_next\")\n shinyjs::show(\"add_links_step2_div\")\n shinyjs::show(\"add_links_save\")\n })\n\n # FIXME: easier to get the device_id and idx elsewhere?\n # parsing seems a bit silly as a solution\n parse_link_id <- function(value) {\n matches <- stringr::str_match(value, \"(\\\\d+)_(\\\\d+|NA)\")\n tibble(\n device_id = readr::parse_number(matches[,2]),\n data_idx = readr::parse_number(matches[,3])\n )\n }\n\n observe({\n link_ids <- parse_link_id(add_links_links$get_selected())\n shinyjs::toggleState(\"add_links_save\", length(na.omit(link_ids$data_idx)) > 0)\n })\n\n # save new links ======\n observeEvent(input$add_links_save, {\n add_links <- parse_link_id(add_links_links$get_selected())\n\n module_message(\n ns, \"debug\", \"adding device links \",\n with(add_links, paste0(device_id, \" #\", data_idx) %>% paste(collapse = \", \")),\n \"...\"\n )\n dm_experiments$add_experiment_device_links(add_links)\n\n # close modal\n removeModal()\n # refresh device links\n dm_links$refresh_links()\n device_links$set_selected(c())\n dm_cloudinfo$reset_cloud_data()\n })\n\n # delete links ====\n observeEvent(input$device_links_delete, 
showModal(delete_dialog()))\n delete_dialog <- reactive({\n modalDialog(\n title = h3(\"Delete device links\", align = \"center\"),\n fade = FALSE, easyClose = TRUE, size = \"m\",\n tableOutput(ns(\"selected_links\")),\n h4(\"Are you sure that you want to delete these device links? They will no longer record information as part of this experiment.\",\n style = \"color: red;\", align = \"center\"),\n footer =\n tagList(\n tooltipInput(\n actionButton, ns(\"device_links_delete_confirm\"),\n label = \"Delete\", icon = icon(\"trash\"), tooltip = \"Delete these links.\"),\n modalButton(\"Close\")\n )\n )\n })\n output$selected_links = renderTable({\n validate(need(length(device_links$get_selected()) > 0, \"None selected.\"))\n device_links$get_selected_items() %>% select(-..id.., -..row..)\n })\n\n # delete confirmation\n observeEvent(input$device_links_delete_confirm, {\n\n to_delete <-\n filter(\n dm_experiments$get_loaded_experiment_device_links(),\n ..id.. %in% device_links$get_selected()\n )$exp_device_data_id\n\n # FIXME: immplement delete\n module_message(ns, \"debug\", \"deleting device links \", paste(to_delete, collapse = \", \"), \"...\")\n dm_experiments$delete_experiment_device_links(to_delete)\n\n # close modal\n removeModal()\n # refresh device links\n dm_links$refresh_links()\n device_links$set_selected(c())\n dm_cloudinfo$reset_cloud_data()\n })\n\n # device control / commands =====\n control <- callModule(deviceCommandsServer, \"control\", get_devices = dm_experiments$get_loaded_experiment_devices, access_token = access_token)\n\n # devices info =====\n devices_info <- callModule(\n deviceInfoServer, \"devices_info\",\n get_cloud_state = dm_cloudinfo$get_exp_devices_cloud_state,\n refresh_cloud_state = dm_cloudinfo$refresh_cloud_state,\n get_cloud_data = dm_cloudinfo$get_exp_devices_cloud_data,\n refresh_cloud_data = function(){\n dm_cloudinfo$refresh_cloud_data()\n load_links_table() # update links table whenever cloud data is refreshed\n },\n get_cloud_info = dm_cloudinfo$get_exp_devices_cloud_info,\n refresh_cloud_info = dm_cloudinfo$refresh_cloud_info,\n get_device_ids = dm_experiments$get_loaded_experiment_device_ids,\n get_state_logs = dm_datalogs$get_experiment_devices_state_logs,\n refresh_state_logs = dm_datalogs$refresh_experiment_state_logs\n )\n observe({\n devices_info$trigger_live_data_table_options()\n load_links_table()\n })\n\n # experiment data ======\n\n callModule(\n dataPlotServer, \"exp_data_plot\", timezone = timezone,\n get_experiments = dm_experiments$get_loaded_experiment,\n get_data_logs = dm_datalogs$get_experiment_data_logs,\n refresh_data_logs = dm_datalogs$refresh_experiment_data_logs,\n reset_plot = eventReactive(dm_experiments$get_loaded_experiment(), runif(1))\n )\n\n}\n\nexperimentManagerUI <- function(id, width = 12) {\n\n ns <- NS(id)\n\n tagList(\n\n default_box(\n # selection ====\n title = \"Experiments\", width = width,\n uiOutput(ns(\"experiment\")),\n footer = div(\n tooltipInput(actionButton, ns(\"experiment_refresh\"), label = \"Refresh\", icon = icon(\"refresh\"), tooltip = \"Refresh experiments.\"),\n spaces(1),\n # FIXME\n tooltipInput(actionButton, ns(\"experiment_new\"), label = \"New experiment\", icon = icon(\"plus\"), tooltip = \"Add new experiment.\"),\n spaces(1),\n deviceControlButton(ns(\"control\"), label = \"Control Devices\"),\n spaces(1),\n actionButton(ns(\"start_recording\"), label = \"Start Recording\",\n icon = icon(\"play\"), style=\"color: #fff; background-color: #007f1f; border-color: #2e6da4\") %>% 
hidden(),\n actionButton(ns(\"stop_recording\"), label = \"Stop Recording\",\n icon = icon(\"stop\"), style=\"color: #fff; background-color: #f22e10; border-color: #2e6da4\") %>% hidden()\n )\n ),\n\n div(id = ns(\"archived_msg\"),\n h2(\"Sorry, this experiment is archived and can not be reconfigured. Please use the 'Data' menu on the left to view the data.\")\n ) %>% hidden(),\n\n div(id = ns(\"tabs\"),\n tabsetPanel(\n id = ns(\"tabset\"),\n type = \"tabs\", selected = \"data\",\n # data ===\n tabPanel(\n value = \"data\",\n \"Data\", br(),\n dataPlotUI(ns(\"exp_data_plot\"))\n ),\n # info =====\n tabPanel(\n value = \"info\",\n \"Info\",\n br(),\n default_box(\n title = \"Experiment Information\", width = 12,\n h4(\"ID:\", textOutput(ns(\"exp_ID\"), inline = TRUE)),\n h4(\"Description:\"),\n textAreaInput(ns(\"exp_desc\"), NULL, cols = 50, rows = 5, resize = \"none\"),\n h4(\"Notes:\"),\n textAreaInput(ns(\"exp_notes\"), NULL, width = \"100%\", rows = 10, resize = \"both\"),\n footer = actionButton(ns(\"save_info\"), label = \"Save\", icon = icon(\"save\"))\n )\n ),\n # devices ====\n tabPanel(\n value = \"devices\",\n \"Devices\",\n br(),\n spaces(3),\n deviceFetchAllUI(ns(\"devices_info\")),\n br(), br(),\n deviceDataUI(\n ns(\"devices_info\"), selected_options = \"r_exps\",\n title = \"Device Links & Live Data\",\n output = selectorTableUI(ns(\"device_links\")),\n add_footer = tagList(\n tooltipInput(\n actionButton,\n ns(\"device_links_add\"),\n label = \"Add Links\",\n icon = icon(\"link\"),\n tooltip = \"Add additional device links.\"\n ),\n tooltipInput(\n actionButton,\n ns(\"device_links_delete\"),\n label = \"Delete Links\",\n icon = icon(\"unlink\"),\n tooltip = \"Delete the selected device link(s).\",\n disabled = TRUE\n )\n )\n ),\n deviceStateUI(ns(\"devices_info\")),\n deviceInfoUI(ns(\"devices_info\")),\n deviceLogsUI(ns(\"devices_info\"))\n )\n )) %>% hidden()\n\n )\n\n}\n"
},
{
"alpha_fraction": 0.7116182446479797,
"alphanum_fraction": 0.7116182446479797,
"avg_line_length": 47.20000076293945,
"blob_id": "5f50ab806ee7709ccacf7ab83da5e7fcc2966664",
"content_id": "24cb5057cd65c130c16cdb0646d6c9fbd0afc8be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 482,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 10,
"path": "/inst/templates/credentials.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# CONFIDENTIAL INFORMATION\n# this should never be included in a git repository!!\naccess_token <- \"\" # particle access token\ndb_host <- \"\" # database host\ndb_name <- \"\" # database name\ndb_user <- \"\" # database user\ndb_pwd <- \"\" # database password\ntimezone <- \"\" # timezone for proper datetime conversions, run `OlsonNames()` for full list\ngroup_id <- \"\" # which group to log in as\napp_pwd <- NULL # application password (leave NULL if none or authentication is managed differently)\n"
},
{
"alpha_fraction": 0.5953372120857239,
"alphanum_fraction": 0.597835123538971,
"avg_line_length": 29.024999618530273,
"blob_id": "20c5662c7176c4a21b28b0f1eda324661f2e6450",
"content_id": "688013862d1269ddefb17a9e6b831e2eb5cf86c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1201,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 40,
"path": "/R/file_read.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Read data logs\n#'\n#' Read in an .rds, .csv or .xlsx file with device data logs downloaded from the GUI.\n#'\n#' @param file_path path to the .rds or .xlsx file\n#' @return device data logs data frame\n#' @family data logs functions\n#' @export\nll_read_device_data_logs_from_file <- function(file_path, quiet = default(quiet)) {\n\n # safety checks\n if (missing(file_path) || length(file_path) != 1)\n stop(\"please provide a single file path to an .rds, .csv OR .xlsx file\", call. = FALSE)\n if (!file.exists(file_path))\n stop(\"file path does not exist: \", file_path, call. = FALSE)\n\n # file info\n supported <- c(\"rds\", \"xlsx\", \"csv\")\n ext <- stringr::str_match(file_path, \"\\\\.(\\\\w+)$\")[,2]\n\n if (ext == \"rds\") {\n df <- readr::read_rds(file_path)\n } else if (ext == \"xlsx\") {\n df <- readxl::read_excel(file_path)\n } else if (ext == \"csv\") {\n df <- readr::read_csv(file_path)\n } else {\n glue::glue(\n \"unknown file extension: {ext}\",\n \" (supported: {paste0(supported, collapse = ', ')})\") %>%\n stop(call. = FALSE)\n }\n\n # info message\n if (!quiet) {\n glue::glue(\"Info: read {nrow(df)} device data log entries from .{ext} file\") %>% message()\n }\n\n return(df)\n}\n"
},
{
"alpha_fraction": 0.7667731642723083,
"alphanum_fraction": 0.7782747745513916,
"avg_line_length": 30.299999237060547,
"blob_id": "7f099d91a5d395d4efd722210af2d1fcb3f4195e",
"content_id": "87b8824891eda7aace0780b24de5a2e5bed76a44",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1565,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 50,
"path": "/inst/lambda/Makefile",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# simplifying make commands\nSHELL := /bin/bash\n\n# install the virtual environments and all dependencies\ninstall: bin bin/lambda lib/python2.7/site-packages/pg8000\n\nbin:\n\tvirtualenv . --python=python2.7\n\nbin/lambda:\n\tsource bin/activate && pip install python-lambda\n\nlib/python2.7/site-packages/pg8000:\n\tsource bin/activate && pip install pg8000\n\n# initialize project (only needed once)\ninit: install config.yaml webhooks service.py\n\nservice.py:\n\tsource bin/activate && lambda init\n\n# cleanup the virtual environment installation\nclean:\n\trm -R -f dist\n\trm -R -f lib\n\trm -R -f include\n\trm -R -f bin\n\n# invoke the lambda function for a state log\ninvoke_state_log: install\n\tsource bin/activate && lambda invoke -v --event-file event_state_log.json\n\n# invoke the lambda function for a data log\ninvoke_data_log: install\n\tsource bin/activate && lambda invoke -v --event-file event_data_log.json\n\n# configuration for deployment - generate from template file (automatically git-ignored)\nconfig.yaml: config_template.yaml\n\tcp config_template.yaml config.yaml\n\n# deploy the lambda function (requires config.yaml to exist and have the necessary credentials)\ndeploy: install\n\tsource bin/activate && lambda deploy\n\n# webhook files - generate webhook files from template (automatically git-ignored)\nwebhooks: webhook_data_log.json webhook_state_log.json\nwebhook_data_log.json: webhook_data_log_template.json\n\tcp webhook_data_log_template.json webhook_data_log.json\nwebhook_state_log.json: webhook_state_log_template.json\n\tcp webhook_state_log_template.json webhook_state_log.json\n"
},
{
"alpha_fraction": 0.6138453483581543,
"alphanum_fraction": 0.6149269938468933,
"avg_line_length": 40.088890075683594,
"blob_id": "9baa89479ee24ac104b5539d3484fd50c15b2d15",
"content_id": "125be2f86ad4d50814930a35c25dffa28ee8b87a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 1849,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 45,
"path": "/R/app_module_data_download.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "#' Data Download Server\n#' @param data_func reactive function providing the data\n#' @param filename_func reactive function returning the default file name\ndataDownloadServer <- function(input, output, session, data_func, filename_func) {\n\n # namespace\n ns <- session$ns\n\n # save dialog\n save_dialog <- reactive({\n modalDialog(\n title = \"Save data\", fade = FALSE, easyClose = TRUE, size = \"s\",\n textInput(ns(\"save_name\"), \"Filename:\", filename_func()),\n checkboxGroupInput(ns(\"format\"), \"Formats:\",\n c(\"R Data Storage (.rds)\" = \".rds\",\n \"Excel (.xlsx)\" = \".xlsx\",\n \"Comma Separated Values (.csv)\" = \".csv\")),\n footer =\n tagList(\n downloadButton(ns(\"download\"), label = \"Download\", icon = icon(\"download\")),\n modalButton(\"Close\")\n )\n )})\n observeEvent(input$download_dialog, showModal(save_dialog()))\n observe(shinyjs::toggleState(\"download\", length(input$format) > 0))\n\n # download handler\n output$download <- downloadHandler(\n filename = function() { isolate(stringr::str_replace(input$save_name, \"(\\\\.zip)?$\", \".zip\")) },\n content = function(filename) {\n module_message(ns, \"debug\", \"saving data \", input$save_name, \" (formats \", paste(input$format, collapse = \", \"), \")\")\n file_paths <- isolate(paste0(stringr::str_replace(input$save_name, \"\\\\.zip$\", \"\"), input$format))\n ll_write_device_data_logs_to_file(device_data_logs = data_func(), file_path = file_paths, zip = filename)\n })\n\n}\n\n\n#' Data Download Link\n#' @param label Label for the download link\ndataDownloadLink <- function(id, label = \"Save\", tooltip = \"Save the data in various formats\") {\n ns <- NS(id)\n tooltipInput(actionButton, ns(\"download_dialog\"), label, icon = icon(\"save\"),\n tooltip = tooltip)\n}\n"
},
{
"alpha_fraction": 0.596733033657074,
"alphanum_fraction": 0.5987749099731445,
"avg_line_length": 35.96226501464844,
"blob_id": "a17b339babf72ba36be59d06e08b269a4d36773e",
"content_id": "565b5c31b2a4d14d50fb0fe33086b727decd2ac6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 5877,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 159,
"path": "/R/data_utils.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# general utils =====\n\n# function to reliably turn a list of lists into a data frame\n# @param lists_df a data frame with a column that has lists in each row\n# @param column the name of the column that has the list\n# @param unnest_single_values whether to unnest single values (values that have a none or only a single entry for all retrieve records)\n# @param unpack_sub_lists - whether to unpack remaining list columns (only evaluated if unnest_single_values = TRUE)\n# @param nest_into_data_frame - whether to nest the unpacked lists into a data frame or keep the columns all unpacked\nunpack_lists_data_frame <- function(lists_df, column = lists, unnest_single_values = TRUE, unpack_sub_lists = FALSE, nest_into_data_frame = FALSE) {\n\n # id rows\n col_quo <- enquo(column)\n lists_df_original <- mutate(lists_df, ..nr.. = dplyr::row_number())\n\n # convert lists into data frame format\n lists_df <-\n lists_df_original %>%\n rename(lists = !!col_quo) %>%\n mutate(name = map(lists, ~if(length(.x) == 0) { NA_character_ } else { names(.x) })) %>%\n unnest(name, .drop = FALSE) %>%\n mutate(\n class = map2_chr(lists, name, ~class(.x[[.y]])[1]),\n length = map2_int(lists, name, ~length(.x[[.y]])),\n value = map2(lists, name, ~.x[[.y]]),\n name = str_to_lower(name)\n )\n\n # data classes\n data_classes <-\n lists_df %>%\n filter(name != \"NA\") %>%\n mutate(..order.. = dplyr::row_number()) %>%\n group_by(..order.., name) %>%\n summarize(\n data_class = unique(class)[1],\n value_max_n = as.integer(max(length))) %>%\n ungroup() %>% arrange(..order..) %>% select(-..order..)\n\n # lists wide\n lists_df_wide <- lists_df %>%\n select(..nr.., name, value) %>%\n spread(name, value)\n\n # deal with NAs\n if (\"<NA>\" %in% names(lists_df_wide))\n lists_df_wide <- select(lists_df_wide, -`<NA>`)\n\n # remove columns that hold no data at all\n null_cols <- filter(data_classes, value_max_n == 0 | data_class == \"NULL\")$name\n data_classes <- filter(data_classes, !(value_max_n == 0 | data_class == \"NULL\"))\n lists_df_wide <- lists_df_wide[!names(lists_df_wide) %in% null_cols]\n\n # fill NULL values with NA to not loose records during unnesting (except for lists)\n if (nrow(data_classes) > 0) {\n for (i in 1:nrow(data_classes)) {\n lists_df_wide <-\n with(data_classes[i,], {\n # make sure the function exists\n if (exists(data_class)) {\n if (data_class %in% c(\"character\", \"integer\", \"numeric\", \"logical\"))\n default_value <- do.call(str_c(\"as.\", data_class), args = list(NA))\n else\n default_value <- do.call(data_class, args=list())\n # note, could also do this with a right_join back in (but perhaps slower?)\n mutate(lists_df_wide,\n !!name := map(!!sym(name), ~if (is.null(.x) || length(.x) == 0) { default_value } else { .x }))\n } else {\n # don't do anything if it's not a standard class\n lists_df_wide\n }\n })\n }\n }\n\n # unnest all the ones that have only single value\n if (unnest_single_values) {\n unnest_cols <- filter(data_classes, value_max_n == 1,\n data_class %in% c(\"character\", \"integer\", \"numeric\", \"logical\"))$name\n lists_df_wide <- unnest(lists_df_wide, !!!syms(unnest_cols), .drop = FALSE)\n }\n\n # unpack sub lists\n if (unpack_sub_lists) {\n unpack_cols <- filter(data_classes, data_class == \"list\")$name\n for (col in unpack_cols) {\n new_data <<- lists_df_wide %>% rename(..parent_nr.. = ..nr..) 
%>%\n unpack_lists_data_frame(\n column = !!sym(col), unnest_single_values = unnest_single_values,\n # don't allow recursive unpacking for now, always nest into data frame\n unpack_sub_lists = FALSE, nest_into_data_frame = TRUE)\n\n lists_df_wide <-\n lists_df_wide %>% rename(..parent_nr.. = ..nr..) %>%\n unpack_lists_data_frame(\n column = !!sym(col), unnest_single_values = unnest_single_values,\n # don't allow recursive unpacking for now, always nest into data frame\n unpack_sub_lists = FALSE, nest_into_data_frame = TRUE) %>%\n rename(..nr.. = ..parent_nr..)\n }\n }\n\n # nest into data frame\n if (nest_into_data_frame) {\n lists_df_wide <- lists_df_wide %>%\n select(!!!syms(c(\"..nr..\", data_classes$name))) %>%\n nest(-..nr.., .key = !!col_quo)\n } else {\n # no nesting, just select right columns\n lists_df_wide <- select(lists_df_wide, !!!syms(c(\"..nr..\", data_classes$name)))\n }\n\n # merge with original data\n lists_df_original %>%\n select(!!quo(-!!sym(quo_text(col_quo)))) %>%\n left_join(lists_df_wide, by = \"..nr..\") %>%\n select(-..nr..)\n}\n\n# remove list columns from a data frame\nremove_list_columns <- function(df) {\n if (missing(df)) stop(\"no data frame supplied\", call. = FALSE)\n list_cols <- df %>% purrr:::map_lgl(rlang::is_list)\n df[!list_cols]\n}\n\n# data simplification ====\n\nspread_state_columns <- function(df) {\n df %>%\n mutate(value = ifelse(!is.na(units), str_c(value, \" \", units), value)) %>%\n select(-units) %>%\n spread(key, value)\n}\n\nspread_data_columns <- function(df) {\n df %>%\n mutate(\n key = str_c(\"#\", idx, \": \", key),\n value = ifelse(!is.na(units), str_c(value, \" \", units), value)\n ) %>%\n select(-units, -idx) %>%\n spread(key, value)\n}\n\n# calculations =====\n\n#' Convert datetime to duration\n#'\n#' Does not need to be sorted upfront, makes different to minimum datetime within the group_by groupings.\n#'\n#' @param df data frame with datetime column\n#' @param units any time unit that lubridate understands\n#' @return df with new column \\code{duration}\n#' @export\nll_calculate_duration <- function(df, units) {\n stopifnot(!missing(units))\n df %>%\n mutate(duration = as.duration(datetime - min(datetime)) %>% as.numeric(units))\n}\n"
},
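A minimal usage sketch for the `unpack_lists_data_frame()` helper above (not part of the repository; the toy column names `id` and `payload` are invented, and it assumes the function plus its tidyverse dependencies are loaded with the tidyr version this package targets):

```r
library(dplyr)

# two records whose list column holds named lists of unequal shape
df <- tibble(
  id = 1:2,
  payload = list(list(a = 1L, b = "x"), list(a = 2L))
)

# with the defaults, scalar entries become ordinary columns; the missing
# `b` for the second record is filled with NA instead of dropping the row
unpack_lists_data_frame(df, column = payload)
#> expected shape: columns id, a, b, with b == NA in the second row
```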
{
"alpha_fraction": 0.5843421816825867,
"alphanum_fraction": 0.5859563946723938,
"avg_line_length": 30.367088317871094,
"blob_id": "9591aa3ed161ac7b04d2e73b65745f624867b045",
"content_id": "d86199efc80a435e9d0ccc4e9a5eb25f6d1c627a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2478,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 79,
"path": "/R/db_utils.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# general data retrieval functions\nvalidate_db_connection <- function(con_quo) {\n if(!is_quosure(con_quo)) stop(\"connection not supplied as quosure\", call. = FALSE)\n con <- resolve_defaults(con_quo)\n while (is_quosure(con)) con <- eval_tidy(con)\n return(con)\n}\n\n# convert data to sql compatible\nto_sql <- function(..., named = FALSE) {\n values <- list(...)\n if (length(values) == 1 && is.list(values[1])) values <- values[[1]]\n convert_class_to_sql <- function(.x) {\n if (is.null(.x) || is.na(.x)) {\n \"NULL\"\n } else if (is.character(.x)) {\n sprintf(\"'%s'\", str_replace(.x, fixed(\"'\"), \"''\"))\n } else if (is.numeric(.x)) {\n as.character(.x)\n } else if (is.logical(.x)) {\n if (.x) 'true' else 'false'\n } else {\n stop(glue(\"unsupported value type: {class(.x)[1]}\"), call. = FALSE)\n }\n }\n sql_values <- map_chr(values, convert_class_to_sql)\n if (named) {\n if (is.null(names(values)) || any(names(values) == \"\")) stop(\"must provide names for each value\", call. = FALSE)\n sql_values <- sprintf(\"%s=%s\", names(values), sql_values)\n }\n glue::glue_collapse(sql_values, \", \")\n}\n\n# convert whole df to sql compatible list of values\ndf_to_sql <- function(df) {\n df %>%\n ungroup() %>%\n mutate(rowid = dplyr::row_number()) %>%\n nest(-rowid) %>%\n mutate(sql = map_chr(data, ~to_sql(as.list(.x)))) %>%\n { str_c(\"(\", glue::glue_collapse(.$sql, sep = \"), (\"), \")\") }\n}\n\n# make insert statement from data frame\ndf_to_insert_sql <- function(df, table) {\n glue(\"INSERT INTO {table} ({glue::glue_collapse(names(df), sep = ', ')}) VALUES {df_to_sql(df)}\")\n}\n\n# run sql with error catching\nrun_sql <- function(sql, con) {\n con <- validate_db_connection(enquo(con))\n tryCatch(\n result <- dbExecute(con, as.character(sql)),\n error = function(e) {\n glue(\"SQL statement failed ('{sql}') with message:\\n{e$message}\") %>% stop(call. = FALSE)\n })\n return(result)\n}\n\n# run insert sql\nrun_insert_sql <- function(df, table, con, on_conflict_constraint = NULL, on_conflict_do = \"nothing\", quiet) {\n result <-\n df %>%\n df_to_insert_sql(table) %>%\n {\n if (!is.null(on_conflict_constraint))\n paste(., \"ON CONFLICT ON CONSTRAINT\", on_conflict_constraint, \"DO\", on_conflict_do)\n else .\n } %>%\n run_sql(con)\n\n if (!quiet) {\n glue::glue(\n \"{result} record(s) created\",\n if(!is.null(on_conflict_constraint)) \" or updated\" else \"\", \".\") %>%\n message()\n }\n return(result)\n}\n"
},
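The SQL helpers above are easiest to grasp from a worked example. A sketch assuming the functions are sourced together with dplyr, tidyr, purrr, stringr and glue (expected outputs shown as comments):

```r
to_sql("O'Brien", 42, TRUE)
#> "'O''Brien', 42, true"    single quotes doubled, logicals lowercased

to_sql(name = "abc", n = 5, named = TRUE)
#> "name='abc', n=5"         named form, e.g. for UPDATE-style clauses

df <- dplyr::tibble(name = c("a", "b"), n = c(1, 2))
df_to_insert_sql(df, "samples")
#> INSERT INTO samples (name, n) VALUES ('a', 1), ('b', 2)
```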
{
"alpha_fraction": 0.6187568306922913,
"alphanum_fraction": 0.6224645376205444,
"avg_line_length": 36.89256286621094,
"blob_id": "38bf5b42a3ec5b6aa2d615ba8bdfbeaad744360a",
"content_id": "844f66a4caa63b72e04f3b0c9a18d5481a96f7f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 4585,
"license_type": "no_license",
"max_line_length": 435,
"num_lines": 121,
"path": "/R/plotting.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# add data traces, and groups for plotting\nprepare_data_for_plotting <- function(device_data_logs) {\n if (nrow(device_data_logs) == 0) return(device_data_logs)\n device_data_logs %>%\n # grouping and trace with units\n mutate(\n group = paste(exp_id, device_name, data_key, sep = \"_\"),\n data_trace =\n ifelse(!is.na(data_units) & nchar(data_units) > 0, sprintf(\"%s [%s]\", data_key, data_units), data_key)\n )\n}\n\n#' Plot device data logs\n#'\n#' This function helps to visualize data logs retrieved with \\code{\\link{ll_get_device_data_logs}} or read in from downloaded data logs.\n#'\n#' @param device_data_logs data logs\n#' @param duration_breaks specify a duration unit or interval (e.g. \"hours\" or \"20 mins\") to indicate whether x axis should be displayed as a duration since the first data point within each experiment. If interval is specified, this will be used for axis intervals, if just time unit the axis will have regular auto-ticks. If duration_breaks = NULL (the default), the x axis is displayed as regular date time (set date_breaks instead).\n#' @param date_breaks formate the datetime breaks if not plotting duration (i.e. is ignored if duration_units is provided)\n#' @param include_device_info whether to include the device info (name and index) in the data trace label\n#' @param overlay_experiments whether to overlay the experiments instead of paneling (default is panels). This usually makes most sense if x axis is a duration (set via duration units)\n#' @family data logs functions\n#' @export\nll_plot_device_data_logs <- function(device_data_logs, filter = NULL, show_error_range = FALSE, exclude_outliers = FALSE, duration_breaks = NULL, date_breaks = NULL, include_device_info = FALSE, overlay_experiments = FALSE, quiet = default(quiet)) {\n\n filter_quo <- rlang::enquo(filter)\n\n # duration\n if(!is.null(duration_breaks)) {\n duration_matches <- stringr::str_match(duration_breaks, \"(\\\\d+\\\\.?\\\\d*)?\\\\s?([a-zA-Z]+)\")\n duration_interval <- as.numeric(duration_matches[1,2])\n duration_units <- duration_matches[1,3]\n }\n\n # plot df\n plot_df <- device_data_logs %>%\n # no missing values\n dplyr::filter(!is.na(data_value)) %>%\n # duration\n {\n if (!is.null(duration_breaks))\n group_by(., exp_id) %>% ll_calculate_duration(duration_units) %>% ungroup()\n else .\n } %>%\n # filter\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n # grouping and trace with units\n prepare_data_for_plotting()\n\n # outliers\n if (exclude_outliers) {\n plot_df <- plot_df %>% identify_data_outliers() %>%\n mutate(data_value = ifelse(outlier, NA_real_, data_value))\n }\n\n # info messages\n if (!quiet) {\n glue(\"Info: plotting {nrow(plot_df)} data log entries\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" (filtered with '\\\", quo_text(filter_quo), \\\"')\\\") else ''}... 
\") %>%\n message()\n }\n\n # plot\n p <- ggplot(plot_df) + aes(y = data_value)\n\n if (include_device_info) {\n p <- p %+% aes(color = sprintf(\"%s (%d): %s\", device_name, data_idx, data_trace), group = group)\n } else {\n p <- p %+% aes(color = data_trace, group = group)\n }\n\n # error range\n if (show_error_range) {\n if (include_device_info) {\n p <- p + aes(fill = sprintf(\"%s (%d): %s\", device_name, data_idx, data_trace))\n } else {\n p <- p + aes(fill = data_trace)\n }\n p <- p +\n geom_ribbon(\n data = function (df) dplyr::filter(df, !is.na(data_sd)),\n mapping = aes(ymin = data_value - data_sd, ymax = data_value + data_sd, color = NULL),\n alpha = 0.3\n )\n }\n\n p <- p + geom_line() + theme_bw()\n\n # experiments overlay\n if (overlay_experiments) {\n # traces overlay\n p <- p %+% aes(linetype = exp_id) +\n facet_grid(data_trace ~ ., scales = \"free\")\n } else {\n # panel\n p <- p +\n facet_grid(data_trace ~ exp_id, scales = \"free\")\n }\n\n # duration plot aesthetics\n if (!is.null(duration_breaks)) {\n p <- p %+% aes(x = duration) +\n labs(x = str_c(\"Duration [\", duration_units, \"]\"))\n if (!is.na(duration_interval))\n p <- p %+% ggplot2::scale_x_continuous(breaks = seq(0, max(plot_df$duration), by = duration_interval))\n } else {\n if (!is.null(date_breaks))\n p <- p %+% aes(x = datetime) + scale_x_datetime(date_breaks = date_breaks)\n else\n p <- p %+% aes(x = datetime) + scale_x_datetime()\n p <- p + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) +\n labs(x = NULL)\n }\n\n p <- p + labs(y = NULL, color = NULL, fill = NULL, linetype = NULL)\n\n return(p)\n}\n"
},
{
"alpha_fraction": 0.6632124185562134,
"alphanum_fraction": 0.6658031344413757,
"avg_line_length": 23.125,
"blob_id": "8f8579d06f4de22f6565e4d8038a9ae7174cc6e6",
"content_id": "6aac5e95b20ca2f8188b9237e9fc247f33552596",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 16,
"path": "/R/zzz.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "initialize_options <- function() {\n # set default package options (always resets options to force deliberate change of settings)\n default_options <- list(\n ll.quiet = FALSE,\n ll.access_token = \"\",\n ll.con = NULL,\n ll.request_timeout = 3,\n ll.debug = FALSE\n )\n options(default_options)\n}\n\n.onLoad <- function(libname, pkgname) {\n initialize_options()\n invisible()\n}\n"
},
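`zzz.R` above only sets the `ll.*` options; the `default()` helper used throughout the other files is not included in this snapshot. A plausible sketch of how such a lookup usually works (an assumption, not the package's verified definition):

```r
# resolve default(quiet) to getOption("ll.quiet"), default(con) to
# getOption("ll.con"), and so on (hypothetical implementation)
default <- function(name) {
  getOption(paste0("ll.", deparse(substitute(name))))
}

# users can then override package behaviour per session:
options(ll.quiet = TRUE)          # silence the "Info: ..." messages
options(ll.request_timeout = 10)  # more patience for slow connections
```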
{
"alpha_fraction": 0.6270618438720703,
"alphanum_fraction": 0.627504289150238,
"avg_line_length": 36.406620025634766,
"blob_id": "57189e8f7f612a56ae38a55201831a3827448279",
"content_id": "090dba14fd3392784b637d47c0c75beebcdc7bf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 15823,
"license_type": "no_license",
"max_line_length": 431,
"num_lines": 423,
"path": "/R/db_read.R",
"repo_name": "KopfLab/labware_c3",
"src_encoding": "UTF-8",
"text": "# devices ====\n\n#' Retrieve devices\n#' @param group_id devices from which group to fetch\n#' @return devices\n#' @export\nll_get_devices <- function(group_id = default(group_id), filter = NULL,\n select = c(device_id, particle_id, device_name, device_type_id, device_type_desc),\n in_use_only = TRUE, con = default(con), quiet = default(quiet)) {\n\n con <- validate_db_connection(enquo(con))\n filter_quo <- enquo(filter)\n select_quo <- enquo(select)\n group_id_value <- group_id\n\n if (!quiet) {\n glue(\"Info: retrieving {if(in_use_only) 'in-use' else 'all'} devices \",\n \"for group '{group_id_value}'\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" with filter '\\\", quo_text(filter_quo), \\\"'\\\") else ''}\",\n \"...\") %>% message(appendLF = FALSE)\n }\n df <- tbl(con, \"devices\") %>%\n left_join(tbl(con, \"device_types\"), by = \"device_type_id\") %>%\n dplyr::filter(group_id == group_id_value, in_use | !in_use_only) %>%\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n dplyr::select(!!select_quo) %>%\n collect()\n if (!quiet) glue(\"found {nrow(df)}.\") %>% message()\n return(df)\n}\n\n#' Get device id\n#'\n#' Convenience shortcut function to get the database integer device identifier for one or multiple devices.\n#' @export\nll_get_device_ids <- function(device_names, group_id = default(group_id), quiet = default(quiet)) {\n device_names_filter <- unique(device_names)\n devices <- ll_get_devices(group_id = group_id, filter = !!quo(device_name %in% !!device_names_filter), quiet = quiet)\n device_name_to_id_map <- setNames(devices$device_id, devices$device_name)\n if (any(missing <- !device_names %in% names(device_name_to_id_map))) {\n glue(\"the folowing device_name(s) do not exist for group '{group_id}' and can therefore not be mapped to id(s): {glue::glue_collapse(device_names[missing], sep = ', ')}\") %>%\n stop(call. = FALSE)\n }\n return(device_name_to_id_map[device_names])\n}\n\n# exeperiments =====\n\n#' Retrieve experiments\n#' @param group_id experiments from which group to fetch\n#' @return experiments\n#' @export\nll_get_experiments <- function(group_id = default(group_id), filter = NULL, convert_to_TZ = Sys.timezone(), con = default(con), quiet = default(quiet)) {\n\n con <- validate_db_connection(enquo(con))\n filter_quo <- enquo(filter)\n group_id_value <- group_id\n\n if (!quiet) {\n glue(\"Info: retrieving experiments for group '{group_id_value}'\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" with filter '\\\", quo_text(filter_quo), \\\"'\\\") else ''}\",\n \"... 
\") %>% message(appendLF = FALSE)\n }\n df <- tbl(con, \"experiments\") %>%\n dplyr::filter(group_id == group_id_value) %>%\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n arrange(desc(recording), desc(last_recording_change)) %>%\n collect()\n\n if (!quiet) glue(\"found {nrow(df)}.\") %>% message(appendLF = FALSE)\n\n if (\"last_recording_change\" %in% names(df)) {\n # local TZ conversion\n if (!is.null(convert_to_TZ)) {\n if (!quiet) glue(\"Converting last_recording_change to timezone '{convert_to_TZ}'.\") %>%\n message(appendLF = FALSE)\n df <- mutate(df, last_recording_change = with_tz(last_recording_change, convert_to_TZ))\n }\n }\n message(\"\")\n\n return(df)\n}\n\n#' Retrieve experiment device links\n#'\n#' Returns experiment-device links (only for in-use devices) joined with experiment, devices and device types tables so filter conditions can be applied on any of these as well.\n#'\n#' @inheritParams ll_get_experiments\n#' @return experiments_devices\n#' @export\nll_get_experiment_device_links <- function(\n group_id = default(group_id), filter = NULL,\n select = c(exp_device_data_id, exp_id, recording, device_id, device_name, particle_id, data_idx, active),\n con = default(con), quiet = default(quiet)) {\n\n con <- validate_db_connection(enquo(con))\n filter_quo <- enquo(filter)\n select_quo <- enquo(select)\n group_id_value <- group_id\n\n if (!quiet) {\n glue(\"Info: retrieving experiment-device links for active devices in group '{group_id_value}'\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" with filter '\\\", quo_text(filter_quo), \\\"'\\\") else ''}... \") %>%\n message(appendLF = FALSE)\n }\n\n exp_devices <- tbl(con, \"experiment_device_data\") %>%\n left_join(tbl(con, \"devices\"), by = \"device_id\") %>%\n left_join(tbl(con, \"device_types\"), by = \"device_type_id\") %>%\n left_join(tbl(con, \"experiments\"), by = c(\"exp_id\", \"group_id\")) %>%\n dplyr::filter(group_id == group_id_value, in_use) %>%\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n dplyr::select(!!select_quo, device_name, data_idx) %>%\n collect()\n\n if (nrow(exp_devices) > 0)\n exp_devices <- exp_devices %>%\n # arrange\n arrange(device_name, data_idx) %>%\n dplyr::select(!!select_quo)\n\n if (!quiet) glue(\"found {nrow(exp_devices)} links. \") %>% message()\n return(exp_devices)\n}\n\n# logs ====\n\n#' Retrieve state logs\n#'\n#' Returns state logs joined with devices table so filter conditions can be applied on the devices as well. Sorted by descending order (i.e. latest record first).\n#'\n#' @param filter what filter conditions to apply, if any (forwarded to \\link[dplyr]{filter})\n#' @param select what columns to select (forwarded to \\link[select]{select}), by default a selection of the most commonly used columns\n#' @param max_rows if provided, only selects the indicated number of rows (more efficient this way than part of the filter)\n#' @param convert_to_TZ if provided, converts the log_datetime to the provided timezone (by default the local one stored in \\code{Sys.timezone()}). 
If NULL, will keep it as UTC.\n#' @return device state logs\n#' @export\nll_get_device_state_logs <- function(\n group_id = default(group_id), filter = NULL,\n select = c(device_id, device_name, device_state_log_id, log_datetime, log_type, log_message, starts_with(\"state\"), notes),\n max_rows = NULL,\n convert_to_TZ = Sys.timezone(),\n con = default(con), quiet = default(quiet)) {\n\n con <- validate_db_connection(enquo(con))\n filter_quo <- enquo(filter)\n select_quo <- enquo(select)\n group_id_value <- group_id\n\n if (!quiet) {\n glue(\"Info: retrieving device state logs for devices in group '{group_id_value}'\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" with filter '\\\", quo_text(filter_quo), \\\"'\\\") else ''}\",\n \"{if(!is.null(max_rows)) str_c(\\\", limited to \\\", max_rows, \\\" rows max\\\") else ''}... \") %>%\n message(appendLF = FALSE)\n }\n\n logs <- tbl(con, \"device_state_logs\") %>%\n left_join(tbl(con, \"devices\"), by = \"device_id\") %>%\n arrange(desc(device_state_log_id)) %>%\n dplyr::filter(group_id == group_id_value) %>%\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n {\n if (!is.null(max_rows)) dplyr::filter(., dplyr::row_number() <= max_rows)\n else .\n } %>%\n dplyr::select(!!select_quo) %>%\n collect()\n\n\n if (!quiet) glue(\"found {nrow(logs)} records. \") %>% message(appendLF = FALSE)\n\n # local TZ conversion\n if (!is.null(convert_to_TZ) && \"log_datetime\" %in% names(logs)) {\n if (!quiet) glue(\"Converting to timezone '{convert_to_TZ}'.\") %>%\n message(appendLF = FALSE)\n logs <- mutate(logs, log_datetime = with_tz(log_datetime, convert_to_TZ))\n }\n message(\"\")\n\n return(logs)\n}\n\n#' Retrieve data logs\n#'\n#' Returns data logs from the database joined with experiments, devices, and experiment_device_data (for prefix only) tables so filter conditions can be applied on these as well. Sorted by descending order (i.e. latest record first). Note that the group_id filter restricts the selected experiments, device data may theoretically come from devices not (or no longer) associated with the same group although this is rarely the case.\n#'\n#' @param filter what filter conditions to apply, if any (forwarded to \\link[dplyr]{filter})\n#' @param select what columns to select (forwarded to \\link[dplyr]{select}), by default a selection of the most commonly used columns\n#' @param max_rows if provided, only selects the indicated number of rows (more efficient this way than part of the filter)\n#' @param convert_to_TZ converts the log_datetime to the provided timezone (by default the system's local timezone stored in \\code{Sys.timezone()}).\n#' @return device data logs\n#' @family data logs functions\n#' @export\nll_get_device_data_logs <- function(\n group_id = default(group_id), filter = NULL,\n select = c(exp_id, device_id, device_name, device_data_log_id, datetime, data_idx, starts_with(\"data_key\"), starts_with(\"data_\")),\n max_rows = NULL,\n convert_to_TZ = Sys.timezone(),\n con = default(con), quiet = default(quiet)) {\n\n con <- validate_db_connection(enquo(con))\n filter_quo <- enquo(filter)\n select_quo <- enquo(select)\n group_id_value <- group_id\n\n if (!quiet) {\n glue(\"Info: retrieving device data logs associated with experiments in group '{group_id_value}'\",\n \"{if(!quo_is_null(filter_quo)) str_c(\\\" with filter '\\\", quo_text(filter_quo), \\\"'\\\") else ''}\",\n \"{if(!is.null(max_rows)) str_c(\\\", limited to \\\", max_rows, \\\" rows max\\\") else ''}... 
\") %>%\n message(appendLF = FALSE)\n }\n\n logs <-\n tbl(con, \"device_data_logs\") %>%\n left_join(dplyr::select(tbl(con, \"devices\"), -group_id), by = \"device_id\") %>%\n left_join(dplyr::select(tbl(con, \"experiment_device_data\"), exp_device_data_id, exp_id), by = \"exp_device_data_id\") %>%\n left_join(tbl(con, \"experiments\"), by = \"exp_id\") %>%\n arrange(desc(device_data_log_id)) %>%\n dplyr::filter(group_id == group_id_value) %>%\n {\n if(!quo_is_null(filter_quo)) dplyr::filter(., !!filter_quo)\n else .\n } %>%\n {\n if (!is.null(max_rows)) dplyr::filter(., dplyr::row_number() <= max_rows)\n else .\n } %>%\n # for time offset calculations\n mutate(datetime = log_datetime) %>%\n dplyr::select(!!select_quo, log_time_offset) %>%\n collect()\n\n if (!quiet) glue(\"found {nrow(logs)} entries. \") %>% message(appendLF = FALSE)\n\n if (nrow(logs) == 0) {\n message()\n return(logs)\n }\n\n # offset\n if (\"datetime\" %in% names(logs))\n logs <- mutate(logs, datetime = datetime - log_time_offset)\n\n # local TZ conversion\n if (!is.null(convert_to_TZ) && any(c(\"log_datetime\", \"datetime\") %in% names(logs))) {\n if (!quiet) glue(\"Converting to timezone '{convert_to_TZ}'.\") %>%\n message(appendLF = FALSE)\n if (\"log_datetime\" %in% names(logs))\n logs <- mutate(logs, log_datetime = with_tz(log_datetime, convert_to_TZ))\n if (\"datetime\" %in% names(logs))\n logs <- mutate(logs, datetime = with_tz(datetime, convert_to_TZ))\n }\n message(\"\")\n\n # after time offset calculation\n return(logs %>% dplyr::select(!!select_quo))\n}\n\n#' Reset device data logs cache\n#'\n#' @param exp_id experiment ID(s)\n#' @export\nll_reset_exp_device_data_logs_cache <- function(exp_id, quiet = default(quiet)) {\n\n # cache paths\n cache_paths <- data_frame(\n exp_id = exp_id,\n path = file.path(\"cache\", str_c(exp_id, \"_data_logs.rds\")),\n exists = file.exists(path)\n ) %>% filter(exists)\n\n # info\n if (!quiet) {\n glue(\"Info: resetting local data logs cache for experiment(s) '{glue::glue_collapse(exp_id, sep = ', ')}'... \") %>%\n message(appendLF = FALSE)\n }\n\n # write cache\n if (nrow(cache_paths) > 0) {\n with(cache_paths, walk(path, ~file.remove(.x)))\n }\n\n if (!quiet) message(\"complete.\")\n}\n\n#' Get device data logs for specific experiment(s)\n#'\n#' Also supports efficient caching of the retrieved data.\n#'\n#' @inheritParams ll_get_device_state_logs\n#' @param exp_id experiment ID(s)\n#' @param ... forwarded to ll_get_device_state_logs\n#' @param cache whether to cache the data logs\n#' @param reac_cache whether to read the cache\n#' @export\nll_get_exp_device_data_logs <- function(exp_id, group_id = default(group_id), ..., cache = TRUE, read_cache = TRUE, quiet = default(quiet)) {\n\n # cache paths\n cache_paths <- data_frame(\n exp_id = exp_id,\n path = file.path(\"cache\", str_c(exp_id, \"_data_logs.rds\")),\n exists = file.exists(path)\n )\n\n # logs\n logs <- data_frame()\n\n # read cache\n if (read_cache && any(cache_paths$exists)) {\n\n if (!quiet) {\n glue(\"Info: reading data logs from local cache for experiment(s) \",\n \"'{glue::glue_collapse(filter(cache_paths, exists)$exp_id, sep = ', ')}'... 
\") %>%\n message(appendLF = FALSE)\n }\n\n logs <- cache_paths %>% filter(exists) %>%\n mutate(data = map(path, read_rds)) %>%\n select(-exp_id, -path, -exists) %>%\n unnest(data)\n\n if (!quiet) glue(\"recovered {nrow(logs)} records; querying database for newer records...\") %>% message()\n\n # more complex filter to account for each experiment\n filter_quo <-\n cache_paths %>% select(exp_id) %>%\n left_join(logs %>% group_by(exp_id) %>% summarize(max_device_data_log_id = max(device_data_log_id)), by = \"exp_id\") %>%\n with(ifelse(\n !is.na(max_device_data_log_id),\n sprintf(\"(exp_id == \\\"%s\\\" & device_data_log_id > %g)\", exp_id, max_device_data_log_id),\n sprintf(\"exp_id == \\\"%s\\\"\", exp_id))) %>%\n collapse (sep = \" | \") %>%\n parse_expr()\n } else {\n # simple filter for all experiments\n filter_quo <- quo(exp_id %in% !!cache_paths$exp_id)\n }\n\n # fetch from database\n db_logs <- ll_get_device_data_logs(group_id = group_id, filter = !!filter_quo, ..., quiet = quiet)\n logs <- bind_rows(db_logs, logs)\n\n # safety check\n if (nrow(logs) == 0) return(logs)\n\n # write cache\n if (cache && nrow(db_logs) > 0) {\n if (!quiet) {\n glue(\"Info: updating local data logs cache for experiment(s) '{glue::glue_collapse(exp_id, sep = ', ')}'... \") %>%\n message(appendLF = FALSE)\n }\n\n if(!dir.exists(\"cache\")) dir.create(\"cache\")\n logs %>%\n left_join(cache_paths, by = \"exp_id\") %>%\n select(-exists) %>%\n nest(-path) %>%\n with(walk2(data, path, ~write_rds(x = .x, path = .y)))\n\n if (!quiet) message(\"complete.\")\n }\n\n return(logs)\n}\n\n# other ====\n\n#' retrieve active cameras\n#' @export\nll_get_cameras <- function(con = default(con), quiet = default(quiet)) {\n con <- validate_db_connection(enquo(con))\n if (!quiet) cat(\"\\nInfo: retrieving active cameras... \")\n df <- tbl(con, \"cameras\") %>%\n filter(active) %>%\n collect()\n if (!quiet) cat(glue(\"found {nrow(df)}\\n\\n\"))\n return(df)\n}\n\n#' Synchronize device names\n#' @note no longer necessary, sync happens during logging\n#' Update device names in the database from the cloud.\n#' @return devices with updated device names\n#' @export\nll_snyc_device_names_from_cloud <- function(in_use_only = TRUE, con = default(con), access_token = default(access_token), quiet = default(quiet)) {\n\n stop(\"this function is deprecated\", call. = FALSE)\n con <- validate_db_connection(enquo(con))\n\n devices <-\n ll_get_devices(in_use_only = in_use_only, con = con, quiet = quiet) %>%\n mutate(\n device_name = map_chr(device_particle_id, ll_get_device_name_from_cloud, access_token = access_token, quiet = quiet),\n last_name_sync = with_tz(now(), tzone = \"UTC\"),\n values = map2_chr(device_particle_id, device_name, ~sprintf(\"('%s', '%s')\", .x, str_replace(.y, fixed(\"'\"), \"''\")))\n )\n\n sql <-\n glue(\n \"update devices as tbl set device_name = map.device_name\n from (values {glue::glue_collapse(devices$values, ',\\n')}) as map(device_particle_id, device_name)\n where map.device_particle_id = tbl.device_particle_id\")\n\n if (!quiet) cat(glue(\"\\nInfo: updating device names in DB... \"))\n result <- dbExecute(con, sql)\n if (!quiet) cat(glue(\"{result} updated successfully.\\n\\n\"))\n\n # return the updated devices invisibly\n return(invisible(devices %>% select(-values)))\n}\n"
}
] | 38 |
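One non-obvious technique in `ll_get_exp_device_data_logs()` above is how it fetches only records newer than the cache: it builds one boolean clause per experiment as a string, collapses them with `|`, and parses the result back into an expression for `dplyr::filter()`. An illustration with hypothetical experiment and log ids (not taken from any real database):

```r
library(rlang)

# suppose exp "A" is cached up to log id 100 and exp "B" has no cache yet
clauses <- c('(exp_id == "A" & device_data_log_id > 100)',
             'exp_id == "B"')
filter_expr <- parse_expr(paste(clauses, collapse = " | "))
filter_expr
#> (exp_id == "A" & device_data_log_id > 100) | exp_id == "B"
# the expression is then spliced into the database query via !!filter_expr
```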
saibunny/fil-shortstory-svm-thesis | https://github.com/saibunny/fil-shortstory-svm-thesis | 361ef278811a176d931faf49c5471ca5360f5632 | 42968b36b87cbde21d2c852e7cf4db57aa35f262 | a8c204d50de09c0008e7a016c10320824d7d55c8 | refs/heads/master | 2020-03-30T15:05:29.076711 | 2018-11-20T16:44:10 | 2018-11-20T16:44:10 | 151,347,905 | 0 | 2 | null | 2018-10-03T01:50:07 | 2018-11-20T16:44:28 | 2018-12-11T16:05:01 | Python | [
{
"alpha_fraction": 0.5987460613250732,
"alphanum_fraction": 0.6018808484077454,
"avg_line_length": 17.764705657958984,
"blob_id": "caa6375cea43a535ae2e54be0d828eb72b01d8d1",
"content_id": "e098a710595caa9ae772fd710b86db59ba19b666",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 319,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 17,
"path": "/stopwords.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "import csv\n\ndef loadTagset(filename):\n loaded = []\n tagset = []\n with open(filename) as csvfile:\n lines = csv.reader(csvfile)\n loaded = list(lines)\n for tag in loaded:\n tagset.append(tag[0])\n return tagset\n\n\n\ntagset = loadTagset(\"data\\\\tags.csv\")\nif \"LM\" in tagset:\n print(\"yes\")\n"
},
{
"alpha_fraction": 0.6603221297264099,
"alphanum_fraction": 0.6632503867149353,
"avg_line_length": 21.766666412353516,
"blob_id": "3ea9d7313e6be7e9cc31b221d21796365397deec",
"content_id": "11c0cb371529d0c9a1ae22e0dedc82a7bee071fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 683,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 30,
"path": "/py4jTest.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "from py4j.java_gateway import JavaGateway\n# gateway = JavaGateway()\n#\n# tagger = gateway.entry_point.getPost()\n#\n#\n# sentence = \"high school. taga-punch, mag-shopping, pala-iyot pala iyot taga-bake taga bake nag i strike nag-i-strike\"\n# print(tagger.tagPOS(sentence))\n\n\n\nclass POSTagger:\n def __init__(self):\n self.gateway = JavaGateway()\n self.tagger = self.gateway.entry_point.getPost()\n\n def tagSentence(self, sentence):\n return self.tagger.tagPOS(sentence)\n\n def returnTag(self, word):\n word = self.tagger.tagPOS(word)\n wordtag = word.split(\"|\")\n return wordtag[1]\n\n\n\n\n# tagger = POSTagger()\n#\n# print(tagger.returnTag(\"high\"))\n"
},
{
"alpha_fraction": 0.6341463327407837,
"alphanum_fraction": 0.6390243768692017,
"avg_line_length": 19.5,
"blob_id": "68c1e4aaded8562436fc36b1c18cce1324b2ef24",
"content_id": "a671b614ce6e6bca5b6a179af95a424e944d493f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 205,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 10,
"path": "/spacytest.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "from spacy.lang.en import English\n\nnlp = English()\n\nsamplesent = [\"couldn't\", \"eating\", \"killed\", \"loving\", \"parties\"]\n\nfor word in samplesent:\n doc = nlp(word)\n word = doc[0].lemma_\n print(word)\n"
},
{
"alpha_fraction": 0.4520295262336731,
"alphanum_fraction": 0.5221402049064636,
"avg_line_length": 39.67499923706055,
"blob_id": "157ba06537d5e887e95e6c0a070ad2a823839bbe",
"content_id": "8d251921fb3d66dc6e84f678c20e05cb26930bdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1626,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 40,
"path": "/yandextranslatetest.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "import json, urllib.request\n\n# key = \"trnsl.1.1.20180821T035101Z.7622bc974ead6403.f3016199d1f56c33e68de316b816750e09daae43\"\n#\n#\n# def toUrlSafe(sentence):\n# return sentence.replace(\" \", \"+\")\n#\n# def translateSentence(sentence):\n# sentence = toUrlSafe(sentence)\n# with urllib.request.urlopen(\"https://translate.yandex.net/api/v1.5/tr.json/translate\"\n# \"?key=\" + key +\n# \"&text=\" + sentence +\n# \"&lang=tl-en\") as url:\n# data = json.loads(url.read().decode())\n# print(data)\n# #print(data[\"text\"])\n# translated = \"\".join(data['text'])\n# return translated\n#\n# text = \"Sinabi ko sa iyo na hindi ako kumakain ng cake\"\n# print(translateSentence(text))\n# #Output is: I told you I'm not eating cake\n\nclass Translator:\n def __init__(self):\n self.key = \"trnsl.1.1.20180821T035101Z.7622bc974ead6403.f3016199d1f56c33e68de316b816750e09daae43\"\n\n def translateWord(self, word):\n for i in range(10):\n try:\n with urllib.request.urlopen(\"https://translate.yandex.net/api/v1.5/tr.json/translate\"\n \"?key=\" + self.key +\n \"&text=\" + word +\n \"&lang=tl-en\") as url:\n data = json.loads(url.read().decode())\n translated = \"\".join(data['text'])\n return translated.lower()\n except Exception as e:\n print(\"Try number \" + str(i) + \" for word :\" + word)"
},
{
"alpha_fraction": 0.6054794788360596,
"alphanum_fraction": 0.6136986017227173,
"avg_line_length": 25.071428298950195,
"blob_id": "c19972c8d9ebbd962785ec04920507ae88df43a7",
"content_id": "37f3ec2ecdddb4a098ae0a4fe949165d71de1d82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 730,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 28,
"path": "/consolidate.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "import csv\n\ndef loadDataset(filename):\n with open(filename) as csvfile:\n lines = csv.reader(csvfile)\n return list(lines)\n\n\n\n\n\ncounti = 0\nfor i in range(6):\n consolidated = []\n counti = i + 1\n\n for j in range(37):\n countj = j+1\n inputdirectory = \"data\\\\validset_batch\" + str(countj) + \"_processed_wordcount\" + str(counti)\n dataset = loadDataset(inputdirectory + \".csv\")\n\n for row in dataset:\n consolidated.append(row)\n\n outputdirectory = \"data\\\\consolidated_validset_processed_wordcount\" + str(counti) + \".csv\"\n with open(outputdirectory, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(consolidated)\n"
},
{
"alpha_fraction": 0.6049165725708008,
"alphanum_fraction": 0.6136962175369263,
"avg_line_length": 32.52941131591797,
"blob_id": "e905802d19f159730a8d7aced4066260506834be",
"content_id": "f4ca68234a4f021e29b8ea20ec5f54beda414b12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1139,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 34,
"path": "/senticnettest.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "from senticnet.senticnet import SenticNet\n##NOTE TO SELF YOU STILL HAVE TO FIX THE EXCEPTION HERE\n\nclass SenticValuer:\n def getSentics(self, word):\n senticsAndItensity = []\n sn = SenticNet('en')\n try:\n sentics = sn.sentics(word)\n polarity_intensity = sn.polarity_intense(word)\n # print(sentics)\n # print(sentics['pleasantness'])\n # print(sentics['attention'])\n # print(sentics['sensitivity'])\n # print(sentics['aptitude'])\n # print(polarity_intensity)\n\n senticsAndItensity.append(float(sentics['pleasantness']))\n senticsAndItensity.append(float(sentics['attention']))\n senticsAndItensity.append(float(sentics['sensitivity']))\n senticsAndItensity.append(float(sentics['aptitude']))\n senticsAndItensity.append(float(polarity_intensity))\n\n return senticsAndItensity\n\n except Exception as e:\n defaultsentics = [0.0, 0.0, 0.0, 0.0, 0.0]\n return defaultsentics\n\n\n\n# ##TESTING AREA\n# yas = SenticValuer()\n# print(yas.getSentics(\"awkward\"))"
},
{
"alpha_fraction": 0.5922865271568298,
"alphanum_fraction": 0.6088154315948486,
"avg_line_length": 25.592357635498047,
"blob_id": "c4bc1115064bf7c97b163105178b5118c058f888",
"content_id": "833b16c0f8bdd3ff41678a277f6d1c6ca1211d8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8349,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 314,
"path": "/preprocess.py",
"repo_name": "saibunny/fil-shortstory-svm-thesis",
"src_encoding": "UTF-8",
"text": "#import for csv\nimport csv\n\n#import for tagger\nfrom py4jTest import POSTagger\n\n#import for translator\nfrom yandextranslatetest import Translator\n\n#import for sentic value tagger\nfrom senticnettest import SenticValuer\n\n#import for english lemmatizer\nfrom spacy.lang.en import English\n\n#import for sorting\nfrom operator import itemgetter\n\nimport re\n\n##ACTUAL CODE\n\n##FUNCTION DEFINITIONS\ndef loadDataset(filename):\n with open(filename) as csvfile:\n lines = csv.reader(csvfile)\n return list(lines)\n\ndef printDataset(dataset):\n print(*dataset, sep='\\n')\n # for row in dataset:\n # print(row)\n\ndef printDataset0(dataset):\n for row in dataset:\n print(row[0])\n\ndef loadTagset(filename):\n tagset = open(filename, 'r').read().split('\\n')\n return tagset\n # loaded = []\n # tagset = []\n # with open(filename) as csvfile:\n # lines = csv.reader(csvfile)\n # loaded = list(lines)\n # for tag in loaded:\n # tagset.append(tag[0])\n # return tagset\n\ndef findAffective(phrase):\n nlp = English()\n sentic = SenticValuer()\n\n affective = ''\n highpolarity = 0.0\n words = phrase.split(\" \")\n for i in range(len(words)):\n onlychars = re.sub(r'\\W+', '', words[i])\n if not onlychars.isalpha():\n return \"\"\n doc = nlp(onlychars)\n words[i] = doc[0].lemma_\n senticVal = sentic.getSentics(words[i])\n if abs(senticVal[4]) >= highpolarity:\n affective = words[i]\n highpolarity = senticVal[4]\n\n return affective\n\n\ndef averageSenticValues(row):\n sum = [0.0, 0.0, 0.0, 0.0, 0.0]\n for senticValues in row[0]:\n sum[0] = sum[0] + senticValues[0]\n sum[1] = sum[1] + senticValues[1]\n sum[2] = sum[2] + senticValues[2]\n sum[3] = sum[3] + senticValues[3]\n sum[4] = sum[4] + senticValues[4]\n for i in range(len(sum)):\n sum[i] = sum[i]/len(row[0])\n\n aveRow = []\n aveRow.append(sum)\n aveRow.append(row[1])\n return aveRow\n\ndef polarityAbs(senticval):\n return abs(senticval[4])\n\ndef onetosix(dataset):\n sixSets = []\n\n for i in range(6):\n count = i+1\n tempDataset = []\n for j in range(len(dataset)):\n tempRow = []\n sortedRow0 = sorted(dataset[j][0], key=polarityAbs, reverse=True)\n\n tempRow.append(sortedRow0[:count])\n tempRow.append(dataset[j][1])\n tempDataset.append(tempRow)\n sixSets.append(tempDataset)\n\n return sixSets\n\n##/FUNCTION DEFINITIONS\n\n\n#MAIN\nbatchnum = 12\n# inputdirectory = \"data\\\\validset_batch\" + str(batchnum)\n# inputdirectory = \"data\\\\sample\"\ninputdirectory = \"data\\\\testsetpred_batch\" + str(batchnum)\n\nprint(inputdirectory)\ndataset = loadDataset(inputdirectory + \".csv\")\n#dataset looks like [sentence, emotion]\n\nprint(\"dataset loaded\")\n\ntagger = POSTagger()\n\nprint(\"start tagging\")\nfor row in dataset:\n row[0] = tagger.tagSentence(row[0])[:-1]\n # row[0] = row[0][:-1]\n#row in dataset looks like [sentence with POStag, emotion]\nprint(\"end tagging\")\n\nprint(\"start splitting by space\")\nfor row in dataset:\n row[0] = row[0].split(\" \")\n#row in dataset looks like [ [word|POStag*], emotion]\n#dapat + pero kleen star na lang. 
meaning there is more than 1 word per row\n#an array within an array\nprint(\"end splitting by space\")\n\nprint(\"start splitting words and tags\")\nfor row in dataset:\n for i in range(len(row[0])):\n row[0][i] = row[0][i].split(\"|\")\n#dataset looks like [ [ [word,POStag]*], emotion]\n#3D array na ito\nprint(\"end splitting words and tags\")\n\n\nprint(\"start stopword removal\")\nstopwordset = loadTagset(\"data\\\\stopwords.csv\")\n\nfor row in dataset:\n temp = []\n for wordtag in row[0]:\n # temp = [word for word in wordtag[1] if word in tagset]\n if wordtag[0] in stopwordset:\n temp.append(wordtag)\n row[0] = [word for word in row[0] if word not in temp]\n#dataset still looks like the one from earlier except retain most affective POS\nprint(\"end stopword removal\")\n\n\nprintDataset(dataset)\n\nprint(\"start foreclipping\")\ntagset = loadTagset(\"data\\\\tagstop5.csv\")\nfor row in dataset:\n for i in range(len(row[0])):\n if \"-\" in row[0][i][0] and row[0][i][1] is not \"FW\":\n tempword = row[0][i][0].split(\"-\")\n\n while \"\" in tempword:\n tempword.remove(\"\")\n\n temptag = []\n for j in range(len(tempword)):\n if tempword[j] is not '':\n temptag.append(tagger.returnTag(tempword[j]))\n if temptag[j] not in tagset:\n tempwordtag = []\n tempwordtag.append(tempword[j])\n tempwordtag.append(temptag[j])\n row[0][i] = tempwordtag\nprint(\"end foreclipping\")\n\nprint(\"start filtering POS\")\n\nfor row in dataset:\n temp = []\n for wordtag in row[0]:\n # temp = [word for word in wordtag[1] if word in tagset]\n if wordtag[1] in tagset:\n temp.append(wordtag)\n row[0] = [word for word in row[0] if word not in temp]\n#dataset still looks like the one from earlier except retain most affective POS\nprint(\"end filtering POS\")\n\n\nprint(\"start replacing [word|tag] list by word\")\n\nfor row in dataset:\n for i in range(len(row[0])):\n row[0][i] = row[0][i][0]\n# dataset = [[j.lower() for j in i] for i in dataset]\n\n#dataset now looks like [ [word]*, emotion]\nprint(\"end replacing [word|tag] list by word\")\n\n\nprint(\"Start translation\")\ntranslator = Translator()\n\ntranslations = []\n\ncount = 0\nfor row in dataset:\n untransrow = \"<\"\n transrow = \">\"\n\n temptransrow = []\n\n for i in range(len(row[0])):\n untransrow = untransrow + \"|\" + row[0][i]\n temmie = translator.translateWord(row[0][i])\n transrow = transrow + \"|\" + temmie\n row[0][i] = temmie\n\n temptransrow.append(untransrow)\n temptransrow.append(transrow)\n\n translations.append(temptransrow)\n\n count = count + 1\n print(str(count) + \" \" + untransrow + \"|||||\" + transrow)\nprint(\"End translation\")\n#dataset still looks like the one from before except translated to english\n\nprint(\"Start lemmatization\")\n#next is lemmatizer\nnlp = English()\nfor row in dataset:\n if row[0]:\n for i in range(len(row[0])):\n if \" \" in row[0][i]:\n row[0][i] = findAffective(row[0][i])\n else:\n doc = nlp(row[0][i])\n row[0][i] = doc[0].lemma_\n\n#dataset still looks like the one from before but lemmatized\nprint(\"end lemmatization\")\n\nprint(\"start sentic valuing\")\n#next up is senticnet and keep in mind the blank resulting row[0] make the sentic value for that all 0's\nsentic = SenticValuer()\n\nfor row in dataset:\n if row[0]:\n for i in range(len(row[0])):\n row[0][i] = sentic.getSentics(row[0][i])\n else:\n row[0] = [[0.0, 0.0, 0.0, 0.0, 0.0]]\n#the dataset now looks like [ [sentic values]*, emotion]\nprint(\"end sentic valuing\")\n\n\nprintDataset(dataset)\n\nprint(\"start averaging\")\n\nsixSets = 
onetosix(dataset)\nfor i in range(len(sixSets)):\n for j in range(len(sixSets[i])):\n sixSets[i][j] = averageSenticValues(sixSets[i][j])\nprint(\"end averaging\")\n\nprint(\"start writing to file\")\n\n#Write dataset to file\nfor i in range(len(sixSets)):\n count = i+1\n directory = inputdirectory + \"_processed_wordcount\" + str(count) + '.csv'\n\n finalDataset = []\n\n # for row in sixSets[i]:\n # newRow = []\n # newRow.append(row[0][0])\n # newRow.append(row[0][1])\n # newRow.append(row[0][2])\n # newRow.append(row[0][3])\n # newRow.append(row[0][4])\n # newRow.append(row[1])\n # finalDataset.append(newRow)\n\n for j in range(len(sixSets[i])):\n newRow = []\n newRow.append(sixSets[i][j][0][0])\n newRow.append(sixSets[i][j][0][1])\n newRow.append(sixSets[i][j][0][2])\n newRow.append(sixSets[i][j][0][3])\n newRow.append(sixSets[i][j][0][4])\n newRow.append(sixSets[i][j][1])\n newRow.append(translations[j][0])\n newRow.append(translations[j][1])\n finalDataset.append(newRow)\n\n with open(directory,'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(finalDataset)\n\nprint(\"end writing to file\")\n\nprint(\"complete batch #\" + str(batchnum))\n#/MAIN"
}
] | 7 |
isacdaavid/emotional-faces-psychopy-task | https://github.com/isacdaavid/emotional-faces-psychopy-task | bb716a8ac9eef41b4aab5e66b0ce2f290143f7ce | 4257ca29c2a7c6c105fd735ab57f6c681c7eb7ef | 9e1c12def96c4226cd18de2138e813c9ef3df15b | refs/heads/main | 2022-12-31T19:03:27.440683 | 2020-10-16T19:41:00 | 2020-10-16T19:41:00 | 304,709,069 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5264400839805603,
"alphanum_fraction": 0.5337882041931152,
"avg_line_length": 40.64480972290039,
"blob_id": "0163f4a69d79e97ba7096a8a7c236d79c2f3392a",
"content_id": "e50a5735dfbe4426480726035de8d053d2c83325",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 7624,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 183,
"path": "/posthoc-stimulus-validation/analysis.R",
"repo_name": "isacdaavid/emotional-faces-psychopy-task",
"src_encoding": "UTF-8",
"text": "## author: Isaac David <isacdaavid@at@isacdaavid@dot@info>\n## license: GPLv3 or later\n\n## task-dependent neural data is only as good as stimuli\n## allow. stimuli must elicit the adequate behavior and physiological\n## phenomena. this script assesses dependence between intended\n## stimulus category and subjectively-chosen category by study\n## participants, as tested prior to fMRI acquisition.\n\nlibrary(ggplot2)\nBASE_SIZE <- 21\ntheme_set(theme_gray(base_size = BASE_SIZE))\n\nDATA_DIR <- './'\ndirs <- list.dirs(DATA_DIR, recursive = FALSE)\n\n## plot contingency/confusion table and perform Pearson's Chi-squared\n## dependence test on it. FWER-corrected with Bonferroni\n##\n## input: df: frequency dataframe, e.g. from freq_dataframe()\n## n_tests: number of comparisons to correct for\n## output: ggplot object\nplot_conf_matrix <- function(df, n_tests = 1, text_size = 5, nudge_x = 0,\n angle = 0, x_labels_position = \"top\",\n x_labels = c(\"angry\", \"happy\", \"neutral\", \"sad\")) {\n mytest <- chisq.test(xtabs(Frecuencia ~ Respuesta + Realidad, data = df))\n ggplot(df, aes(x = Realidad,\n y = ordered(Respuesta, levels = rev(levels(Respuesta))))) +\n geom_tile(aes(fill = Frecuencia)) +\n geom_text(data = subset(df, Frecuencia != \"0\"),\n aes(label = sprintf(\"%1d\", Frecuencia)),\n angle = angle,\n color = \"white\",\n vjust = 1,\n size = text_size,\n nudge_x = nudge_x) +\n labs(y = \"Respuesta\",\n title = \"Frecuencia conjunta de\\ncategorización de estímulos\",\n caption = paste0(\"χ2 de Pearson = \",\n signif(mytest$statistic, 2),\n \", gdl = \",\n signif(mytest$parameter, 2),\n if (n_tests > 1) \", valor p (Bonferroni) = \" else \", valor p = \",\n signif(mytest$p.value * n_tests, 2))) +\n guides(fill = FALSE) +\n scale_x_discrete(position = x_labels_position, labels = x_labels) +\n theme(axis.text.x = element_text(angle = 90,\n hjust = 1,\n size = BASE_SIZE * .6))\n}\n\n## perform binomial tests on contingency/confusion table\n## columns. I.e., compute the probabilities of observing same or\n## better categorisation under the hypothesis that subjects are\n## randomly guessing, each response being an independent Bernoulli\n## trial of probability 1/4. 
FWER-corrected with Holm's method.\n##\n## input: conf_mat: contingency matrix\n## output: Holm-adjusted list of binomial tests\nmultiple_binomial_tests <- function(conf_mat) {\n ## assume ordered matrix with n*4 columns and specify which row\n ## contains the hits\n conf_mat <- rbind(conf_mat,\n hits_row = do.call(c, lapply(1:4, function(x) {\n rep(x, ncol(conf_mat) / 4)\n })))\n tests <- apply(conf_mat, MARGIN = 2, function(column) {\n binom.test(column[column[\"hits_row\"]],\n n = sum(column[1:4]),\n p = 1/4,\n alternative = \"greater\")\n })\n corrected_pvals <- p.adjust(unlist(lapply(tests, function(t) t$p.value)),\n \"holm\",\n length(tests))\n ## update tests\n for (i in 1:length(tests)) {\n tests[[i]]$p.value <- corrected_pvals[i]\n }\n return(tests)\n}\n\n## transform numeric p-values vector to character vector, emptying\n## significant values\n##\n## input: pvals: vector with p-values\n## thr = .05: cutoff value\n## output: character vector with rounded p-values or empty elements, depending\n## on the original value and thr\nthreshold <- function(pvals, thr = .05) {\n unname(sapply(pvals, function(p) {\n if (p >= thr) as.character(round(p, 2)) else \" \"\n }))\n}\n\n################################################################################\n## per-emotion analysis: how good are subjects (individually and collectively)\n## at discriminating each intended emotion?\n## identify problematic emotions and problematic subjects\n################################################################################\n\n## transform raw subject responses (as saved by gui.R) into response\n## frequency data frame.\n##\n## input: path to subject directory (character string)\n## output: dataframe with columns c(\"Respuesta\", \"Realidad\", \"Frecuencia\")\nfreq_dataframe <- function(dir) {\n data <- read.csv(paste0(dir, '/subject.csv'), sep = '\\t')\n conf_mat <- table(data$response, data$emotion)\n df <- as.data.frame(conf_mat)\n colnames(df) <- c(\"Respuesta\", \"Realidad\", \"Frecuencia\")\n return(df)\n}\n\nmain_emotions <- function() {\n ## per-subject frequency dataframes (list of)\n freq_dataframes <- lapply(dirs, freq_dataframe)\n\n ## collapse into group frequency dataframe\n global_freq_dataframe <-\n cbind(freq_dataframes[[1]][, c(\"Respuesta\", \"Realidad\")],\n Frecuencia = Reduce(f = \"+\",\n x = lapply(freq_dataframes,\n function(df) { df$Frecuencia } )))\n\n ## per-subject plots, save at subject directories\n lapply(1:length(dirs), function(n) {\n df <- freq_dataframes[[n]]\n x_labels <- threshold(unlist(lapply(\n multiple_binomial_tests(xtabs(Frecuencia ~ Respuesta + Realidad,\n data = df)),\n function(t) t$p.value)))\n svg(paste0(dirs[n], '/conf-matrix.svg'))\n plot(plot_conf_matrix(df,\n n_tests = length(freq_dataframes),\n x_labels = x_labels))\n dev.off()\n })\n\n ## group plot\n x_labels <- threshold(unlist(lapply(\n multiple_binomial_tests(xtabs(Frecuencia ~ Respuesta + Realidad,\n data = global_freq_dataframe)),\n function(t) t$p.value)))\n svg(paste0(DATA_DIR, \"/conf-matrix-global-emotion.svg\"))\n plot(plot_conf_matrix(global_freq_dataframe,\n x_labels = x_labels))\n dev.off()\n}\n\n################################################################################\n## per-stimulus, group-wide analysis: identify problematic images\n################################################################################\n\nmain_stimuli <- function() {\n raw_data <- do.call(rbind, lapply(dirs, function(dir) {\n read.csv(paste0(dir, '/subject.csv'), sep = '\\t')\n }))\n ## 
preppend intended emotion to stimulus name, this will help with sorting\n raw_data$stimulus <- paste0(raw_data$emotion, '.',\n substr(raw_data$stimulus, 28, 999))\n\n conf_mat <- table(raw_data$response, raw_data$stimulus)\n\n freq_dataframe <- as.data.frame(conf_mat)\n colnames(freq_dataframe) <- c(\"Respuesta\", \"Realidad\", \"Frecuencia\")\n\n ## group plot\n svg(paste0(DATA_DIR, \"/conf-matrix-global-stim.svg\"))\n plot(plot_conf_matrix(freq_dataframe,\n text_size = 3.5,\n angle = 90,\n nudge_x = -.3,\n x_labels_position = \"top\",\n x_labels =\n threshold(unlist(lapply(\n multiple_binomial_tests(conf_mat),\n function(t) t$p.value)))))\n dev.off()\n}\n\nmain_emotions()\nmain_stimuli()\n"
},
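To make the column-wise testing logic of `multiple_binomial_tests()` concrete, here is a self-contained toy run with made-up counts (10 trials per intended emotion, chance level 1/4; hits sit on the diagonal here, so the hits_row bookkeeping of the original function is not needed):

```r
conf <- matrix(c(7, 1, 1, 1,   # truth: angry
                 1, 6, 2, 1,   # truth: happy
                 1, 1, 5, 3,   # truth: neutral
                 1, 2, 2, 5),  # truth: sad
               nrow = 4,
               dimnames = list(response = c("angry", "happy", "neutral", "sad"),
                               truth = c("angry", "happy", "neutral", "sad")))
# test each column's hit count against chance (p = 1/4), one-sided
pvals <- sapply(1:4, function(j) {
  binom.test(conf[j, j], n = sum(conf[, j]), p = 1/4,
             alternative = "greater")$p.value
})
p.adjust(pvals, method = "holm")  # FWER correction, as in the function above
```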
{
"alpha_fraction": 0.432471364736557,
"alphanum_fraction": 0.4545672833919525,
"avg_line_length": 40.912750244140625,
"blob_id": "a67ffb6bdf4c4a7fee11629804db67809845175e",
"content_id": "0ca47087ee6e1ef62f0451f85d57dd88303b8844",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 12494,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 298,
"path": "/task_performance.R",
"repo_name": "isacdaavid/emotional-faces-psychopy-task",
"src_encoding": "UTF-8",
"text": "\n## author: Isaac David <isacdaavid@at@isacdaavid@dot@info>\n## license: GPLv3 or later\n\nlibrary(lme4)\nlibrary(nlme)\nlibrary(ggplot2)\nBASE_SIZE <- 21\ntheme_set(theme_gray(base_size = BASE_SIZE))\n\nDATA_DIR <- './emofaces/data/'\nfiles <- list.files(DATA_DIR, \"*.csv\")\nfiles <- files[11:length(files)] # exclude Pablo (missing data) and myself\n\n## load single Psychopy CSV output and make sense of its messiness. very hacky\n##\n## input: path to file\n## output: clean dataframe\nparse_file <- function(path) {\n data <- read.csv(path)\n ## only the following columns contain useful info\n data <- data[, c(\"stimulus\",\n \"gender\",\n \"key_resp_2.keys\",\n \"key_resp_3.keys\",\n \"key_resp_4.keys\",\n \"key_resp_5.keys\",\n \"key_resp_6.keys\",\n \"key_resp_7.keys\",\n \"key_resp_2.rt\",\n \"key_resp_3.rt\",\n \"key_resp_4.rt\",\n \"key_resp_5.rt\",\n \"key_resp_6.rt\",\n \"key_resp_7.rt\")]\n data[is.na(data)] <- 0 # so as to sum RT's with NA's\n data <- data.frame(stimulus = unname(data[\"stimulus\"]),\n gender = data$gender,\n keys = trimws(paste(data$key_resp_2.keys,\n data$key_resp_3.keys,\n data$key_resp_4.keys,\n data$key_resp_5.keys,\n data$key_resp_6.keys,\n data$key_resp_7.keys)),\n rt = Reduce(\"+\", data[, 9:14]))\n data$rt[data$rt == 0] <- NA # RT == 0 was actually a NA, after all\n ## map from button keys to implied stimulus gender (m-ale or f-emale)\n data$keys <- sapply(data$keys, function(k) {\n if (k == \"b\" || k == \"a\") {\"m\"}\n else if (k == \"c\" || k == \"d\") {\"f\"}\n else {\"\"}\n })\n ## evaluate responses during non-gendered trials (copy response\n ## key to gender if correct. i.e., if any response was given)\n correct_nongendered <- (data$gender == \"\") & (! is.na(data$rt))\n data$gender[correct_nongendered] <- data$keys[correct_nongendered]\n ## explicitly make missing responses different from expected gender\n data$keys[is.na(data$rt) & (data$keys == \"\")] <- \"None\"\n return(data)\n}\n\n## put per-file dataframes (as returned by `parse_file()`) together,\n## add extra useful columns\n##\n## input: filenames vector\n## output: global dataframe\nparse_files <- function(paths) {\n ## row-bind individual dataframes\n dframe <- do.call(rbind, lapply(files, function(path) {\n dframe <- parse_file(paste0(DATA_DIR, path))\n run <- regexpr(\"emofaces\", path) + 8 # parse experiment number (1-5)\n sub_id <- substr(path, 1, 2) # parse subject id\n cbind(subject = rep(sub_id, nrow(dframe)),\n run = rep(as.numeric(substring(path, run, run)), nrow(dframe)),\n dframe,\n ## whether gender matches key response (abstract away\n ## actual gender)\n respuesta = (dframe$gender == dframe$keys))\n }))\n ## add trial count for each subject. 
this is easier once all 5\n ## runs per subject reside in one dataframe\n dframe <-\n cbind(dframe,\n ensayo = do.call(c, lapply(unique(dframe$subject), function(s) {\n 1:nrow(dframe[dframe$subject == s, ])\n })),\n bloque = do.call(c, lapply(unique(dframe$subject), function(s) {\n c(rep(\"blank\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n rep(\"sad\", 10),\n rep(\"angry\", 10),\n rep(\"neutral\", 10),\n rep(\"happy\", 10),\n rep(\"sad\", 10),\n rep(\"angry\", 10),\n rep(\"neutral\", 10),\n rep(\"scrambled\", 10),\n rep(\"blank\", 10),\n ## run 2\n rep(\"sad\", 10),\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"angry\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n rep(\"sad\", 10),\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"angry\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n ## run 3\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"angry\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n rep(\"sad\", 10),\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"angry\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n rep(\"sad\", 10),\n ## run 4\n rep(\"happy\", 10),\n rep(\"angry\", 10),\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"sad\", 10),\n rep(\"scrambled\", 10),\n rep(\"happy\", 10),\n rep(\"angry\", 10),\n rep(\"neutral\", 10),\n rep(\"blank\", 10),\n rep(\"sad\", 10),\n rep(\"scrambled\", 10),\n ## run 5\n rep(\"scrambled\", 10),\n rep(\"sad\", 10),\n rep(\"happy\",10),\n rep(\"neutral\", 10),\n rep(\"angry\", 10),\n rep(\"blank\", 10),\n rep(\"scrambled\", 10),\n rep(\"sad\", 10),\n rep(\"happy\", 10),\n rep(\"neutral\", 10),\n rep(\"angry\", 10),\n rep(\"blank\", 10))\n })))\n dframe$bloque <- factor(dframe$bloque,\n levels = c(\"blank\", \"scrambled\", \"neutral\", \"happy\",\n \"sad\", \"angry\"))\n return(dframe)\n}\n\ndf <- parse_files(files)\n\n################################################################################\n## hits vs misses\n################################################################################\n\n## reduced dataframe with hits and misses count per trial,\n## irrespective of subject\ndf2 <- do.call(rbind, lapply(unique(df$ensayo), function(t) {\n data.frame(ensayo = c(t, t),\n respuesta = factor(c(\"errores\", \"aciertos\"),\n c(\"errores\", \"aciertos\")),\n sujetos = c(nrow(df[df$ensayo == t & df$respuesta == FALSE, ]),\n nrow(df[df$ensayo == t & df$respuesta == TRUE, ])))\n}))\n\n## perform per-subject binomial tests\npvals <- unname(sapply(unique(df$subject), function(s) {\n binom.test(table(df[df$subject == s, \"respuesta\"])[\"TRUE\"],\n n = length(df[df$subject == s, \"respuesta\"]),\n p = 1/2,\n alternative = \"greater\")$p.value\n}))\npvals <- p.adjust(pvals, method=\"holm\") # Holm's FWE p-value correction\n\n## perform per-block-type binomial tests\npvals_block <- unname(sapply(unique(df$bloque), function(b) {\n binom.test(table(df[df$bloque == b, \"respuesta\"])[\"TRUE\"],\n n = length(df[df$bloque == b, \"respuesta\"]),\n p = 1/2,\n alternative = \"greater\")$p.value\n}))\npvals_block<- p.adjust(pvals_block, method=\"holm\") # Holm's FWE p-value correction\n\n## plot group performance timeseries as proportion between hits and misses\nsvg(\"./cumm-hits-vs-misses-timeseries.svg\", width=10, height=4.5)\nggplot(df2, aes(x = ensayo, y = sujetos, fill = respuesta)) +\n geom_area() +\n scale_x_continuous(breaks = seq(0, 600, 10),\n labels = sapply(seq(0, 600, 10), function(l) {\n if (l %% 120 == 0) l else \"\"\n })) +\n ## scale_y_continuous(breaks = seq(0, 
12, 2), labels = seq(0, 12, 2)) +\n theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),\n legend.position = \"bottom\") +\n labs(caption = paste(\"Prueba binomial del peor sujeto: valor p (Holm) =\",\n signif(max(pvals), 3),\n \"\\nPrueba binomial del peor tipo de bloque: valor p (Holm) = \",\n signif(max(pvals_block), 3)))\ndev.off()\n\n################################################################################\n## reaction times\n################################################################################\n\n## linear\nmodelo <- lm(rt ~ ensayo + bloque, data = df)\nsummary(modelo)\npar(mfrow = c(2,2))\nplot(modelo)\n\n## general linear. same results but provides benchmark to compare with lme\nmodelo_glm <- gls(rt ~ ensayo + bloque, data = df, method = \"ML\", na.action = na.omit)\nsummary(modelo_glm)\nsvg(\"rt-timeseries-model-selection-res1.svg\")\nplot.new()\nplot(modelo_glm)\nabline(.3, 0)\ndev.off()\n\n## now we add mixed effects\nmodelo_glmm <- lme(rt ~ run + bloque,\n random = ~1|subject, data = df,\n method = \"ML\",\n na.action = na.omit)\nsummary(modelo_glmm)\nsvg(\"rt-random-effects.svg\")\nplot(ranef(modelo_glmm)) # random effects\ndev.off()\nsvg(\"rt-timeseries-model-selection-res2.svg\")\nplot.new()\nplot(modelo_glmm)\ndev.off()\n\n## model-selection criteria\ncomp <- anova(modelo_glm, modelo_glmm)\n\n## plot model comparison diagnostics. very hacky!\nsvg(\"./rt-timeseries-model-selection.svg\")\npar(mfrow = c(2,2))\nqqnorm(resid(modelo_glm, type = \"normalized\"), main = \"Modelo efectos fijos\")\nqqline(resid(modelo_glm, type = \"normalized\"))\nqqnorm(resid(modelo_glmm, type = \"normalized\"), main = \"Modelo efectos mixtos\")\nqqline(resid(modelo_glmm, type = \"normalized\"))\ntitle(paste0(\"\\nComparación de modelos de regresión\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\",\n \" AIC: \",\n as.integer(comp$AIC[1]),\n \" \",\n as.integer(comp$AIC[2]),\n \"\\n BIC: \",\n as.integer(comp$BIC[1]),\n \" \",\n as.integer(comp$BIC[2]),\n \"\\nlog-verosimilitud: \",\n as.integer(comp$logLik[1]),\n \" \",\n as.integer(comp$logLik[2])),\n outer=TRUE)\ndev.off()\n\n## plot reaction time timeseries (with fits)\nsvg(\"./rt-timeseries-fit.svg\", width = 10, height = 5)\nggplot(df, aes(x = ensayo, y = rt, group = subject, color = subject)) +\n geom_smooth(show.legend = FALSE, method = \"loess\", span = .1, alpha = .3) +\n ## geom_abline(aes(intercept = modelo$coefficients[1],\n ## slope = modelo$coefficients[2],\n ## color = \"red\"), show.legend = FALSE) +\n geom_abline(aes(intercept = modelo_glmm$coefficients$fixed[1],\n slope = modelo_glmm$coefficients$fixed[2] / 120),\n show.legend = FALSE, linetype = \"dashed\") +\n ## scale_y_continuous(limits = c(0, 3)) +\n scale_x_continuous(breaks = seq(0, 600, 10),\n labels = sapply(seq(0, 600, 10), function(l) {\n if (l %% 120 == 0) l else \"\"\n })) +\n theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5)) +\n labs(y = \"tiempo de reacción (s)\",\n caption = paste0(\"\\ntiempo = \",\n signif(modelo_glmm$coefficients$fixed[2]/120, 2),\n \" \\u00B7 ensayo + X \\u00B7 bloque + U \\u00B7 sujeto + \",\n signif(modelo_glmm$coefficients$fixed[1], 2),\n \"\\nValores p : \",\n signif(summary(modelo_glmm)$tTable[2,5], 3),\n \", \",\n signif(summary(modelo_glmm)$tTable[1,5], 3)))\ndev.off()\n\n## block-type anova\nanova <- aov(rt ~ bloque, data = 
df)\nsummary(anova)\nTukeyHSD(anova)\n"
},
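A minimal Python sketch of the per-subject performance check in the R record above: a one-sided binomial test against chance (p = 1/2, alternative "greater") followed by Holm's family-wise error correction, mirroring the script's binom.test() and p.adjust(method = "holm") calls. The hit counts below are hypothetical placeholders for the per-subject tallies parsed from the logs.

# per-subject binomial tests with Holm correction (sketch)
from scipy.stats import binomtest
from statsmodels.stats.multitest import multipletests

hits   = [412, 389, 455, 430, 398]   # hypothetical hits per subject
trials = [600, 600, 600, 600, 600]   # 5 runs x 120 trials, as in the task

pvals = [binomtest(k, n=n, p=0.5, alternative="greater").pvalue
         for k, n in zip(hits, trials)]
# Holm's step-down correction; reject[i] flags above-chance performance at alpha = .05
reject, pvals_holm, _, _ = multipletests(pvals, alpha=0.05, method="holm")

for p, ok in zip(pvals_holm, reject):
    print(f"Holm-adjusted p = {p:.3g}  above chance: {ok}")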
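A sketch of the model-selection step, under the same logic as the gls()/lme() comparison in the script: a fixed-effects model against a mixed model with a per-subject random intercept, both fit by maximum likelihood (the R code's method = "ML") so that AIC, BIC, and log-likelihood are comparable. The dataframe here is synthetic; with the real data it would be the output of parse_files().

# fixed-effects vs. mixed-effects model comparison (sketch, synthetic data)
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "subject": np.repeat(list("ABCDE"), 120),
    "ensayo": np.tile(np.arange(1, 121), 5),
    "bloque": np.tile(np.repeat(["blank", "scrambled", "neutral",
                                 "happy", "sad", "angry"], 20), 5),
})
# per-subject offsets make the random intercept worth having
offsets = dict(zip("ABCDE", rng.normal(0, 0.2, 5)))
df["rt"] = (0.6 + 0.001 * df["ensayo"]
            + df["subject"].map(offsets)
            + rng.normal(0, 0.1, len(df)))

fixed = smf.ols("rt ~ ensayo + bloque", data=df).fit()
mixed = smf.mixedlm("rt ~ ensayo + bloque", data=df,
                    groups=df["subject"]).fit(reml=False)  # ML, like method = "ML"

print(f"fixed: AIC={fixed.aic:.1f} BIC={fixed.bic:.1f} logLik={fixed.llf:.1f}")
print(f"mixed: AIC={mixed.aic:.1f} BIC={mixed.bic:.1f} logLik={mixed.llf:.1f}")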
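And a sketch of the final block-type comparison, standing in for the script's aov() and TukeyHSD(): an omnibus one-way ANOVA on reaction time by block type, followed by Tukey's pairwise post-hoc test. The reaction times are synthetic placeholders.

# one-way ANOVA by block type plus Tukey HSD (sketch, synthetic data)
import numpy as np
from scipy.stats import f_oneway
from statsmodels.stats.multicomp import pairwise_tukeyhsd

rng = np.random.default_rng(1)
blocks = ["blank", "scrambled", "neutral", "happy", "sad", "angry"]
rt = np.concatenate([rng.normal(0.6 + 0.02 * i, 0.1, 100) for i in range(6)])
bloque = np.repeat(blocks, 100)

print(f_oneway(*[rt[bloque == b] for b in blocks]))  # omnibus test
print(pairwise_tukeyhsd(rt, bloque))                 # pairwise post-hoc comparisons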
{
"alpha_fraction": 0.5528255701065063,
"alphanum_fraction": 0.5606879591941833,
"avg_line_length": 33.47457504272461,
"blob_id": "abebf17621255002c506ec1cf203a4d75bab2495",
"content_id": "de933201444b299c9a6169ab818b82cb16aa28ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "R",
"length_bytes": 2035,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 59,
"path": "/posthoc-stimulus-validation/gui.R",
"repo_name": "isacdaavid/emotional-faces-psychopy-task",
"src_encoding": "UTF-8",
"text": "## author: Isaac David <isacdaavid@at@isacdaavid@dot@info>\n## license: GPLv3 or later\n\nlibrary(gWidgets)\nlibrary(gWidgetstcltk)\n\nhappy <- read.csv(\"../emofaces/happy.csv\")\nhappy$gender <- rep(\"happy\", 10)\nsad <- read.csv(\"../emofaces/sad.csv\")\nsad$gender <- rep(\"sad\", 10)\nangry <- read.csv(\"../emofaces/angry.csv\")\nangry$gender <- rep(\"angry\", 10)\nneutral <- read.csv(\"../emofaces/neutral.csv\")\nneutral$gender <- rep(\"neutral\", 10)\n\nstimuli <- rbind(happy, sad, angry, neutral)\nnames(stimuli) <- c('stimulus', 'emotion')\nstimuli$stimulus <- paste0('../emofaces/', stimuli$stimulus)\nstimuli$stimulus <- sapply(stimuli$stimulus, function(s) {gsub('tif', 'gif', s)})\nstimuli <- stimuli[sample(1:nrow(stimuli)), ]\n\nstimulus_counter <- 1\nresponses <- c()\n\nwin <- gwindow(title = '')\ngrp <- ggroup(container = win, horizontal = FALSE)\nimg <- gimage(stimuli[stimulus_counter, 1], container = grp)\n\nhandler <- function(selection) {\n responses <<- c(responses, selection)\n if (stimulus_counter <= nrow(stimuli)) {\n stimulus_counter <<- stimulus_counter + 1\n if (stimulus_counter > nrow(stimuli)) {\n dispose(win)\n write.table(cbind(stimuli, response=responses),\n file = './subject.csv',\n sep = '\\t',\n row.names = FALSE,\n fileEncoding = \"UTF-8\")\n return\n }\n svalue(img) <- stimuli[stimulus_counter, 1]\n }\n}\n\nbtns <- ggroup(container = grp)\n\nbtn_happy <- gbutton(text=\"Sonriente\",\n container=btns,\n handler = function(h, ...) {handler('happy')})\nbtn_happy <- gbutton(text = \"Neutra\",\n container = btns,\n handler = function(h, ...) {handler('neutral')})\nbtn_happy <- gbutton(text=\"Triste\",\n container=btns,\n handler = function(h, ...) {handler('sad')})\nbtn_happy <- gbutton(text=\"Enojada\",\n container=btns,\n handler = function(h, ...) {handler('angry')})\n\n"
},
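gui.R above drives the post-hoc stimulus validation with gWidgets/tcltk. The sketch below reproduces the same flow in plain tkinter (a swapped-in toolkit, not the author's): present each shuffled face stimulus, record the clicked emotion, and write a tab-separated results file after the last image. The stimulus paths and one-column CSV layout come from the R script; the header-skip, widget names, and output file name are assumptions.

# tkinter re-implementation sketch of the gui.R rating flow
import csv
import random
import tkinter as tk

EMOTIONS = [("happy", "Sonriente"), ("neutral", "Neutra"),
            ("sad", "Triste"), ("angry", "Enojada")]

stimuli = []
for emotion, _ in EMOTIONS:
    with open(f"../emofaces/{emotion}.csv", newline="") as f:
        rows = list(csv.reader(f))[1:]  # skip the header row, as read.csv does
        for row in rows:
            stimuli.append(("../emofaces/" + row[0].replace(".tif", ".gif"),
                            emotion))
random.shuffle(stimuli)

responses = []
root = tk.Tk()
photo = tk.PhotoImage(file=stimuli[0][0])  # tkinter loads GIFs natively
label = tk.Label(root, image=photo)
label.pack()

def respond(choice):
    global photo
    responses.append(choice)
    if len(responses) == len(stimuli):  # last stimulus rated: save and quit
        with open("subject.csv", "w", newline="") as out:
            writer = csv.writer(out, delimiter="\t")
            writer.writerow(["stimulus", "emotion", "response"])
            for (path, emotion), resp in zip(stimuli, responses):
                writer.writerow([path, emotion, resp])
        root.destroy()
        return
    photo = tk.PhotoImage(file=stimuli[len(responses)][0])  # keep a live reference
    label.configure(image=photo)

buttons = tk.Frame(root)
buttons.pack()
for emotion, text in EMOTIONS:
    tk.Button(buttons, text=text,
              command=lambda e=emotion: respond(e)).pack(side="left")

root.mainloop()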
{
"alpha_fraction": 0.6074618101119995,
"alphanum_fraction": 0.6275662183761597,
"avg_line_length": 40.92058181762695,
"blob_id": "bde0662e4940beb59678c3ead83b5de9a09378f5",
"content_id": "14af5d36737d457c3dcfed92497beef9f3c80b27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77602,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 1851,
"path": "/emofaces/emofaces5_lastrun.py",
"repo_name": "isacdaavid/emotional-faces-psychopy-task",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis experiment was created using PsychoPy3 Experiment Builder (v3.0.1),\n on noviembre 21, 2019, at 21:02\nIf you publish work using this script please cite the PsychoPy publications:\n Peirce, JW (2007) PsychoPy - Psychophysics software in Python.\n Journal of Neuroscience Methods, 162(1-2), 8-13.\n Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.\n Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008\n\"\"\"\n\nfrom __future__ import absolute_import, division\nfrom psychopy import locale_setup, sound, gui, visual, core, data, event, logging, clock\nfrom psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,\n STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import (sin, cos, tan, log, log10, pi, average,\n sqrt, std, deg2rad, rad2deg, linspace, asarray)\nfrom numpy.random import random, randint, normal, shuffle\nimport os # handy system and path functions\nimport sys # to get file system encoding\n\n\n# Ensure that relative paths start from the same directory as this script\n_thisDir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(_thisDir)\n\n# Store info about the experiment session\npsychopyVersion = '3.0.1'\nexpName = 'emofaces5' # from the Builder filename that created this script\nexpInfo = {'participant': '', 'session': '001'}\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\nif dlg.OK == False:\n core.quit() # user pressed cancel\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\nexpInfo['psychopyVersion'] = psychopyVersion\n\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\nfilename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath='G:\\\\emofaces\\\\emofaces5_lastrun.py',\n savePickle=True, saveWideText=True,\n dataFileName=filename)\n# save a log file for detail verbose info\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n\nendExpNow = False # flag for 'escape' or other condition => quit the exp\n\n# Start Code - component code to be run before the window creation\n\n# Setup the Window\nwin = visual.Window(\n size=[800, 600], fullscr=True, screen=0,\n allowGUI=False, allowStencil=False,\n monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',\n blendMode='avg', useFBO=True,\n units='height')\n# store frame rate of monitor if we can measure it\nexpInfo['frameRate'] = win.getActualFrameRate()\nif expInfo['frameRate'] != None:\n frameDur = 1.0 / round(expInfo['frameRate'])\nelse:\n frameDur = 1.0 / 60.0 # could not measure, so guess\n\n# Initialize components for Routine \"instructions\"\ninstructionsClock = core.Clock()\ntext = visual.TextStim(win=win, name='text',\n text='Instrucciones:\\n\\nDurante los siguientes 6 minutos verás una serie de imágenes.\\n\\nMantén la atención en ellas e intenta no mover la cabeza.',\n font='Arial',\n pos=(0, 0), height=0.08, wrapWidth=None, ori=0, \n color='white', colorSpace='rgb', opacity=1, \n languageStyle='LTR',\n depth=0.0);\n\n# Initialize components for Routine \"instructions2\"\ninstructions2Clock = core.Clock()\ntext_2 = 
visual.TextStim(win=win, name='text_2',\n text='Presiona un botón cada vez que notes un cambio en la imagen, utilizando los 2 botones de forma alternada:\\n\\nizq, der, izq, ...',\n font='Arial',\n pos=(0, 0), height=0.08, wrapWidth=None, ori=0, \n color='white', colorSpace='rgb', opacity=1, \n languageStyle='LTR',\n depth=0.0);\n\n# Initialize components for Routine \"instrucciones_3\"\ninstrucciones_3Clock = core.Clock()\ntext_3 = visual.TextStim(win=win, name='text_3',\n text='Si piensas que la imagen es un hombre o una mujer, utiliza los botones así:\\n\\nizq:HOMBRE der:MUJER\\n\\nPresiona un botón para iniciar...',\n font='Arial',\n pos=(0, 0), height=0.08, wrapWidth=None, ori=0, \n color='white', colorSpace='rgb', opacity=1, \n languageStyle='LTR',\n depth=0.0);\n\n# Initialize components for Routine \"start\"\nstartClock = core.Clock()\npolygon_3 = visual.ShapeStim(\n win=win, name='polygon_3', vertices='cross',\n size=(0.05, 0.05),\n ori=0, pos=(0, 0),\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\n fillColor=[1,1,1], fillColorSpace='rgb',\n opacity=.2, depth=0.0, interpolate=True)\n\n# Initialize components for Routine \"scrambled\"\nscrambledClock = core.Clock()\nimage_5 = visual.ImageStim(\n win=win, name='image_5',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"sad\"\nsadClock = core.Clock()\nimage_2 = visual.ImageStim(\n win=win, name='image_2',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"happy\"\nhappyClock = core.Clock()\nimage = visual.ImageStim(\n win=win, name='image',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"neutral\"\nneutralClock = core.Clock()\nimage_4 = visual.ImageStim(\n win=win, name='image_4',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"angry\"\nangryClock = core.Clock()\nimage_3 = visual.ImageStim(\n win=win, name='image_3',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"blank\"\nblankClock = core.Clock()\npolygon = visual.ShapeStim(\n win=win, name='polygon', vertices='cross',\n size=(.05, .05),\n ori=0, pos=[0,0],\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\n fillColor=[1,1,1], fillColorSpace='rgb',\n opacity=.2, depth=0.0, interpolate=True)\n\n# Initialize components for Routine \"scrambled\"\nscrambledClock = core.Clock()\nimage_5 = visual.ImageStim(\n win=win, name='image_5',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"sad\"\nsadClock = core.Clock()\nimage_2 = visual.ImageStim(\n win=win, name='image_2',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n 
color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"happy\"\nhappyClock = core.Clock()\nimage = visual.ImageStim(\n win=win, name='image',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"neutral\"\nneutralClock = core.Clock()\nimage_4 = visual.ImageStim(\n win=win, name='image_4',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"angry\"\nangryClock = core.Clock()\nimage_3 = visual.ImageStim(\n win=win, name='image_3',\n image='sin', mask=None,\n ori=0, pos=(0, 0), size=(1, 1),\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\n# Initialize components for Routine \"blank\"\nblankClock = core.Clock()\npolygon = visual.ShapeStim(\n win=win, name='polygon', vertices='cross',\n size=(.05, .05),\n ori=0, pos=[0,0],\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\n fillColor=[1,1,1], fillColorSpace='rgb',\n opacity=.2, depth=0.0, interpolate=True)\n\n# Initialize components for Routine \"end\"\nendClock = core.Clock()\npolygon_2 = visual.ShapeStim(\n win=win, name='polygon_2',\n vertices=[[-(0.5, 0.5)[0]/2.0,-(0.5, 0.5)[1]/2.0], [+(0.5, 0.5)[0]/2.0,-(0.5, 0.5)[1]/2.0], [0,(0.5, 0.5)[1]/2.0]],\n ori=0, pos=(0, 0),\n lineWidth=1, lineColor=[1,1,1], lineColorSpace='rgb',\n fillColor=[1,1,1], fillColorSpace='rgb',\n opacity=0, depth=0.0, interpolate=True)\n\n# Create some handy timers\nglobalClock = core.Clock() # to track the time since experiment started\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \n\n# ------Prepare to start Routine \"instructions\"-------\nt = 0\ninstructionsClock.reset() # clock\nframeN = -1\ncontinueRoutine = True\n# update component parameters for each repeat\nkey_resp_9 = event.BuilderKeyResponse()\n# keep track of which components have finished\ninstructionsComponents = [text, key_resp_9]\nfor thisComponent in instructionsComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n# -------Start Routine \"instructions\"-------\nwhile continueRoutine:\n # get current time\n t = instructionsClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *text* updates\n if t >= 0.0 and text.status == NOT_STARTED:\n # keep track of start time/frame for later\n text.tStart = t\n text.frameNStart = frameN # exact frame index\n text.setAutoDraw(True)\n \n # *key_resp_9* updates\n if t >= 0.0 and key_resp_9.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_9.tStart = t\n key_resp_9.frameNStart = frameN # exact frame index\n key_resp_9.status = STARTED\n # keyboard checking is just starting\n event.clearEvents(eventType='keyboard')\n if key_resp_9.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n # a response ends the routine\n continueRoutine = False\n \n # check for quit (typically the Esc key)\n if endExpNow or 
event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instructionsComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n# -------Ending Routine \"instructions\"-------\nfor thisComponent in instructionsComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# the Routine \"instructions\" was not non-slip safe, so reset the non-slip timer\nroutineTimer.reset()\n\n# ------Prepare to start Routine \"instructions2\"-------\nt = 0\ninstructions2Clock.reset() # clock\nframeN = -1\ncontinueRoutine = True\n# update component parameters for each repeat\nkey_resp_10 = event.BuilderKeyResponse()\n# keep track of which components have finished\ninstructions2Components = [text_2, key_resp_10]\nfor thisComponent in instructions2Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n# -------Start Routine \"instructions2\"-------\nwhile continueRoutine:\n # get current time\n t = instructions2Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *text_2* updates\n if t >= 0.0 and text_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n text_2.tStart = t\n text_2.frameNStart = frameN # exact frame index\n text_2.setAutoDraw(True)\n \n # *key_resp_10* updates\n if t >= 0.0 and key_resp_10.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_10.tStart = t\n key_resp_10.frameNStart = frameN # exact frame index\n key_resp_10.status = STARTED\n # keyboard checking is just starting\n event.clearEvents(eventType='keyboard')\n if key_resp_10.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n # a response ends the routine\n continueRoutine = False\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instructions2Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n# -------Ending Routine \"instructions2\"-------\nfor thisComponent in instructions2Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# the Routine \"instructions2\" was not non-slip safe, so reset the non-slip timer\nroutineTimer.reset()\n\n# ------Prepare to start Routine \"instrucciones_3\"-------\nt = 0\ninstrucciones_3Clock.reset() # clock\nframeN = -1\ncontinueRoutine = True\n# update component parameters for each repeat\nkey_resp_8 = 
event.BuilderKeyResponse()\n# keep track of which components have finished\ninstrucciones_3Components = [text_3, key_resp_8]\nfor thisComponent in instrucciones_3Components:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n# -------Start Routine \"instrucciones_3\"-------\nwhile continueRoutine:\n # get current time\n t = instrucciones_3Clock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *text_3* updates\n if t >= 0.0 and text_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n text_3.tStart = t\n text_3.frameNStart = frameN # exact frame index\n text_3.setAutoDraw(True)\n \n # *key_resp_8* updates\n if t >= 0.0 and key_resp_8.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_8.tStart = t\n key_resp_8.frameNStart = frameN # exact frame index\n key_resp_8.status = STARTED\n # keyboard checking is just starting\n event.clearEvents(eventType='keyboard')\n if key_resp_8.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n # a response ends the routine\n continueRoutine = False\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in instrucciones_3Components:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n# -------Ending Routine \"instrucciones_3\"-------\nfor thisComponent in instrucciones_3Components:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# the Routine \"instrucciones_3\" was not non-slip safe, so reset the non-slip timer\nroutineTimer.reset()\n\n# ------Prepare to start Routine \"start\"-------\nt = 0\nstartClock.reset() # clock\nframeN = -1\ncontinueRoutine = True\n# update component parameters for each repeat\nkey_resp_11 = event.BuilderKeyResponse()\n# keep track of which components have finished\nstartComponents = [polygon_3, key_resp_11]\nfor thisComponent in startComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n# -------Start Routine \"start\"-------\nwhile continueRoutine:\n # get current time\n t = startClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *polygon_3* updates\n if t >= 0.0 and polygon_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n polygon_3.tStart = t\n polygon_3.frameNStart = frameN # exact frame index\n polygon_3.setAutoDraw(True)\n \n # *key_resp_11* updates\n if t >= 0.0 and key_resp_11.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_11.tStart = t\n key_resp_11.frameNStart = frameN # exact frame index\n key_resp_11.status = STARTED\n # keyboard checking is just starting\n event.clearEvents(eventType='keyboard')\n if key_resp_11.status == STARTED:\n theseKeys = event.getKeys(keyList=['s'])\n \n # check 
for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n # a response ends the routine\n continueRoutine = False\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in startComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n# -------Ending Routine \"start\"-------\nfor thisComponent in startComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# the Routine \"start\" was not non-slip safe, so reset the non-slip timer\nroutineTimer.reset()\n\n# set up handler to look after randomisation of conditions etc\ntrials = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('scrambled.csv'),\n seed=None, name='trials')\nthisExp.addLoop(trials) # add the loop to the experiment\nthisTrial = trials.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\nif thisTrial != None:\n for paramName in thisTrial:\n exec('{} = thisTrial[paramName]'.format(paramName))\n\nfor thisTrial in trials:\n currentLoop = trials\n # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\n if thisTrial != None:\n for paramName in thisTrial:\n exec('{} = thisTrial[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"scrambled\"-------\n t = 0\n scrambledClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_5.setImage(stimulus)\n key_resp_6 = event.BuilderKeyResponse()\n # keep track of which components have finished\n scrambledComponents = [image_5, key_resp_6]\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"scrambled\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = scrambledClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_5* updates\n if t >= 0.0 and image_5.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_5.tStart = t\n image_5.frameNStart = frameN # exact frame index\n image_5.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_5.status == STARTED and t >= frameRemains:\n image_5.setAutoDraw(False)\n \n # *key_resp_6* updates\n if t >= 0.0 and key_resp_6.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_6.tStart = t\n key_resp_6.frameNStart = frameN # exact frame index\n key_resp_6.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_6.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_6.status == STARTED and t >= 
frameRemains:\n key_resp_6.status = FINISHED\n if key_resp_6.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_6.keys == []: # then this was the first keypress\n key_resp_6.keys = theseKeys[0] # just the first key pressed\n key_resp_6.rt = key_resp_6.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"scrambled\"-------\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_6.keys in ['', [], None]: # No response was made\n key_resp_6.keys=None\n trials.addData('key_resp_6.keys',key_resp_6.keys)\n if key_resp_6.keys != None: # we had a response\n trials.addData('key_resp_6.rt', key_resp_6.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_2 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('sad.csv'),\n seed=None, name='trials_2')\nthisExp.addLoop(trials_2) # add the loop to the experiment\nthisTrial_2 = trials_2.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)\nif thisTrial_2 != None:\n for paramName in thisTrial_2:\n exec('{} = thisTrial_2[paramName]'.format(paramName))\n\nfor thisTrial_2 in trials_2:\n currentLoop = trials_2\n # abbreviate parameter names if possible (e.g. 
rgb = thisTrial_2.rgb)\n if thisTrial_2 != None:\n for paramName in thisTrial_2:\n exec('{} = thisTrial_2[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"sad\"-------\n t = 0\n sadClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_2.setImage(stimulus)\n key_resp_3 = event.BuilderKeyResponse()\n # keep track of which components have finished\n sadComponents = [image_2, key_resp_3]\n for thisComponent in sadComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"sad\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = sadClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_2* updates\n if t >= 0.0 and image_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_2.tStart = t\n image_2.frameNStart = frameN # exact frame index\n image_2.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_2.status == STARTED and t >= frameRemains:\n image_2.setAutoDraw(False)\n \n # *key_resp_3* updates\n if t >= 0.0 and key_resp_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_3.tStart = t\n key_resp_3.frameNStart = frameN # exact frame index\n key_resp_3.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_3.status == STARTED and t >= frameRemains:\n key_resp_3.status = FINISHED\n if key_resp_3.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'd'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_3.keys == []: # then this was the first keypress\n key_resp_3.keys = theseKeys[0] # just the first key pressed\n key_resp_3.rt = key_resp_3.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in sadComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"sad\"-------\n for thisComponent in sadComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_3.keys in ['', [], None]: # No response was made\n key_resp_3.keys=None\n trials_2.addData('key_resp_3.keys',key_resp_3.keys)\n if key_resp_3.keys != None: # we had a response\n trials_2.addData('key_resp_3.rt', key_resp_3.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_2'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_3 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n 
trialList=data.importConditions('happy.csv'),\n seed=None, name='trials_3')\nthisExp.addLoop(trials_3) # add the loop to the experiment\nthisTrial_3 = trials_3.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_3.rgb)\nif thisTrial_3 != None:\n for paramName in thisTrial_3:\n exec('{} = thisTrial_3[paramName]'.format(paramName))\n\nfor thisTrial_3 in trials_3:\n currentLoop = trials_3\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_3.rgb)\n if thisTrial_3 != None:\n for paramName in thisTrial_3:\n exec('{} = thisTrial_3[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"happy\"-------\n t = 0\n happyClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image.setImage(stimulus)\n key_resp_2 = event.BuilderKeyResponse()\n # keep track of which components have finished\n happyComponents = [image, key_resp_2]\n for thisComponent in happyComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"happy\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = happyClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image* updates\n if t >= 0.0 and image.status == NOT_STARTED:\n # keep track of start time/frame for later\n image.tStart = t\n image.frameNStart = frameN # exact frame index\n image.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image.status == STARTED and t >= frameRemains:\n image.setAutoDraw(False)\n \n # *key_resp_2* updates\n if t >= 0.0 and key_resp_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_2.tStart = t\n key_resp_2.frameNStart = frameN # exact frame index\n key_resp_2.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_2.status == STARTED and t >= frameRemains:\n key_resp_2.status = FINISHED\n if key_resp_2.status == STARTED:\n theseKeys = event.getKeys(keyList=['b', 'c'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_2.keys == []: # then this was the first keypress\n key_resp_2.keys = theseKeys[0] # just the first key pressed\n key_resp_2.rt = key_resp_2.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in happyComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"happy\"-------\n for thisComponent in happyComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check 
responses\n if key_resp_2.keys in ['', [], None]: # No response was made\n key_resp_2.keys=None\n trials_3.addData('key_resp_2.keys',key_resp_2.keys)\n if key_resp_2.keys != None: # we had a response\n trials_3.addData('key_resp_2.rt', key_resp_2.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_3'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_4 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('neutral.csv'),\n seed=None, name='trials_4')\nthisExp.addLoop(trials_4) # add the loop to the experiment\nthisTrial_4 = trials_4.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_4.rgb)\nif thisTrial_4 != None:\n for paramName in thisTrial_4:\n exec('{} = thisTrial_4[paramName]'.format(paramName))\n\nfor thisTrial_4 in trials_4:\n currentLoop = trials_4\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_4.rgb)\n if thisTrial_4 != None:\n for paramName in thisTrial_4:\n exec('{} = thisTrial_4[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"neutral\"-------\n t = 0\n neutralClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_4.setImage(stimulus)\n key_resp_5 = event.BuilderKeyResponse()\n # keep track of which components have finished\n neutralComponents = [image_4, key_resp_5]\n for thisComponent in neutralComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"neutral\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = neutralClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_4* updates\n if t >= 0.0 and image_4.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_4.tStart = t\n image_4.frameNStart = frameN # exact frame index\n image_4.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_4.status == STARTED and t >= frameRemains:\n image_4.setAutoDraw(False)\n \n # *key_resp_5* updates\n if t >= 0.0 and key_resp_5.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_5.tStart = t\n key_resp_5.frameNStart = frameN # exact frame index\n key_resp_5.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_5.status == STARTED and t >= frameRemains:\n key_resp_5.status = FINISHED\n if key_resp_5.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_5.keys == []: # then this was the first keypress\n key_resp_5.keys = theseKeys[0] # just the first key pressed\n key_resp_5.rt = key_resp_5.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n 
for thisComponent in neutralComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"neutral\"-------\n for thisComponent in neutralComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_5.keys in ['', [], None]: # No response was made\n key_resp_5.keys=None\n trials_4.addData('key_resp_5.keys',key_resp_5.keys)\n if key_resp_5.keys != None: # we had a response\n trials_4.addData('key_resp_5.rt', key_resp_5.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_4'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_5 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('angry.csv'),\n seed=None, name='trials_5')\nthisExp.addLoop(trials_5) # add the loop to the experiment\nthisTrial_5 = trials_5.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_5.rgb)\nif thisTrial_5 != None:\n for paramName in thisTrial_5:\n exec('{} = thisTrial_5[paramName]'.format(paramName))\n\nfor thisTrial_5 in trials_5:\n currentLoop = trials_5\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_5.rgb)\n if thisTrial_5 != None:\n for paramName in thisTrial_5:\n exec('{} = thisTrial_5[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"angry\"-------\n t = 0\n angryClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_3.setImage(stimulus)\n key_resp_4 = event.BuilderKeyResponse()\n # keep track of which components have finished\n angryComponents = [image_3, key_resp_4]\n for thisComponent in angryComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"angry\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = angryClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_3* updates\n if t >= 0.0 and image_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_3.tStart = t\n image_3.frameNStart = frameN # exact frame index\n image_3.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_3.status == STARTED and t >= frameRemains:\n image_3.setAutoDraw(False)\n \n # *key_resp_4* updates\n if t >= 0.0 and key_resp_4.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_4.tStart = t\n key_resp_4.frameNStart = frameN # exact frame index\n key_resp_4.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_4.status == STARTED and t >= frameRemains:\n key_resp_4.status = FINISHED\n if key_resp_4.status == STARTED:\n theseKeys = event.getKeys(keyList=['b', 'c'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if 
key_resp_4.keys == []: # then this was the first keypress\n key_resp_4.keys = theseKeys[0] # just the first key pressed\n key_resp_4.rt = key_resp_4.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in angryComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"angry\"-------\n for thisComponent in angryComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_4.keys in ['', [], None]: # No response was made\n key_resp_4.keys=None\n trials_5.addData('key_resp_4.keys',key_resp_4.keys)\n if key_resp_4.keys != None: # we had a response\n trials_5.addData('key_resp_4.rt', key_resp_4.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_5'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_6 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('blank.csv'),\n seed=None, name='trials_6')\nthisExp.addLoop(trials_6) # add the loop to the experiment\nthisTrial_6 = trials_6.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_6.rgb)\nif thisTrial_6 != None:\n for paramName in thisTrial_6:\n exec('{} = thisTrial_6[paramName]'.format(paramName))\n\nfor thisTrial_6 in trials_6:\n currentLoop = trials_6\n # abbreviate parameter names if possible (e.g. 
rgb = thisTrial_6.rgb)\n if thisTrial_6 != None:\n for paramName in thisTrial_6:\n exec('{} = thisTrial_6[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"blank\"-------\n t = 0\n blankClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n polygon.setPos([x, y])\n key_resp_7 = event.BuilderKeyResponse()\n # keep track of which components have finished\n blankComponents = [polygon, key_resp_7]\n for thisComponent in blankComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"blank\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = blankClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *polygon* updates\n if t >= 0.0 and polygon.status == NOT_STARTED:\n # keep track of start time/frame for later\n polygon.tStart = t\n polygon.frameNStart = frameN # exact frame index\n polygon.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if polygon.status == STARTED and t >= frameRemains:\n polygon.setAutoDraw(False)\n \n # *key_resp_7* updates\n if t >= 0.0 and key_resp_7.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_7.tStart = t\n key_resp_7.frameNStart = frameN # exact frame index\n key_resp_7.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_7.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_7.status == STARTED and t >= frameRemains:\n key_resp_7.status = FINISHED\n if key_resp_7.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_7.keys == []: # then this was the first keypress\n key_resp_7.keys = theseKeys[0] # just the first key pressed\n key_resp_7.rt = key_resp_7.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in blankComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"blank\"-------\n for thisComponent in blankComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_7.keys in ['', [], None]: # No response was made\n key_resp_7.keys=None\n trials_6.addData('key_resp_7.keys',key_resp_7.keys)\n if key_resp_7.keys != None: # we had a response\n trials_6.addData('key_resp_7.rt', key_resp_7.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_6'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_7 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n 
trialList=data.importConditions('scrambled.csv'),\n seed=None, name='trials_7')\nthisExp.addLoop(trials_7) # add the loop to the experiment\nthisTrial_7 = trials_7.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_7.rgb)\nif thisTrial_7 != None:\n for paramName in thisTrial_7:\n exec('{} = thisTrial_7[paramName]'.format(paramName))\n\nfor thisTrial_7 in trials_7:\n currentLoop = trials_7\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_7.rgb)\n if thisTrial_7 != None:\n for paramName in thisTrial_7:\n exec('{} = thisTrial_7[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"scrambled\"-------\n t = 0\n scrambledClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_5.setImage(stimulus)\n key_resp_6 = event.BuilderKeyResponse()\n # keep track of which components have finished\n scrambledComponents = [image_5, key_resp_6]\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"scrambled\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = scrambledClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_5* updates\n if t >= 0.0 and image_5.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_5.tStart = t\n image_5.frameNStart = frameN # exact frame index\n image_5.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_5.status == STARTED and t >= frameRemains:\n image_5.setAutoDraw(False)\n \n # *key_resp_6* updates\n if t >= 0.0 and key_resp_6.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_6.tStart = t\n key_resp_6.frameNStart = frameN # exact frame index\n key_resp_6.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_6.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_6.status == STARTED and t >= frameRemains:\n key_resp_6.status = FINISHED\n if key_resp_6.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_6.keys == []: # then this was the first keypress\n key_resp_6.keys = theseKeys[0] # just the first key pressed\n key_resp_6.rt = key_resp_6.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"scrambled\"-------\n for thisComponent in scrambledComponents:\n if hasattr(thisComponent, 
\"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_6.keys in ['', [], None]: # No response was made\n key_resp_6.keys=None\n trials_7.addData('key_resp_6.keys',key_resp_6.keys)\n if key_resp_6.keys != None: # we had a response\n trials_7.addData('key_resp_6.rt', key_resp_6.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_7'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_8 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('sad.csv'),\n seed=None, name='trials_8')\nthisExp.addLoop(trials_8) # add the loop to the experiment\nthisTrial_8 = trials_8.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_8.rgb)\nif thisTrial_8 != None:\n for paramName in thisTrial_8:\n exec('{} = thisTrial_8[paramName]'.format(paramName))\n\nfor thisTrial_8 in trials_8:\n currentLoop = trials_8\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_8.rgb)\n if thisTrial_8 != None:\n for paramName in thisTrial_8:\n exec('{} = thisTrial_8[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"sad\"-------\n t = 0\n sadClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_2.setImage(stimulus)\n key_resp_3 = event.BuilderKeyResponse()\n # keep track of which components have finished\n sadComponents = [image_2, key_resp_3]\n for thisComponent in sadComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"sad\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = sadClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_2* updates\n if t >= 0.0 and image_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_2.tStart = t\n image_2.frameNStart = frameN # exact frame index\n image_2.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_2.status == STARTED and t >= frameRemains:\n image_2.setAutoDraw(False)\n \n # *key_resp_3* updates\n if t >= 0.0 and key_resp_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_3.tStart = t\n key_resp_3.frameNStart = frameN # exact frame index\n key_resp_3.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_3.status == STARTED and t >= frameRemains:\n key_resp_3.status = FINISHED\n if key_resp_3.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'd'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_3.keys == []: # then this was the first keypress\n key_resp_3.keys = theseKeys[0] # just the first key pressed\n key_resp_3.rt = key_resp_3.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at 
least one component still running\n for thisComponent in sadComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"sad\"-------\n for thisComponent in sadComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_3.keys in ['', [], None]: # No response was made\n key_resp_3.keys=None\n trials_8.addData('key_resp_3.keys',key_resp_3.keys)\n if key_resp_3.keys != None: # we had a response\n trials_8.addData('key_resp_3.rt', key_resp_3.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_8'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_9 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('happy.csv'),\n seed=None, name='trials_9')\nthisExp.addLoop(trials_9) # add the loop to the experiment\nthisTrial_9 = trials_9.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_9.rgb)\nif thisTrial_9 != None:\n for paramName in thisTrial_9:\n exec('{} = thisTrial_9[paramName]'.format(paramName))\n\nfor thisTrial_9 in trials_9:\n currentLoop = trials_9\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_9.rgb)\n if thisTrial_9 != None:\n for paramName in thisTrial_9:\n exec('{} = thisTrial_9[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"happy\"-------\n t = 0\n happyClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image.setImage(stimulus)\n key_resp_2 = event.BuilderKeyResponse()\n # keep track of which components have finished\n happyComponents = [image, key_resp_2]\n for thisComponent in happyComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"happy\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = happyClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image* updates\n if t >= 0.0 and image.status == NOT_STARTED:\n # keep track of start time/frame for later\n image.tStart = t\n image.frameNStart = frameN # exact frame index\n image.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image.status == STARTED and t >= frameRemains:\n image.setAutoDraw(False)\n \n # *key_resp_2* updates\n if t >= 0.0 and key_resp_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_2.tStart = t\n key_resp_2.frameNStart = frameN # exact frame index\n key_resp_2.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_2.status == STARTED and t >= frameRemains:\n key_resp_2.status = FINISHED\n if key_resp_2.status == STARTED:\n theseKeys = event.getKeys(keyList=['b', 'c'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if 
key_resp_2.keys == []: # then this was the first keypress\n key_resp_2.keys = theseKeys[0] # just the first key pressed\n key_resp_2.rt = key_resp_2.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in happyComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"happy\"-------\n for thisComponent in happyComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_2.keys in ['', [], None]: # No response was made\n key_resp_2.keys=None\n trials_9.addData('key_resp_2.keys',key_resp_2.keys)\n if key_resp_2.keys != None: # we had a response\n trials_9.addData('key_resp_2.rt', key_resp_2.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_9'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_10 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('neutral.csv'),\n seed=None, name='trials_10')\nthisExp.addLoop(trials_10) # add the loop to the experiment\nthisTrial_10 = trials_10.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_10.rgb)\nif thisTrial_10 != None:\n for paramName in thisTrial_10:\n exec('{} = thisTrial_10[paramName]'.format(paramName))\n\nfor thisTrial_10 in trials_10:\n currentLoop = trials_10\n # abbreviate parameter names if possible (e.g. 
rgb = thisTrial_10.rgb)\n if thisTrial_10 != None:\n for paramName in thisTrial_10:\n exec('{} = thisTrial_10[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"neutral\"-------\n t = 0\n neutralClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_4.setImage(stimulus)\n key_resp_5 = event.BuilderKeyResponse()\n # keep track of which components have finished\n neutralComponents = [image_4, key_resp_5]\n for thisComponent in neutralComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"neutral\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = neutralClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_4* updates\n if t >= 0.0 and image_4.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_4.tStart = t\n image_4.frameNStart = frameN # exact frame index\n image_4.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_4.status == STARTED and t >= frameRemains:\n image_4.setAutoDraw(False)\n \n # *key_resp_5* updates\n if t >= 0.0 and key_resp_5.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_5.tStart = t\n key_resp_5.frameNStart = frameN # exact frame index\n key_resp_5.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_5.status == STARTED and t >= frameRemains:\n key_resp_5.status = FINISHED\n if key_resp_5.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_5.keys == []: # then this was the first keypress\n key_resp_5.keys = theseKeys[0] # just the first key pressed\n key_resp_5.rt = key_resp_5.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in neutralComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"neutral\"-------\n for thisComponent in neutralComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_5.keys in ['', [], None]: # No response was made\n key_resp_5.keys=None\n trials_10.addData('key_resp_5.keys',key_resp_5.keys)\n if key_resp_5.keys != None: # we had a response\n trials_10.addData('key_resp_5.rt', key_resp_5.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_10'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_11 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n 
trialList=data.importConditions('angry.csv'),\n seed=None, name='trials_11')\nthisExp.addLoop(trials_11) # add the loop to the experiment\nthisTrial_11 = trials_11.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_11.rgb)\nif thisTrial_11 != None:\n for paramName in thisTrial_11:\n exec('{} = thisTrial_11[paramName]'.format(paramName))\n\nfor thisTrial_11 in trials_11:\n currentLoop = trials_11\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_11.rgb)\n if thisTrial_11 != None:\n for paramName in thisTrial_11:\n exec('{} = thisTrial_11[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"angry\"-------\n t = 0\n angryClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n image_3.setImage(stimulus)\n key_resp_4 = event.BuilderKeyResponse()\n # keep track of which components have finished\n angryComponents = [image_3, key_resp_4]\n for thisComponent in angryComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"angry\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = angryClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *image_3* updates\n if t >= 0.0 and image_3.status == NOT_STARTED:\n # keep track of start time/frame for later\n image_3.tStart = t\n image_3.frameNStart = frameN # exact frame index\n image_3.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if image_3.status == STARTED and t >= frameRemains:\n image_3.setAutoDraw(False)\n \n # *key_resp_4* updates\n if t >= 0.0 and key_resp_4.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_4.tStart = t\n key_resp_4.frameNStart = frameN # exact frame index\n key_resp_4.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_4.status == STARTED and t >= frameRemains:\n key_resp_4.status = FINISHED\n if key_resp_4.status == STARTED:\n theseKeys = event.getKeys(keyList=['b', 'c'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_4.keys == []: # then this was the first keypress\n key_resp_4.keys = theseKeys[0] # just the first key pressed\n key_resp_4.rt = key_resp_4.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in angryComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"angry\"-------\n for thisComponent in angryComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n 
thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_4.keys in ['', [], None]: # No response was made\n key_resp_4.keys=None\n trials_11.addData('key_resp_4.keys',key_resp_4.keys)\n if key_resp_4.keys != None: # we had a response\n trials_11.addData('key_resp_4.rt', key_resp_4.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_11'\n\n\n# set up handler to look after randomisation of conditions etc\ntrials_12 = data.TrialHandler(nReps=1, method='random', \n extraInfo=expInfo, originPath=-1,\n trialList=data.importConditions('blank.csv'),\n seed=None, name='trials_12')\nthisExp.addLoop(trials_12) # add the loop to the experiment\nthisTrial_12 = trials_12.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb = thisTrial_12.rgb)\nif thisTrial_12 != None:\n for paramName in thisTrial_12:\n exec('{} = thisTrial_12[paramName]'.format(paramName))\n\nfor thisTrial_12 in trials_12:\n currentLoop = trials_12\n # abbreviate parameter names if possible (e.g. rgb = thisTrial_12.rgb)\n if thisTrial_12 != None:\n for paramName in thisTrial_12:\n exec('{} = thisTrial_12[paramName]'.format(paramName))\n \n # ------Prepare to start Routine \"blank\"-------\n t = 0\n blankClock.reset() # clock\n frameN = -1\n continueRoutine = True\n routineTimer.add(3.000000)\n # update component parameters for each repeat\n polygon.setPos([x, y])\n key_resp_7 = event.BuilderKeyResponse()\n # keep track of which components have finished\n blankComponents = [polygon, key_resp_7]\n for thisComponent in blankComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n \n # -------Start Routine \"blank\"-------\n while continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = blankClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *polygon* updates\n if t >= 0.0 and polygon.status == NOT_STARTED:\n # keep track of start time/frame for later\n polygon.tStart = t\n polygon.frameNStart = frameN # exact frame index\n polygon.setAutoDraw(True)\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if polygon.status == STARTED and t >= frameRemains:\n polygon.setAutoDraw(False)\n \n # *key_resp_7* updates\n if t >= 0.0 and key_resp_7.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_resp_7.tStart = t\n key_resp_7.frameNStart = frameN # exact frame index\n key_resp_7.status = STARTED\n # keyboard checking is just starting\n win.callOnFlip(key_resp_7.clock.reset) # t=0 on next screen flip\n event.clearEvents(eventType='keyboard')\n frameRemains = 0.0 + 3- win.monitorFramePeriod * 0.75 # most of one frame period left\n if key_resp_7.status == STARTED and t >= frameRemains:\n key_resp_7.status = FINISHED\n if key_resp_7.status == STARTED:\n theseKeys = event.getKeys(keyList=['c', 'b'])\n \n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n if key_resp_7.keys == []: # then this was the first keypress\n key_resp_7.keys = theseKeys[0] # just the first key pressed\n key_resp_7.rt = key_resp_7.clock.getTime()\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to 
True if at least one component still running\n for thisComponent in blankComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n \n # -------Ending Routine \"blank\"-------\n for thisComponent in blankComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_resp_7.keys in ['', [], None]: # No response was made\n key_resp_7.keys=None\n trials_12.addData('key_resp_7.keys',key_resp_7.keys)\n if key_resp_7.keys != None: # we had a response\n trials_12.addData('key_resp_7.rt', key_resp_7.rt)\n thisExp.nextEntry()\n \n# completed 1 repeats of 'trials_12'\n\n\n# ------Prepare to start Routine \"end\"-------\nt = 0\nendClock.reset() # clock\nframeN = -1\ncontinueRoutine = True\nroutineTimer.add(10.000000)\n# update component parameters for each repeat\n# keep track of which components have finished\nendComponents = [polygon_2]\nfor thisComponent in endComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n# -------Start Routine \"end\"-------\nwhile continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = endClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n \n # *polygon_2* updates\n if t >= 0.0 and polygon_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n polygon_2.tStart = t\n polygon_2.frameNStart = frameN # exact frame index\n polygon_2.setAutoDraw(True)\n frameRemains = 0.0 + 10- win.monitorFramePeriod * 0.75 # most of one frame period left\n if polygon_2.status == STARTED and t >= frameRemains:\n polygon_2.setAutoDraw(False)\n \n # check for quit (typically the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n \n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in endComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n \n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n# -------Ending Routine \"end\"-------\nfor thisComponent in endComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n# these shouldn't be strictly necessary (should auto-save)\nthisExp.saveAsWideText(filename+'.csv')\nthisExp.saveAsPickle(filename)\nlogging.flush()\n# make sure everything is closed down\nthisExp.abort() # or data files will save again on exit\nwin.close()\ncore.quit()\n"
},
{
"alpha_fraction": 0.7566909790039062,
"alphanum_fraction": 0.7615571618080139,
"avg_line_length": 57.71428680419922,
"blob_id": "90828dd8242af8978128f367748510464eb993aa",
"content_id": "64b5980f7c6e304220c0ea842168d2be001ebae0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 7,
"path": "/README.md",
"repo_name": "isacdaavid/emotional-faces-psychopy-task",
"src_encoding": "UTF-8",
"text": "- Psychopy task per-se: `emofaces/emofaces{1..5}.pyexp`. Button\n presses saved in `emofaces/data/`\n- Behavioral analysis (in-scanner button presses): `task_performance.R`\n- Behavioral analysis (stimuli validation): `posthoc-stimulus-validation/`\n\n - `gui.R`: runs randomized stimulus-category classification task and saves responses.\n - `analysis.R`: runs both individual and group analysis with responses\n"
}
] | 5 |
xaxadmin/asap-authentication-python | https://github.com/xaxadmin/asap-authentication-python | 32ee649faccefe95759585a639e8b7bb3108cb8c | c9df73fa96f160217ebd19da76137a86a20bc45d | 1986effaf98fc30ed993de90e93c7346c56e3ad7 | refs/heads/master | 2018-04-06T11:33:03.845427 | 2017-04-20T23:24:54 | 2017-04-20T23:24:54 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.713567852973938,
"alphanum_fraction": 0.7236180901527405,
"avg_line_length": 21.11111068725586,
"blob_id": "7c18e089c79b3145f4f681b5d32a386d0928dacd",
"content_id": "1ef74488069ea06d7b604d49db0e04336d3cf09a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 199,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 9,
"path": "/atlassian_jwt_auth/contrib/aiohttp/__init__.py",
"repo_name": "xaxadmin/asap-authentication-python",
"src_encoding": "UTF-8",
"text": "\"\"\"Provide asyncio support\"\"\"\nimport sys\n\nif sys.version_info >= (3, 5):\n from .auth import JWTAuth\n from .key import HTTPSPublicKeyRetriever\n from .verifier import JWTAuthVerifier\n\ndel sys\n"
}
] | 1 |
Swetha-14/opinion-mining | https://github.com/Swetha-14/opinion-mining | fa249a56d953c136b778711864aceb1507843f0d | 98bbfa4fefb343454668f2775a4e2756e94477a4 | 2c32193626ade72c798a60442ffea299724b82ee | refs/heads/master | 2023-07-15T00:45:03.443896 | 2021-08-17T12:22:02 | 2021-08-17T12:22:02 | 388,768,707 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.705539345741272,
"alphanum_fraction": 0.7077259421348572,
"avg_line_length": 41.90625,
"blob_id": "af256b31f11fd402b046b4c79aaf50f641755d12",
"content_id": "051843acc35a3baf2975d38de4f1111eb181522b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1372,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 32,
"path": "/network/models.py",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.core.validators import MinValueValidator, MaxLengthValidator\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom profanity.validators import validate_is_profane\n\nclass User(AbstractUser):\n followers = models.ManyToManyField('self',symmetrical = False, related_name='following', blank=True)\n\n\nclass Post(models.Model):\n user = models.ForeignKey('User', on_delete=models.CASCADE, related_name=\"posts\")\n content = models.TextField(blank=True)\n image = models.ImageField(upload_to='images/', blank=True, null = True)\n likes = models.ManyToManyField('User', related_name=\"liked_posts\", blank=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"user\": self.user.username,\n \"content\": self.content,\n \"likes\": self.likes,\n \"timestamp\": self.timestamp.strftime(\"%b %-d %Y, %-I:%M %p\")\n }\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(\"User\", on_delete=models.CASCADE, related_name=\"commented_user\")\n post = models.ForeignKey(\"Post\", on_delete=models.CASCADE, related_name=\"comments\")\n comment = models.TextField(max_length=500, validators=[validate_is_profane])"
},
{
"alpha_fraction": 0.5926622748374939,
"alphanum_fraction": 0.6284101605415344,
"avg_line_length": 35.655174255371094,
"blob_id": "53c32c1d07c5d099d43090ce6478f851dc62029e",
"content_id": "6eeb2bd000e9dc650021f97d0a958fb54262de46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 29,
"path": "/network/migrations/0010_auto_20210606_1750.py",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.3 on 2021-06-06 12:20\n\nfrom django.conf import settings\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('network', '0009_auto_20210519_1336'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='comments',\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('comment', models.TextField(max_length=500, validators=[django.core.validators.MaxLengthValidator(1000)])),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='network.post')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commented_user', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.5872340202331543,
"alphanum_fraction": 0.6340425610542297,
"avg_line_length": 23.736841201782227,
"blob_id": "48e03c81f30c1bfcb9e0556b72f4785e44539684",
"content_id": "600832bbd044a0e695925f6223789fbda7efff47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 470,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 19,
"path": "/network/migrations/0014_alter_comment_comment.py",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.3 on 2021-07-23 08:09\n\nfrom django.db import migrations, models\nimport profanity.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('network', '0013_alter_post_image'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='comment',\n name='comment',\n field=models.TextField(max_length=500, validators=[profanity.validators.validate_is_profane]),\n ),\n ]\n"
},
{
"alpha_fraction": 0.554347813129425,
"alphanum_fraction": 0.5615941882133484,
"avg_line_length": 17.46666717529297,
"blob_id": "9ce8fc28f63245278fc1f56cdce45efa4142d1ce",
"content_id": "7eb093dc2a50cff60a47f6977b0364755ce52074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 15,
"path": "/network/templates/network/following.html",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "{% extends \"network/layout.html\" %}\n\n{% load static %}\n\n{% block body %}\n <h1>Following</h1>\n <br/>\n <div id=\"posts-holder\">\n </div>\n\n{% endblock %}\n\n{% block script %}\n <script id=\"following-script\" src=\"{% static 'network/edit.js' %}\"></script>\n{% endblock %}"
},
{
"alpha_fraction": 0.5727272629737854,
"alphanum_fraction": 0.607272744178772,
"avg_line_length": 24,
"blob_id": "919e4940980952242ae9b21d10165ffd461e89a1",
"content_id": "083829be7d9c0df504e70729b0ff2e990cbfd338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 22,
"path": "/network/migrations/0009_auto_20210519_1336.py",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.3 on 2021-05-19 08:06\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('network', '0008_alter_post_image'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='comments',\n field=models.ManyToManyField(blank=True, related_name='commented_posts', to=settings.AUTH_USER_MODEL),\n ),\n migrations.DeleteModel(\n name='Comment',\n ),\n ]\n"
},
{
"alpha_fraction": 0.6133651733398438,
"alphanum_fraction": 0.6169450879096985,
"avg_line_length": 29.756879806518555,
"blob_id": "47686f3fd762a09e1c8759c32370982b66795897",
"content_id": "710da9a787d30675cb461eb3f7d00bf7e14d482b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6704,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 218,
"path": "/network/views.py",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "import json\nimport csv\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\nfrom better_profanity import profanity\nimport re\nimport string\n\nfrom .models import User, Post, Comment\nfrom .forms import *\n\n\n\n# Create Post\ndef index(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES or None)\n\n if form.is_valid():\n post = Post(\n user=User.objects.get(pk=request.user.id),\n content=form.cleaned_data.get(\"content\"),\n image = form.cleaned_data.get(\"image\")\n )\n \n post.save()\n return redirect('index')\n else:\n form = PostForm()\n \n posts = Post.objects.all().order_by(\"-timestamp\")\n paginator = Paginator(posts, 10)\n page_obj = paginator.get_page(request.GET.get('page'))\n return render(request, \"network/index.html\", {\n \"page_obj\": page_obj,\n 'form' : form\n\n })\n\n@login_required\ndef user_page(request, username):\n target_user = User.objects.get(username=username)\n\n posts = Post.objects.filter(user__username=username).order_by(\"-timestamp\")\n\n paginator = Paginator(posts, 10)\n page_obj = paginator.get_page(request.GET.get('page'))\n\n return render(request, \"network/profile.html\", {\n \"target_user\": target_user,\n \"page_obj\": page_obj\n })\n\n@login_required\ndef following(request):\n posts = [] \n all_posts = Post.objects.order_by(\"-timestamp\").all()\n \n # Iterating each and every post and checking if that post's owner is in the loggedin user's following list\n for post in all_posts:\n \n if post.user in request.user.following.all():\n # Then append that post to the post list initialized above\n posts.append(post)\n\n paginator = Paginator(posts, 10)\n page_obj = paginator.get_page(request.GET.get('page'))\n\n return render(request, \"network/following.html\", {\n \"page_obj\": page_obj\n })\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return 
HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n\n\n@csrf_exempt\ndef follow_user(request, username):\n target_user = User.objects.get(username=username)\n\n # Unfollow the user if the user is already in the followers\n if request.user in target_user.followers.all():\n target_user.followers.remove(request.user)\n target_user.save()\n\n return JsonResponse({\"message\": f'{username} unfollowed!'})\n\n # Follow the user if the user is not in the followers\n target_user.followers.add(request.user)\n target_user.save()\n\n return JsonResponse({\"message\": f'{username} followed!'})\n\n@csrf_exempt\n@login_required\ndef edit_post(request):\n # Liking a new post must be via PUT\n if request.method == \"PUT\":\n data = json.loads(request.body)\n post_id = data.get(\"postId\", \"\")\n content = data.get(\"content\", \"\")\n post = Post.objects.get(pk=post_id)\n\n # Ensure to edit only the user's own posts\n if request.user.username != post.user.username:\n return JsonResponse({\"error\": \"Can't edit another user's post\"}, status=403)\n \n post.content = content\n post.save()\n\n return JsonResponse({\"message\": \"Post edited!\"}, status=200)\n else:\n return JsonResponse({\"error\": \"Must be PUT method\"}, status=400)\n\n \n\n@csrf_exempt\n@login_required\ndef like_post(request):\n # Liking a new post must be via PUT\n if request.method == \"PUT\":\n data = json.loads(request.body)\n post_id = data.get(\"postId\", \"\")\n post = Post.objects.get(pk=post_id)\n\n # Unlike if the User already liked \n if request.user in post.likes.all():\n post.likes.remove(request.user)\n post.save()\n\n return JsonResponse({\"liked\": False}, status=200)\n \n # Else Like it \n post.likes.add(request.user)\n post.save()\n\n return JsonResponse({\"liked\": True}, status=200)\n else:\n return JsonResponse({\"error\": \"Must be PUT method\"}, status=400)\n\n@csrf_exempt\n@login_required\ndef comment_post(request, id):\n if request.method == \"POST\":\n \n if not request.user.is_authenticated:\n messages.warning(request, 'Log in to submit comments, like posts and more!')\n return HttpResponseRedirect(reverse(\"index\"))\n\n else:\n if request.POST[\"type\"] == \"comment\":\n added_comment = request.POST[\"content\"]\n\n comment = Comment(\n user = User.objects.get(pk=request.user.id),\n post = Post.objects.get(pk=id),\n comment = added_comment\n \n )\n\n comment.save()\n return HttpResponseRedirect(reverse(\"index\"))"
},
{
"alpha_fraction": 0.828000009059906,
"alphanum_fraction": 0.828000009059906,
"avg_line_length": 82.33333587646484,
"blob_id": "5dd4b80ced80427d974ff3625f495851ff3e0042",
"content_id": "4d4ad9e9e25996ce4662bcbe71c4fc9e62958646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 3,
"path": "/README.md",
"repo_name": "Swetha-14/opinion-mining",
"src_encoding": "UTF-8",
"text": "# Opinion Mining in Social Networking Website\n\n**A social networking website like Instagram for creating posts either by uploading an image or by text message with the functionality of profanity filtering by clearing any offensive content/language**\n"
}
] | 7 |
amnasamhadana/PersonalSiteTest | https://github.com/amnasamhadana/PersonalSiteTest | 964e6b06872e99a767f31750c510ee7f4222b050 | 67aeeb75b07373b9e69a7f7eb53dfb87b9bbcce1 | e7209bc96e83414b6c16a4abc3a342f9c714398c | refs/heads/master | 2020-03-23T16:13:35.253361 | 2018-07-21T09:54:48 | 2018-07-21T09:54:48 | 141,799,405 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6531531810760498,
"alphanum_fraction": 0.6711711883544922,
"avg_line_length": 19.272727966308594,
"blob_id": "2bddaabc48d977891b26ee3f9d6367b2a5224ee5",
"content_id": "14390a872d137df8479bf99a04b6e2b8acfb8ac6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/webpage.py",
"repo_name": "amnasamhadana/PersonalSiteTest",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom random import randint\n\n\napp = Flask(__name__)\[email protected]('/')\ndef loadPage():\n\treturn render_template('page.html')\n\nif __name__ == '__main__':\n\tapp.run(port = 4000, debug=True)"
}
] | 1 |
filwaitman/jinja2-standalone-compiler | https://github.com/filwaitman/jinja2-standalone-compiler | 2a3e7a689707e741333c58c567df7e388dd1fd47 | 4a422a44e408a6f17d349c1003ab8d4ce9538574 | 103e16a9646b5c00245963ea5ba87508c5247120 | refs/heads/master | 2021-01-16T23:59:34.862133 | 2020-05-22T19:22:50 | 2020-05-22T19:22:50 | 15,515,558 | 11 | 7 | null | 2013-12-29T21:50:22 | 2017-02-10T21:06:32 | 2017-02-24T22:58:01 | Python | [
{
"alpha_fraction": 0.6384780406951904,
"alphanum_fraction": 0.6393812894821167,
"avg_line_length": 38.89639663696289,
"blob_id": "181387e41eac9f23f967fb708d27957606c36b15",
"content_id": "7ea077b25d5e25b0b00c132e3a8a6e927317a009",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8857,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 222,
"path": "/jinja2_standalone_compiler/__init__.py",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals, print_function\nimport argparse\nimport fnmatch\nimport imp\nimport os\nimport re\nimport sys\n\nfrom jinja2 import Environment, FileSystemLoader, StrictUndefined, defaults\n\ntry:\n from colorama import init, Fore, Style\n init(autoreset=True)\n using_colorama = True\n\n style_JINJA_FILE = Fore.MAGENTA\n style_WARNING = Fore.YELLOW + Style.BRIGHT\n style_SETTING = Fore.CYAN\n style_RENDERED_FILE = Fore.CYAN\n style_SUCCESS = Fore.GREEN\n style_ALL_DONE = Fore.GREEN + Style.BRIGHT\n\nexcept:\n using_colorama = False\n\n style_JINJA_FILE = ''\n style_WARNING = ''\n style_SETTING = ''\n style_RENDERED_FILE = ''\n style_SUCCESS = ''\n style_ALL_DONE = ''\n\n\ndef print_log(msg, verbose_msg=False, verbose=False, silent=False):\n if silent:\n return\n\n if not verbose and verbose_msg:\n return\n\n print(msg)\n\n\ndef render_template(jinja_template, extra_variables, output_options, jinja_environment, template_root):\n environment = Environment(\n loader=FileSystemLoader(template_root),\n block_start_string=jinja_environment.get('BLOCK_START_STRING', defaults.BLOCK_START_STRING),\n block_end_string=jinja_environment.get('BLOCK_END_STRING', defaults.BLOCK_END_STRING),\n variable_start_string=jinja_environment.get('VARIABLE_START_STRING', defaults.VARIABLE_START_STRING),\n variable_end_string=jinja_environment.get('VARIABLE_END_STRING', defaults.VARIABLE_END_STRING),\n comment_start_string=jinja_environment.get('COMMENT_START_STRING', defaults.COMMENT_START_STRING),\n comment_end_string=jinja_environment.get('COMMENT_END_STRING', defaults.COMMENT_END_STRING),\n line_statement_prefix=jinja_environment.get('LINE_STATEMENT_PREFIX', defaults.LINE_STATEMENT_PREFIX),\n line_comment_prefix=jinja_environment.get('LINE_COMMENT_PREFIX', defaults.LINE_COMMENT_PREFIX),\n trim_blocks=jinja_environment.get('TRIM_BLOCKS', True),\n lstrip_blocks=jinja_environment.get('LSTRIP_BLOCKS', True),\n newline_sequence=jinja_environment.get('NEWLINE_SEQUENCE', defaults.NEWLINE_SEQUENCE),\n keep_trailing_newline=jinja_environment.get('KEEP_TRAILING_NEWLINE', defaults.KEEP_TRAILING_NEWLINE)\n )\n environment.undefined = StrictUndefined\n\n dirname = os.path.dirname(jinja_template)\n\n relpath = os.path.relpath(dirname, template_root)\n basename = os.path.basename(jinja_template)\n\n filename = os.path.join(relpath, basename)\n\n template = environment.get_template(filename)\n return template.render(extra_variables)\n\n\ndef main(path, out_path=None, verbose=False, silent=False, settings=None):\n extra_variables = {}\n ignore_jinja_templates = []\n output_options = {}\n jinja_environment = {}\n if settings:\n extra_variables = getattr(settings, 'EXTRA_VARIABLES', {})\n ignore_jinja_templates = getattr(settings, 'IGNORE_JINJA_TEMPLATES', [])\n output_options = getattr(settings, 'OUTPUT_OPTIONS', {})\n jinja_environment = getattr(settings, 'JINJA_ENVIRONMENT', {})\n\n print_log('Additional context and options:', True, verbose, silent)\n print_log(' EXTRA_VARIABLES : {}'.format(extra_variables), True, verbose, silent)\n print_log(' OUTPUT_OPTIONS : {}'.format(output_options), True, verbose, silent)\n print_log(' JINJA_ENVIRONMENT: {}'.format(jinja_environment), True, verbose, silent)\n\n if os.path.isdir(path):\n print_log('Looking for jinja templates in: {}{}'.format(style_JINJA_FILE, path), False, verbose, silent)\n template_root = path\n jinja_templates = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, '*.jinja*'):\n 
jinja_templates.append(os.path.join(root, filename))\n else:\n template_root = os.path.dirname(path)\n jinja_templates = [path, ] # path is just a file, actually\n\n print_log(' Jinja files found: {}{}'.format(style_JINJA_FILE, len(jinja_templates)), False, verbose, silent)\n\n for jinja_template in jinja_templates:\n print_log('Processing:' + style_JINJA_FILE + jinja_template, False, verbose, silent)\n\n skip = False\n for jinja_template_to_be_ignored in ignore_jinja_templates:\n if re.match(jinja_template_to_be_ignored, jinja_template):\n print_log(' Skipping: ' + style_WARNING + jinja_template, False, verbose, silent)\n skip = True\n break\n\n if skip:\n continue\n\n if out_path:\n rel_path = os.path.relpath(jinja_template, path)\n if rel_path == '.':\n rel_path = os.path.basename(path)\n template_file = os.path.join(out_path, rel_path)\n\n template_dir = os.path.dirname(template_file)\n if not os.path.exists(template_dir):\n try:\n os.makedirs(template_dir)\n except:\n raise IOError('Cannot create sub output directory: {}'.format(template_dir))\n\n template_file, _ = os.path.splitext(template_file)\n else:\n template_file, _ = os.path.splitext(jinja_template)\n\n template_file = os.path.abspath(template_file)\n\n if output_options.get('remove_double_extension', False):\n template_file, _ = os.path.splitext(template_file)\n\n template_file = '{}{}'.format(template_file, output_options.get('extension', '.html'))\n\n print_log(' Creating: ' + style_RENDERED_FILE + template_file, False, verbose, silent)\n\n if os.path.abspath(jinja_template) == os.path.abspath(template_file):\n raise IOError(\"write target is also a source file, aborting to prevent blanking\")\n try:\n with open(template_file, 'w') as f:\n f.write(render_template(\n jinja_template,\n extra_variables=extra_variables,\n output_options=output_options,\n jinja_environment=jinja_environment,\n template_root=template_root\n ))\n except:\n os.unlink(template_file)\n raise\n\n\ndef _first_set(*values):\n for value in values:\n if value is not None:\n return value\n\n\ndef main_command(path=None, settings=None, out=None, verbose=None, silent=None):\n has_path = bool(path)\n path_help = '{}Path to base files.'.format('' if has_path else '[REQUIRED] ')\n\n parser = argparse.ArgumentParser(description='jinja2_standalone_compiler')\n parser.add_argument('--path', dest='path', help=path_help, required=not(has_path))\n parser.add_argument('--settings', '-s', dest='settings', action='append', nargs=1, help='Settings file(s) to use.')\n parser.add_argument('--out', '-o', dest='out', help='Output path.')\n parser.add_argument('--verbose', dest='verbose', help='Detailed output.', action='store_true', default=False)\n parser.add_argument('--silent', dest='silent', help='Suppress output.', action='store_true', default=False)\n args = parser.parse_args()\n\n args_settings = None\n if args.settings:\n args_settings = [x[0] for x in args.settings]\n\n path = _first_set(path, args.path)\n settings = _first_set(settings, args_settings)\n out = _first_set(out, args.out)\n verbose = _first_set(verbose, args.verbose)\n silent = _first_set(silent, args.silent)\n\n current_dir = os.getcwd()\n\n if not using_colorama and not silent:\n print(\"<optional dependency 'colorama' not found, try 'pip install colorama==0.3.7' to see colored output>\")\n\n if out and not os.path.exists(out):\n out = os.path.normpath(out)\n try:\n os.makedirs(out)\n except:\n raise IOError('Cannot create output directory: {}'.format(out))\n\n if settings:\n if not silent:\n 
print_log('{}Number of settings files: {}'.format(style_SUCCESS, len(settings)), False, verbose, silent)\n\n for setting in settings:\n settings_file = os.path.normpath(os.path.join(current_dir, setting))\n if not os.path.exists(settings_file):\n raise IOError('Settings file not found: {}'.format(settings_file))\n else:\n if not silent:\n print_log('Using settings file: ' + style_SETTING + settings_file, False, verbose, silent)\n\n sys.path.insert(0, '')\n setting = imp.load_source(current_dir, setting)\n work_dir = os.path.normpath(os.path.join(current_dir, path))\n\n main(work_dir, out, verbose, silent, setting)\n\n print_log(style_SUCCESS + 'Done.', False, verbose, silent)\n\n else:\n work_dir = os.path.join(current_dir, path)\n\n main(work_dir, out, verbose, silent)\n\n print_log(style_ALL_DONE + 'All done.', False, verbose, silent)\n"
},
{
"alpha_fraction": 0.675930917263031,
"alphanum_fraction": 0.6786292791366577,
"avg_line_length": 53.5,
"blob_id": "4eaaae6c12c682b022514482f3645e8f4f30388e",
"content_id": "ed3311612207ae1629bc3da61c0ca0aa8af09c2d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3706,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 68,
"path": "/tests/test_jinja_standalone_compiler.py",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\nfrom collections import namedtuple\nimport fnmatch\nimport os\nimport unittest\n\nfrom jinja2 import UndefinedError\n\nfrom jinja2_standalone_compiler import main\n\nfixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures')\n\n\nclass MainTestCase(unittest.TestCase):\n def tearDown(self):\n for root, dirnames, filenames in os.walk(fixtures_dir):\n for filename in fnmatch.filter(filenames, '*.html'):\n os.unlink(os.path.join(root, filename))\n\n def test_extends(self):\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'child_base', 'child.html')))\n main(os.path.join(fixtures_dir, 'child_base'))\n self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'child_base', 'child.html')))\n\n file_content = open(os.path.join(fixtures_dir, 'child_base', 'child.html')).read()\n self.assertEquals(file_content, 'begin parent\\nparent content\\n\\nchild content\\nend parent')\n\n def test_extends_and_include(self):\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))\n main(os.path.join(fixtures_dir, 'header_footer'))\n self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))\n\n file_content = open(os.path.join(fixtures_dir, 'header_footer', 'child.html')).read()\n self.assertEquals(file_content, 'header!\\nbegin parent\\nparent content\\n\\nchild content\\nend parent\\nfooter!')\n\n def test_extends_and_include_and_custom_vars(self):\n Settings = namedtuple('Settings', ['EXTRA_VARIABLES'])\n settings = Settings(EXTRA_VARIABLES={'number': 42, 'triplicate': lambda x: x * 3})\n\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'custom_vars', 'child.html')))\n main(os.path.join(fixtures_dir, 'custom_vars'), settings=settings)\n self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'custom_vars', 'child.html')))\n\n file_content = open(os.path.join(fixtures_dir, 'custom_vars', 'child.html')).read()\n self.assertEquals(file_content, 'header!\\nbegin parent\\nparent content\\n\\nchild content\\nworks! works! works! '\n '\\n42 * 2 = 84\\nend parent\\nfooter!')\n\n def test_ignore_jinja_templates(self):\n Settings = namedtuple('Settings', ['IGNORE_JINJA_TEMPLATES'])\n settings = Settings(IGNORE_JINJA_TEMPLATES=['.*base.jinja', ])\n\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'base.html')))\n main(os.path.join(fixtures_dir, 'header_footer'), settings=settings)\n self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'child.html')))\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'header_footer', 'base.html')))\n\n def test_undefined_vars_raises_errors(self):\n Settings = namedtuple('Settings', ['EXTRA_VARIABLES'])\n settings = Settings(EXTRA_VARIABLES={'name': 'Filipe Waitman'})\n\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))\n self.assertRaises(UndefinedError, main, path=os.path.join(fixtures_dir, 'undefined_vars'))\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))\n\n self.assertFalse(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))\n main(os.path.join(fixtures_dir, 'undefined_vars'), settings=settings)\n self.assertTrue(os.path.exists(os.path.join(fixtures_dir, 'undefined_vars', 'child.html')))\n"
},
{
"alpha_fraction": 0.6387711763381958,
"alphanum_fraction": 0.6504237055778503,
"avg_line_length": 32.71428680419922,
"blob_id": "3ec7dc206e7f4caa9e728658e5cff18e4c224725",
"content_id": "d3afd40da142fbe28097e4e12528b02f5c7c35f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 944,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 28,
"path": "/setup.py",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "from setuptools import setup\n\nVERSION = '1.3.1'\n\n\nsetup(\n name='jinja2_standalone_compiler',\n packages=['jinja2_standalone_compiler', ],\n version=VERSION,\n author='Filipe Waitman',\n author_email='[email protected]',\n install_requires=[x.strip() for x in open('requirements.txt').readlines()],\n url='https://github.com/filwaitman/jinja2-standalone-compiler',\n download_url='https://github.com/filwaitman/jinja2-standalone-compiler/tarball/{}'.format(VERSION),\n test_suite='tests',\n keywords=['Jinja2', 'Jinja', 'renderer', 'compiler', 'HTML'],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\",\n \"Operating System :: OS Independent\",\n ],\n entry_points=\"\"\"\\\n [console_scripts]\n jinja2_standalone_compiler = jinja2_standalone_compiler:main_command\n \"\"\",\n)\n"
},
{
"alpha_fraction": 0.6327868700027466,
"alphanum_fraction": 0.6377049088478088,
"avg_line_length": 31.972972869873047,
"blob_id": "4fe84c44f5f34888b829620cc22246322035ebe8",
"content_id": "03eb7796f8a6689103b8acf90e24291baf45611c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1220,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 37,
"path": "/settings_example.py",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Which templates don't you want to generate? (You can use regular expressions here!)\n# Use strings (with single or double quotes), and separate each template/regex in a line terminated with a comma.\nIGNORE_JINJA_TEMPLATES = [\n '.*base.jinja',\n '.*tests/.*'\n]\n\n# Here you can override the default jinja environment setup\nJINJA_ENVIRONMENT = {\n # 'BLOCK_START_STRING': '{%',\n # 'BLOCK_END_STRING': '%}',\n # 'VARIABLE_START_STRING': '{{',\n # 'VARIABLE_END_STRING': '}}',\n # 'COMMENT_START_STRING': '{#',\n # 'COMMENT_END_STRING': '#}',\n # 'LINE_STATEMENT_PREFIX': None,\n # 'LINE_COMMENT_PREFIX': None,\n # 'TRIM_BLOCKS': True, # Jinja default is False\n # 'LSTRIP_BLOCKS': True, #Jinja default is False\n # 'NEWLINE_SEQUENCE': '\\n',\n # 'KEEP_TRAILING_NEWLINE': False\n}\n\n# Do you have any additional variables to the templates? Put 'em here! (use dictionary ('key': value) format)\nEXTRA_VARIABLES = {\n 'project_name': 'WaitCorp',\n 'current_year': 2042,\n 'debug': False,\n 'triplicate': lambda x: x * 3\n}\n\nOUTPUT_OPTIONS = {\n 'extension': '.html', # Including leading '.'\n 'remove_double_extension': False # If you use something like sample.jinja.html\n}\n"
},
{
"alpha_fraction": 0.4516128897666931,
"alphanum_fraction": 0.6774193644523621,
"avg_line_length": 14.5,
"blob_id": "e789f0ef9cc028a1af94332ffd3c8d9c3177ca32",
"content_id": "56db9b26ae7437d2989a7a3d49caa071de4042a6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 31,
"license_type": "permissive",
"max_line_length": 18,
"num_lines": 2,
"path": "/requirements_test.txt",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "setuptools==19.1.1\nnose==1.3.7\n"
},
{
"alpha_fraction": 0.6747720241546631,
"alphanum_fraction": 0.7066869139671326,
"avg_line_length": 49.61538314819336,
"blob_id": "57505034a3ee6701661f6dd170a80fda5996d776",
"content_id": "e23f5ed1c378d0cc2483402afdafe4287d86ca8e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 658,
"license_type": "permissive",
"max_line_length": 293,
"num_lines": 13,
"path": "/AUTHORS.md",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "Project owners\n===========================\n\n* Filipe Waitman\n\n\nContributors (alphabetical order)\n===========================\n\n* @bertonha [1](https://github.com/filwaitman/jinja2-standalone-compiler/pull/1)\n* @TomSmartBishop [1](https://github.com/filwaitman/jinja2-standalone-compiler/pull/2) [2](https://github.com/filwaitman/jinja2-standalone-compiler/pull/3) [3](https://github.com/filwaitman/jinja2-standalone-compiler/pull/4) [4](https://github.com/filwaitman/jinja2-standalone-compiler/pull/5)\n* @SleepyHarry [1](https://github.com/filwaitman/jinja2-standalone-compiler/pull/8)\n* @mathieui [1](https://github.com/filwaitman/jinja2-standalone-compiler/pull/9)\n"
},
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7211664319038391,
"avg_line_length": 37.632911682128906,
"blob_id": "68576e60893f0b1a201b067bc1cd2cb32654cdb0",
"content_id": "dfd4bdd7954c2e068aa43c894a283e9e3d3addc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 3052,
"license_type": "permissive",
"max_line_length": 284,
"num_lines": 79,
"path": "/README.rst",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "THIS PROJECT IS ARCHIVED AND UNMAINTAINED. There are plenty alternatives to this, such as:\n* https://github.com/kolypto/j2cli\n* https://github.com/mattrobenolt/jinja2-cli\n\n\nJinja2 Standalone Compiler\n===========================\n\nThis provides a silly way to create HTML templates based on Jinja2 ones without a framework.\n\nThis aims to be stupidly simple - in order to be used to whom have never heard about python before.\n\nThe least you should do to work with this is learn a bit of `Jinja2 <http://jinja.pocoo.org/>`_. Do it now, you won't regret. =)\n\nAh! This project works well on both Python2 and Python3.\n\nThis is:\n-------------\n* a way to use Jinja2 outside python frameworks.\n* a way to use Jinja2 not knowing anything in python.\n\nThis is not:\n-------------\n* another templating language for python. Jinja2 is good enough. =)\n\nInstallation:\n-------------\n.. code:: bash\n\n pip install jinja2-standalone-compiler\n\n\nUsage:\n-------------\n.. code:: bash\n\n jinja2_standalone_compiler <path-to-jinja-files> # Note that jinja files will be searched recursively\n\n* For a more intense usage, you can also use a settings file to define a bunch of things. To use settings file:\n\n .. code:: bash\n\n jinja2_standalone_compiler <path-to-jinja-files> --settings=path/to/settings.py\n\n * You can also use -s instead of --settings\n * Please note that settings file is a Python file, so Python syntax applies. =P\n * It's also possible to use multiple settings files, so that a template will be rendered multiple times with a different setup: -s <settings-1> -s <settings-2> ...\n * In order to see what can be done with this settings file, please refer to `this example <https://github.com/filwaitman/jinja2-standalone-compiler/blob/master/settings_example.py>`_\n\n* To turn off console ouput you can use the following option:\n\n .. code:: bash\n\n jinja2_standalone_compiler <path-to-jinja-files> --silent\n\n* Or if you want to get more console ouput:\n\n .. code:: bash\n\n jinja2_standalone_compiler <path-to-jinja-files> --verbose\n\n* You can also specify an alternative output directory instead putting the rendered templates into the same directory as the jinja files. The relative directory structure (if present) will be reconstructed inside the new ouput. The ouput directory will be created if it doesn't exist:\n\n .. code:: bash\n\n jinja2_standalone_compiler <path-to-jinja-files> --out=<path-to-output-dir>\n\n * You can also use -o instead of --out\n\nContribute\n----------\nDid you think in some interesting feature, or have you found a bug? Please let me know!\n\nOf course you can also download the project and send me some `pull requests <https://github.com/filwaitman/jinja2-standalone-compiler/pulls>`_.\n\n\nYou can send your suggestions by `opening issues <https://github.com/filwaitman/jinja2-standalone-compiler/issues>`_.\n\nYou can contact me directly as well. Take a look at my contact information at `http://filwaitman.github.io/ <http://filwaitman.github.io/>`_ (email is preferred rather than mobile phone).\n"
},
{
"alpha_fraction": 0.4166666567325592,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11,
"blob_id": "a67983e9b8af5091aa3ceb6fd677c4548c11c2b9",
"content_id": "f10e84ecd4b6d6b3dc077d1814c99df5ecca28e7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 12,
"license_type": "permissive",
"max_line_length": 11,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "filwaitman/jinja2-standalone-compiler",
"src_encoding": "UTF-8",
"text": "Jinja2>=2.8\n"
}
] | 8 |
EdwinVanRooij/DV | https://github.com/EdwinVanRooij/DV | 7e5ce1403ce3c823aba6f210dee75358b3c3af59 | b9bf4c549a94058981eec5dfb820d3fb30dbf5a7 | 45e8d6066b49b91822fbb79132dc3aa2b83b7efb | refs/heads/master | 2021-03-22T04:38:35.887427 | 2017-12-21T14:18:30 | 2017-12-21T14:18:30 | 104,338,293 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.519132673740387,
"alphanum_fraction": 0.543367326259613,
"avg_line_length": 24.29032325744629,
"blob_id": "97a6504a9baf0409023e98d8163eeee5287d2b94",
"content_id": "da4bbea820e2fd4c94505e2998b26c867cca1721",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 784,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 31,
"path": "/02/scripts/quarterly-post-counter.py",
"repo_name": "EdwinVanRooij/DV",
"src_encoding": "UTF-8",
"text": "import csv\n\nwith open('../data.csv', 'r') as f:\n reader = csv.reader(f)\n your_list = list(reader)\n\nfinal_dict = dict()\n\nfor item in your_list[1:]:\n year = item[1][0:4]\n month = item[1][5:7]\n if 1 <= int(month) <= 3:\n quarter = 1\n elif 4 <= int(month) <= 6:\n quarter = 2\n elif 7 <= int(month) <= 9:\n quarter = 3\n else:\n quarter = 4\n key = year + '-' + str(quarter)\n final_dict[key] = final_dict.get(key, 0) + 1\n\nprint(final_dict)\n\nwith open('../posts-by-quarter.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(['year', 'amount'])\n for key in final_dict:\n writer.writerow([key, final_dict[key]])\n"
},
{
"alpha_fraction": 0.6259999871253967,
"alphanum_fraction": 0.6579999923706055,
"avg_line_length": 34.71428680419922,
"blob_id": "1e98800ece614f5b67b2190abd07930fa8fc08dc",
"content_id": "8260159bf33290565218654768a2746f9c03e986",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 500,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 14,
"path": "/Scripting/top-stories.py",
"repo_name": "EdwinVanRooij/DV",
"src_encoding": "UTF-8",
"text": "# Get the top n stories by word x\nimport pandas as pd\n\npd.options.display.max_colwidth = 5000\n\nstories = 10\nword = 'china'\n\ndf = pd.read_csv('../data/data/data.csv', sep=',', low_memory=False, encoding = 'ISO-8859-1')\ndf = df.drop(labels=['subreddit', 'over_18', 'time_created', 'down_votes'], axis=1)\ndf = df[df['title'].str.contains(word)]\ndf = df.sort_values(by=['up_votes'], ascending=False)\n\ndf[0:stories].to_csv('../data/top-{}-stories-{}.csv'.format(stories, word), sep=',', encoding='utf-8')\n"
},
{
"alpha_fraction": 0.578066885471344,
"alphanum_fraction": 0.589219331741333,
"avg_line_length": 24.619047164916992,
"blob_id": "2882892158b6b08a9fbbebb8824e6912aee6ca3c",
"content_id": "440430a4da8f0aa9736d41932b9d1f2340a12ec2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 538,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/02/scripts/year-post-counter.py",
"repo_name": "EdwinVanRooij/DV",
"src_encoding": "UTF-8",
"text": "import csv\n\nwith open('../data.csv', 'r') as f:\n reader = csv.reader(f)\n your_list = list(reader)\n\nfinal_dict = dict()\n\nfor item in your_list[1:]:\n key = item[1][0:4]\n final_dict[key] = final_dict.get(key, 0) + 1\n\nprint(final_dict)\n\nwith open('../posts-by-year.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(['year', 'amount'])\n for key in final_dict:\n writer.writerow([key, final_dict[key]])\n"
},
{
"alpha_fraction": 0.6432486176490784,
"alphanum_fraction": 0.6611858606338501,
"avg_line_length": 23.475608825683594,
"blob_id": "2423bc82d386a2cfe05bd377ebab84e7970167d4",
"content_id": "fe958e759b84901b8c80fa91df417d7448014622",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2007,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 82,
"path": "/04/exporter-by-year.py",
"repo_name": "EdwinVanRooij/DV",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib as mpl\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"year\")\nparser.add_argument('--amount', dest='amount', action='store', default=100,\n help='amount of words to store')\n\nargs = parser.parse_args()\n\nyear = int(args.year)\namount = int(args.amount)\n\nprint(\"Getting words by year \" + str(year) + \".\")\nprint(\"Getting \" + str(amount) + \" words.\")\n\n\ndf = pd.read_csv('../data/data.csv', sep=',', low_memory=False, encoding = 'ISO-8859-1')\n\n# Let's remove the unused column first\ndf = df.drop(labels='subreddit', axis=1) # axis 1 drops columns, 0 will drop rows that match index value in labels\n\n# Get all posts from 2012 till 2014.\ntdf = df.copy()\n\n# Ensure date_created is a datetime object\ntdf['date_created'] = pd.to_datetime(df['date_created']) \n\n# Create and apply a boolean mask \nmask = (tdf['date_created'] >= str(year)+'-1-1') & (tdf['date_created'] < str(year+1)+'-1-1')\n#mask = (tdf['date_created'] > '2012-1-1') & (tdf['date_created'] <= '2014-1-1')\ntdf = tdf.loc[mask]\n\n# Only keep the titles\ntdf = tdf[['title']]\n\ntitles = tdf['title'].tolist()\n\n\n# Now count the words\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\ncounts = Counter()\ns_words = stopwords.words('english')\ns_words.append('-')\ns_words.append('--')\ns_words.append('---')\ns_words.append('----')\ns_words.append('|')\ns_words.append('')\n\nfor sentence in titles:\n words = [word.strip('.,?!\"\\'').lower() for word in sentence.split()]\n filtered_words = [w for w in words if w not in s_words]\n counts.update(filtered_words)\n\n\nmost_common = counts.most_common(amount)\nmost_common\n\nL = zip(*most_common)\nwords, frequency = L\n\nprint(\"Words:\")\nprint(words)\n\nprint(\"Frequency:\")\nprint(frequency)\n\n\nf = open(\"{}_{}.csv\".format(str(year), str(amount)), \"w\")\n\nf.write(\"word,frequency\\n\")\nfor i in range(len(words)):\n f.write(\"{},{}\\n\".format(words[i], frequency[i]))\n\nf.close()\n"
},
{
"alpha_fraction": 0.7305502891540527,
"alphanum_fraction": 0.8149905204772949,
"avg_line_length": 36.64285659790039,
"blob_id": "cc15b248431b082c36643f1b923ca1ac3dcbdce0",
"content_id": "7eee08da718aaf4fdcd57e2fc817955302b9bdae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1054,
"license_type": "no_license",
"max_line_length": 215,
"num_lines": 28,
"path": "/README.md",
"repo_name": "EdwinVanRooij/DV",
"src_encoding": "UTF-8",
"text": "# Summary\n// A short 4-sentence summary of your visualization\n\n# Design\n// Explain any design choices you made including changes to the visualization after collecting feedback\n\n# Feedback\n// Include all feedback you received from others on your visualization from the first sketch to the final visualization\n\n# Resources & Tools\n// Links to sources & tools I used in the whole process\n\n\n</br>\n</br>\n</br>\n# Extras\n\n## Slides\nhttps://portal.fhict.nl/Studentenplein/LMC/1718nj/Forms/AllItems.aspx?RootFolder=%2fStudentenplein%2fLMC%2f1718nj%2fMinoren%2fAppliedDataScience%2fDV%2fSlides&FolderCTID=0x012000CD80FF5BC8629A4FA999340E00C2C261\n\n## Assignments\nhttps://portal.fhict.nl/Studentenplein/LMC/1718nj/Forms/AllItems.aspx?RootFolder=%2fStudentenplein%2fLMC%2f1718nj%2fMinoren%2fAppliedDataScience%2fDV%2fAssignments&FolderCTID=0x012000CD80FF5BC8629A4FA999340E00C2C261\n\n## Udacity course\nhttps://www.udacity.com/course/data-visualization-and-d3js--ud507\n\n## Note: Portfolio wordt een pdf document waarin je proces beschrijft, mag links bevatten naar werk\n"
}
] | 5 |
LuennyEvelly/Projeto_Flask | https://github.com/LuennyEvelly/Projeto_Flask | a0bc04f00699db95293be1cffd14bfd14b315eb1 | 8e60f36193e7c4b562312a5ef568dd2d5494d0aa | 53becb6839e1ac489ffdcb3cea48dd7d6ffb53cd | refs/heads/master | 2023-07-02T18:57:50.063530 | 2021-08-07T18:11:41 | 2021-08-07T18:11:41 | 393,757,895 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6628943681716919,
"alphanum_fraction": 0.6628943681716919,
"avg_line_length": 32.91860580444336,
"blob_id": "6c90ed0a114ed5fbf798c4793fcf75f4b4c9828a",
"content_id": "1381b1cd31144405260d7f7cfe2b66419f8d4a17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2919,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 86,
"path": "/venv/views.py",
"repo_name": "LuennyEvelly/Projeto_Flask",
"src_encoding": "UTF-8",
"text": "from flask import render_template, request, redirect, session, flash, url_for\nfrom models import Carro\nfrom venv.carro import app, db\nfrom dao import CarroDao, UsuarioDao\n\ncarro_dao = CarroDao(db)\nusuario_dao = UsuarioDao(db)\n\[email protected]('/')\ndef index():\n return render_template('lista.html',titulo='carro', carros=carro_dao.listar())\n\[email protected]('/novo')\ndef novo():\n if 'usuario_logado' not in session or session['usuario_logado'] == None:\n return redirect(url_for('login', proxima=url_for('novo')))\n return render_template('novo.html', titulo='Novo Carro')\n\[email protected]('/criar', methods=['POST',])\ndef criar():\n id = request.form['id']\n marca = request.form['marca']\n modelo = request.form['modelo']\n cor = request.form['cor']\n combustivel = request.form['combustivel']\n ano = request.form['ano']\n carro = Carro(marca, modelo, cor, combustivel, ano)\n carro = carro_dao.salvar(carro)\n return redirect(url_for('index'))\n\[email protected]('/editar/<int:id>')\ndef editar(id):\n if 'usuario_logado' not in session or session['usuario_logado'] == None:\n return redirect(url_for('login', proxima=url_for('editar', id=id)))\n carro = carro_dao.busca_por_id(id)\n return render_template('editar.html', titulo='Editando Carro', carro=carro)\n\[email protected]('/atualizar', methods=['POST',])\ndef atualizar():\n id = request.form['id']\n marca = request.form['marca']\n modelo = request.form['modelo']\n cor = request.form['cor']\n combustivel = request.form['combustivel']\n ano = request.form['ano']\n carro = Carro(marca, modelo, cor, combustivel, ano, id)\n carro_dao.salvar(carro)\n return redirect(url_for('index'))\n\[email protected]('/deletar/<int:id>')\ndef deletar(id):\n if 'usuario_logado' not in session or session['usuario_logado'] == None:\n return redirect(url_for('login', proxima=url_for('deletar', id=id)))\n carro_dao.deletar(id)\n flash(\"O carro foi removido com sucesso!\")\n return redirect(url_for('index'))\n\[email protected]('/login')\ndef login():\n proxima = request.args.get('proxima')\n return render_template('login.html', titulo='login', proxima=proxima)\n\n\[email protected]('/autenticar', methods=['POST',])\ndef autenticar():\n usuario = usuario_dao.autenticar(request.form['usuario'], request.form['senha'])\n if usuario:\n session['usuario_logado'] = usuario.id\n flash(usuario.nome + 'Logado com sucesso!')\n proxima_pagina = request.form['proxima']\n return redirect((proxima_pagina))\n else:\n flash('Usuário ou senha inválida, tente novamente!')\n return redirect(url_for('login'))\n\n\[email protected]('/logout')\ndef logout():\n session['usuario_logado'] = None\n flash('Nenhum usuário logado!')\n return redirect(url_for('index'))\n\[email protected]('/carro/<int:id>')\ndef carro(id):\n carro = carro_dao.busca_por_id(id)\n return render_template('carros.html', titulo='Carro', carro=carro)"
},
{
"alpha_fraction": 0.554959774017334,
"alphanum_fraction": 0.554959774017334,
"avg_line_length": 25.714284896850586,
"blob_id": "3082e5d96ca639ecf0d4eedcf37d7083cd077329",
"content_id": "26b3ec7ae95b0b626710a35a2a0bcc2f56e9584e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 373,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 14,
"path": "/venv/models.py",
"repo_name": "LuennyEvelly/Projeto_Flask",
"src_encoding": "UTF-8",
"text": "class Carro:\n def _init_(self, marca, modelo, cor, combustivel, ano, id = None):\n self.id = id\n self.marca = marca\n self.modelo = modelo\n self.cor = cor\n self.combustivel = combustivel\n self.ano = ano\n\nclass Usuario:\n def _init_(self, id, nome, senha):\n self.id = id\n self.nome = nome\n self.senha = senha"
},
{
"alpha_fraction": 0.6284104585647583,
"alphanum_fraction": 0.6349347829818726,
"avg_line_length": 38.22093200683594,
"blob_id": "9ef60ca43b3b6af62edf0c450998ec3c643afe59",
"content_id": "c22332ebfaba0e69494b045df2c71e13a2b9c367",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3372,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 86,
"path": "/venv/dao.py",
"repo_name": "LuennyEvelly/Projeto_Flask",
"src_encoding": "UTF-8",
"text": "from models import Carro, Usuario\nimport psycopg2.extras\n\n\nSQL_DELETA_CARRO = 'delete from carro where id = %s'\nSQL_CARRO_POR_ID = 'SELECT id,marca, modelo, cor, combustivel, ano from carro where id = %s'\nSQL_USUARIO_POR_ID = 'SELECT id, nome, senha from usuario where id = %s'\nSQL_ATUALIZA_CARRO = 'UPDATE carro SET marca=%s, modelo=%s, cor=%s, combustivel=%s, ano=%s where id = %s'\nSQL_BUSCA_CARRO = 'SELECT id, marca, modelo, cor, combustivel, ano from carro'\nSQL_CRIA_CARRO = 'INSERT into carro ( marca, modelo, cor, combustivel, ano) values (%s, %s, %s, %s, %s) RETURNING id'\nSQL_CRIA_USUARIO = 'INSERT into usuario (id, nome, senha) values (%s, %s, %s)'\nSQL_ATUALIZA_USUARIO = 'UPDATE usuario SET id=%s, nome=%s, senha=%s where id = %s'\nSQL_AUTENTICAR_USUARIO = 'SELECT id, nome, senha from usuario where id = %s AND senha = %s'\n\nclass CarroDao:\n def _init_(self, db):\n self.__db = db\n\n def salvar(self, carro):\n cursor = self.__db.cursor()\n\n if (carro.id):\n cursor.execute(SQL_ATUALIZA_CARRO, (carro.marca, carro.modelo, carro.cor, carro.combustivel, carro.ano, carro.id))\n else:\n cursor.execute(SQL_CRIA_CARRO, (carro.marca, carro.modelo, carro.cor, carro.combustivel, carro.ano))\n carro.id = cursor.fetchone()[0]\n self.__db.commit()\n cursor.close()\n return carro\n\n def listar(self):\n cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(SQL_BUSCA_CARRO)\n carros = traduz_carros(cursor.fetchall())\n cursor.close()\n return carros\n\n def busca_por_id(self, id):\n cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(SQL_CARRO_POR_ID, (id,))\n tupla = cursor.fetchone()\n cursor.close()\n return Carro(tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], id=tupla[0])\n\n def deletar(self, id):\n cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(SQL_DELETA_CARRO, (id,))\n self.__db.commit()\n cursor.close()\n\nclass UsuarioDao:\n def _init_(self, db):\n self.__db = db\n\n def buscar_por_id(self,id):\n cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(SQL_USUARIO_POR_ID, (id,))\n dados = cursor.fetchone()\n usuario = traduz_usuario(dados) if dados else None\n return usuario\n\n def autenticar(self, id, senha):\n cursor = self.__db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cursor.execute(SQL_AUTENTICAR_USUARIO, (id, senha))\n dados = cursor.fetchone()\n usuario = traduz_usuario(dados) if dados else None\n return usuario\n\n def salvar(self, usuario):\n cursor = self.__db.cursor()\n\n #if (usuario.id):\n #cursor.execute(SQL_ATUALIZA_USUARIO,(usuario.id, usuario.nome, usuario.senha))\n #else:\n cursor.execute(SQL_ATUALIZA_USUARIO, (usuario.id, usuario.nome, usuario.senha))\n self.__db.commit()\n cursor.close()\n return usuario\n\ndef traduz_usuario(tupla):\n return Usuario(tupla[0], tupla[1], tupla[2])\n\ndef traduz_carros(carros):\n def cria_carro_com_tupla(tupla):\n return Carro( tupla[1], tupla[2], tupla[3], tupla[4], tupla[5], id=tupla[0])\n return list(map(cria_carro_com_tupla, carros))"
},
{
"alpha_fraction": 0.6456692814826965,
"alphanum_fraction": 0.6811023354530334,
"avg_line_length": 22.18181800842285,
"blob_id": "aada16fbfb4e594f6859bea8dff411c493325042",
"content_id": "e8a14477b589e812e9d6e406d47377cd5bb106e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 254,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 11,
"path": "/venv/carro.py",
"repo_name": "LuennyEvelly/Projeto_Flask",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nimport psycopg2\n\napp = Flask(_name_)\napp.secret_key = '4intin'\ndb = psycopg2.connect(database='carros', user='postgres', password='postgres', host='127.0.0.1')\n\nfrom views import *\n\nif _name_ == '_main_':\n app.run(debug=True)"
}
] | 4 |
KahKitZheng/greenhouse | https://github.com/KahKitZheng/greenhouse | 54498632b3250fd6137a84f880db6f29352c5a43 | 67fdd2946229ed0e32a48f350f8099a752c03882 | c26b23cac644ba6aa883f1b6801db38c07c3568c | refs/heads/master | 2022-12-07T08:23:33.906327 | 2020-08-28T17:35:23 | 2020-08-28T17:35:23 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5210526585578918,
"alphanum_fraction": 0.5210526585578918,
"avg_line_length": 46.75,
"blob_id": "8694ece146ca4463906b77822613173bdbb7cb5e",
"content_id": "958d3fe4cf03683d63a1216b9fcdd00d96c199b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 4,
"path": "/client/src/utils/routes.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "export const DASHBOARD = \"/\";\nexport const WEATHER = \"/weather\";\nexport const PLANTS = \"/plants\";\nexport const PLANT_DETAILS = \"/plants/:id\";"
},
{
"alpha_fraction": 0.6382978558540344,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 19.88888931274414,
"blob_id": "edf59cb5e27578a2b7a07877d839f7812627f21c",
"content_id": "84eefe2d43ea77c4ad6325ef775fb2ea6295e185",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 9,
"path": "/client/src/components/layout/Container.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst Container = (props) => (\n <div className=\"h-full w-full px-6 mb-16 lg:mb-0 overflow-y-auto\">\n {props.children}\n </div>\n);\n\nexport default Container;\n"
},
{
"alpha_fraction": 0.5436720252037048,
"alphanum_fraction": 0.5659536719322205,
"avg_line_length": 28.526315689086914,
"blob_id": "79f1f31a973610008f5e367e5942706a36e3276e",
"content_id": "34692371e4763c369fe3b0353d856af777c49c81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1122,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 38,
"path": "/client/src/components/views/weather/DailyForecast.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport iconPicker from \"utils/iconPicker\";\n\nconst convertTimestamp = (timestamp) => {\n const dateObject = new Date(timestamp * 1000);\n const humanDateFormat = dateObject.toLocaleString(\"en-NL\", {\n weekday: \"short\",\n });\n\n return humanDateFormat;\n};\n\nconst DailyForecast = ({ data }) => {\n return (\n <div>\n <div className=\"grid grid-flow-col lg:flex lg:items-center lg:justify-between overflow-x-auto no-scroll lg:mx-6\">\n {data.map((day) => (\n <div\n key={day.dt}\n className=\"w-16 lg:w-24 mr-6 lg:mr-0 last:mr-0 font-semibold flex flex-col items-center justify-start lg:justify-center\"\n >\n <p className=\"text-md text-gray-600\">{convertTimestamp(day.dt)}</p>\n <img\n className=\"h-12 w-12\"\n src={iconPicker(day.weather[0].icon)}\n alt=\"\"\n />\n <p className=\"text-sm text-gray-600 text-center leading-5\">\n {day.weather[0].description}\n </p>\n </div>\n ))}\n </div>\n </div>\n );\n};\n\nexport default DailyForecast;\n"
},
{
"alpha_fraction": 0.5118759274482727,
"alphanum_fraction": 0.5220552682876587,
"avg_line_length": 23,
"blob_id": "55f79c3b044cb66e548d0637244259f7ae040d6a",
"content_id": "26ad8a057de479abd6ec0c4d73a64cf2c8b274e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2063,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 86,
"path": "/raspberry_pi/grove.py/src/app.py",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport time\n\nfrom datetime import datetime\n\nfrom mraa import getGpioLookup\nfrom seeed_dht import DHT\nfrom grove.grove_moisture_sensor import GroveMoistureSensor\n \nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_restful import Api, Resource, reqparse\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\nclass TempHumid(Resource):\n def get(self):\n # Grove - Temperature&Humidity Sensor connected to port D5\n sensor = DHT('11', 5)\n\n humidity, temperature = sensor.read()\n \n now = datetime.now()\n dt_string = now.strftime(\"%B %d, %Y %H:%M\")\n \n data = {\n \"humidity\": {\n \"value\": humidity,\n \"units\": \"%\"\n },\n \"temperature\": {\n \"value\": temperature,\n \"units\": \"celcius\"\n },\n \"issued_at\": dt_string\n }\n\n return data\n\nclass Moisture(Resource):\n def getMoistLevel():\n # Grove - Moisture Sensor connected to port A0\n sensor = GroveMoistureSensor(0)\n\n # Sensor output value of dry, moist and wet soil\n moisture = sensor.moisture\n if 0 <= moisture and moisture < 300:\n return {\n \"moisture\": moisture,\n \"description\": 'dry'\n }\n elif 300 <= moisture and moisture < 600:\n return {\n \"moisture\": moisture,\n \"description\": 'moist'\n }\n else:\n return {\n \"moisture\": moisture,\n \"description\": 'wet'\n }\n \n def get(self):\n moisture = Moisture.getMoistLevel()\n \n now = datetime.now()\n dt_string = now.strftime(\"%B %d, %Y %H:%M\")\n\n data = {\n \"moisture\": moisture,\n \"issued_at\": dt_string\n }\n\n return data\n\n \napi.add_resource(TempHumid, \"/api/temp_and_humid\")\napi.add_resource(Moisture, \"/api/moisture\")\n\napp.run(debug=True)\n \nif __name__ == '__main__':\n app.run(host='0.0.0.0')"
},
{
"alpha_fraction": 0.3342246115207672,
"alphanum_fraction": 0.6604278087615967,
"avg_line_length": 40.55555725097656,
"blob_id": "7076b5d607fbfc11fc7b526a8c2cc16054ed5f53",
"content_id": "f9d68693bd380018703cdc5f4abe9acbe4ea42d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 209,
"num_lines": 9,
"path": "/client/src/icons/HomeIcon.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst HomeIcon = () => (\n <svg viewBox=\"0 0 20 20\" fill=\"currentColor\" className=\"home w-6 h-6\">\n <path d=\"M10.707 2.293a1 1 0 00-1.414 0l-7 7a1 1 0 001.414 1.414L4 10.414V17a1 1 0 001 1h2a1 1 0 001-1v-2a1 1 0 011-1h2a1 1 0 011 1v2a1 1 0 001 1h2a1 1 0 001-1v-6.586l.293.293a1 1 0 001.414-1.414l-7-7z\" />\n </svg>\n);\n\nexport default HomeIcon;\n"
},
{
"alpha_fraction": 0.5257575511932373,
"alphanum_fraction": 0.5473484992980957,
"avg_line_length": 32.417720794677734,
"blob_id": "eeb0b10f9945ace91d9a8f4ec7ffed8cc290529e",
"content_id": "1c8ce1f4fee17333a380f507e720f7018ca6030f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2642,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 79,
"path": "/client/src/components/views/weather/CurrentWeather.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport iconPicker from \"utils/iconPicker\";\nimport Divider from \"components/layout/Divider\";\n\nconst CurrentWeather = ({ data }) => {\n const {\n weather,\n temp,\n humidity,\n feels_like,\n wind_speed,\n visibility,\n clouds,\n pressure,\n } = data;\n\n return (\n <div className=\"lg:grid lg:grid-cols-2 lg:my-32\">\n <div className=\"flex flex-col items-center justify-center lg:flex-grow\">\n <img\n className=\"hidden lg:block\"\n src={iconPicker(weather[0].icon)}\n alt=\"\"\n />\n <p className=\"font-black text-6xl lg:text-7xl leading-none mt-8 lg:mt-0\">\n {Math.round(temp)}°C\n </p>\n <p className=\"font-medium text-gray-800 lg:text-3xl mt-4\">\n Amsterdam / {weather[0].description}\n </p>\n </div>\n\n <Divider className=\"lg:hidden\" />\n\n <div>\n <ul className=\"grid grid-cols-2 grid-rows-3 gap-6 h-full w-full\">\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">Wind speed</span>\n <span className=\"text-lg lg:text-xl text-gray-800\">\n {wind_speed} km/h\n </span>\n </li>\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">\n Perceived temperature\n </span>\n <span className=\"text-lg lg:text-xl text-gray-800\">\n {Math.round(feels_like)}°C\n </span>\n </li>\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">Humidity</span>\n <span className=\"text-lg lg:text-xl text-gray-800\">\n {humidity}%\n </span>\n </li>\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">Visibility</span>\n <span className=\"text-lg lg:text-xl text-gray-800\">\n {visibility / 1000} km\n </span>\n </li>\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">Clouds</span>\n <span className=\"text-lg lg:text-xl text-gray-800\">{clouds}%</span>\n </li>\n <li className=\"flex flex-col font-semibold\">\n <span className=\"text-sm lg:text-md text-gray-600\">Pressure</span>\n <span className=\"text-lg lg:text-xl text-gray-800\">\n {pressure} hPa\n </span>\n </li>\n </ul>\n </div>\n </div>\n );\n};\n\nexport default CurrentWeather;\n"
},
{
"alpha_fraction": 0.6259186863899231,
"alphanum_fraction": 0.637187659740448,
"avg_line_length": 20.828876495361328,
"blob_id": "7b1d570a1fdd33fbfbd48a334ef1dabe9b304d64",
"content_id": "93aa443897d6b61dc7ba182479476f47522ae65c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4090,
"license_type": "no_license",
"max_line_length": 302,
"num_lines": 187,
"path": "/README.md",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "# Greenhouse\n\nFor the course Visualization and Platform I needed to create a small web application that incorporates self generated data from a sensor. So I created a dashboard-like application. It displays the temperature, humidity, soil level of my plant. \n\n---\n\n## Table of Contents\n\n - [Prerequisites](#prerequisites)\n - [Getting Started](#getting-started)\n - [Hardware setup](#wiring)\n - [Sensor API](#sensor-api)\n - [Client](#client)\n - [Server](#server)\n - [Built With](#built-with)\n\n---\n\n## Prerequisites\n\n| Parts needed | Amount |\n| :-------------------------------------------------- | -----: |\n| Raspberry Pi 3B | 1x |\n| Seeed Grove - Base Hat for Raspberry Pi | 1x |\n| Seeed Grove - Temperature & Humidity Sensor (DHT11) | 1x |\n| Seeed Grove - Moisture Sensor | 1x |\n| Seeed Grove - Connectors | 2x |\n| Micro USB | 1x |\n| Ethernet cable | 1x |\n\n---\n\n## Getting Started\n\n### Hardware setup\n1. Connect the grove - base hat to the Raspberry Pi.\n2. Connect the grove - temperature and humidity sensor to port **D5**.\n3. Connect the grove - moisture sensor to port **A0**.\n4. Use micro USB to connect Raspberry Pi with PC.\n\n\n### Sensor API\n\n**Step 1: Install the Raspberry Pi OS**\n<br>\nInstall the Raspberry PI OS using the Raspberry PI Imager. This tool makes the process of creating images faster. Choose the right Raspberry PI imager tool depending on the operating system you currently are on. You can find the official download links [here](https://www.raspberrypi.org/downloads/).\n\n<br />\n\n**Step 2: Activate headless mode**\n<br>\nIn the ```/boot``` directory, create a file named ssh. This way we can access the Raspberry Pi on your own computer without having to use an extra mouse, keyboard and monitor.\n\n<br />\n\n**Step 3: Connect to the Raspberry Pi**\n<br />\nTurn the Pi on and open a new terminal. Connect using the following command:\n\n```bash\nssh pi@your_pi_address\n```\n\nThe default credentials for the Raspberry Pi is:\n- username: **pi**\n- password: **raspberry**\n\nTo find the address of the Raspberry Pi you can use following commands:\n```bash\nhostname -I\n# or\nping raspberrypi.local\n```\n\n<br />\n\n**Step 4: Change the Raspberry config**\n<br />\nAfter you successfully connect to the Pi, you need to modify the configurations of it.\n1. Use the command: `sudo raspi-config`\n1. Select #5 Interfacing options\n2. Select P5 I2C\n3. Select Yes\n4. Select Ok\n5. 
Select Finish\n\n<br />\n\n**Step 5: Clone the git repository**\n```bash\ngit clone https://github.com/KahKitZheng/Greenhouse\n```\n\n<br />\n\n**Step 6: Install the Seeed grove library**\n```bash\ncd Greenhouse/raspberry_pi/grove.py/src\n\ncurl -sL https://github.com/Seeed-Studio/grove.py/raw/master/install.sh | sudo bash -s\n```\n\n<br />\n\n**Step 7: Install the flask dependencies**\n```bash\npip3 install flask-restful Flask-Cors\n```\n\n<br />\n\n**Step 8: Run the API**\n```bash\nflask run --host=0.0.0.0\n```\n\n<br />\n<br />\n\n\n### Client\n\n**Step 1: Clone the repository**\n<br />\nOpen a new terminal and clone this time the repository on your PC.\n\n```bash\ngit clone https://github.com/KahKitZheng/Greenhouse\n```\n\n<br />\n\n**Step 2: Install the dependencies**\n<br />\nGo to the client folder and install the node modules\n```bash\ncd client\n\nnpm install\n```\n\n<br />\n\n**Step 3: Run the client**\n```bash\nnpm run start\n```\n\n<br />\n<br />\n\n\n### Server\n\n**Step 1: Go to the server directory**\n<br />\nOpen a new terminal and go to the server directory\n```bash\ncd server\n```\n\n<br />\n\n**Step 2: Install the dependencies**\n<br />\nJust like the client, you need to install the node modules for the backend.\n```bash\nnpm install\n```\n\n<br />\n\n**Step 3: Run the server**\n```bash\nnode server.js\n```\n\n\n\n---\n\n## Built With\n\n- [React](https://reactjs.org/)\n- [Tailwind CSS](https://tailwindcss.com/)\n- [Express](https://expressjs.com/)\n- [Flask](https://flask.palletsprojects.com/en/1.1.x/)\n"
},
{
"alpha_fraction": 0.3779904246330261,
"alphanum_fraction": 0.5645933151245117,
"avg_line_length": 21,
"blob_id": "e16ec6d9c63c16306ff8e991fcb27c57291523cc",
"content_id": "9457e9a1e30e8f8ec5d40567ce845eb1698b1ede",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 19,
"path": "/client/src/icons/BackArrow.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst BackArrow = () => {\n return (\n <svg\n viewBox=\"0 0 20 20\"\n fill=\"currentColor\"\n className=\"chevron-left w-8 h-8\"\n >\n <path\n fillRule=\"evenodd\"\n d=\"M12.707 5.293a1 1 0 010 1.414L9.414 10l3.293 3.293a1 1 0 01-1.414 1.414l-4-4a1 1 0 010-1.414l4-4a1 1 0 011.414 0z\"\n clipRule=\"evenodd\"\n />\n </svg>\n );\n};\n\nexport default BackArrow;\n"
},
{
"alpha_fraction": 0.5688171982765198,
"alphanum_fraction": 0.5795698761940002,
"avg_line_length": 30.704545974731445,
"blob_id": "e1031dc829935bbbb9202160f71a6013f5245ef2",
"content_id": "d97acdb40643e88b6e4de415cd1f143ff689bfa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2790,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 88,
"path": "/client/src/components/views/plants/details/index.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from \"react\";\nimport * as ROUTES from \"utils/routes\";\nimport { Link, useParams } from \"react-router-dom\";\nimport axios from \"axios\";\n\n/* Layout components */\nimport Container from \"components/layout/Container\";\nimport Loading from \"components/layout/Loading\";\nimport BackArrow from \"icons/BackArrow\";\n\n/* Plant section components */\nimport PlantInfo from \"components/views/plants/details/Information\";\nimport PlantLocation from \"components/views/plants/details/Location\";\nimport PlantSpecifications from \"components/views/plants/details/Specifications\";\n\nconst PlantDetails = () => {\n const [plant, setPlant] = useState(\"\");\n const [token, setToken] = useState(\"\");\n let { id } = useParams();\n\n useEffect(() => {\n const fetchToken = async () => {\n const response = await axios.get(`http://localhost:8080/api/auth/`, {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Content-Type\": \"application/json\",\n });\n\n setToken(response.data.authToken);\n };\n\n const fetchData = async () => {\n const PROXY_URL = \"https://cors-anywhere.herokuapp.com/\";\n const BASE_URL = \"https://trefle.io/api/v1/species/\";\n\n const response = await axios.get(`${PROXY_URL}${BASE_URL}${id}`, {\n headers: {\n Authorization: `Bearer ${token}`,\n \"Access-Control-Allow-Origin\": \"*\",\n \"Content-Type\": \"application/json\",\n },\n });\n\n setPlant(response.data.data);\n };\n\n fetchToken();\n fetchData();\n }, [id, token]);\n\n return (\n <div className=\"flex flex-col h-full w-full\">\n <div className=\"bg-gray-900 rounded-b-xl flex items-center justify-between h-24 mb-8 lg:mb-12\">\n <Link to={ROUTES.PLANTS} className=\" py-4 px-6\">\n <div className=\"text-white\">\n <BackArrow />\n </div>\n </Link>\n <div className=\"text-center\">\n <h1 className=\"font-bold text-3xl leading-none mt-8 lg:mt-12 text-white\">\n {plant.scientific_name}\n </h1>\n <p className=\"mt-2 mb-8 text-gray-400\">{plant.common_name}</p>\n </div>\n <div className=\"py-4 px-6\"></div>\n </div>\n <Container>\n <div className=\"flex flex-col overflow-y-auto no-scroll h-full lg:px-16\">\n {plant === undefined ? (\n <Loading />\n ) : (\n <div>\n <PlantInfo info={plant} />\n <PlantLocation\n observation={plant.observation}\n distribution={plant.distribution}\n />\n {plant.specifications && (\n <PlantSpecifications specifications={plant.specifications} />\n )}\n </div>\n )}\n </div>\n </Container>\n </div>\n );\n};\n\nexport default PlantDetails;\n"
},
{
"alpha_fraction": 0.6166456341743469,
"alphanum_fraction": 0.6557376980781555,
"avg_line_length": 26.34482765197754,
"blob_id": "6736f64ce26c576f7484ab43b2bc9bff405784e7",
"content_id": "948694ff759f0af9106091fdf402c81a9a84212b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1586,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 58,
"path": "/client/src/utils/iconPicker.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import DAY_CLEAR_SKY from \"icons/weather_icons-10.svg\";\nimport NIGHT_CLEAR_SKY from \"icons/weather_icons-06.svg\";\nimport DAY_FEW_CLOUDS from \"icons/weather_icons-17.svg\";\nimport NIGHT_FEW_CLOUDS from \"icons/weather_icons-18.svg\";\nimport SCATTERED_CLOUDS from \"icons/weather_icons-16.svg\";\nimport BROKEN_CLOUDS from \"icons/weather_icons-42.svg\";\nimport SHOWER_RAIN from \"icons/weather_icons-22.svg\";\nimport DAY_RAIN from \"icons/weather_icons-20.svg\";\nimport NIGHT_RAIN from \"icons/weather_icons-21.svg\";\nimport THUNDERSTORM from \"icons/weather_icons-28.svg\";\nimport SNOW from \"icons/weather_icons-68.svg\";\nimport MIST from \"icons/weather_icons-61.svg\";\nimport WEATHER_VANE from \"icons/weather_icons-67.svg\";\n\nconst iconPicker = (icon) => {\n switch (icon) {\n case \"01d\":\n return DAY_CLEAR_SKY;\n case \"01n\":\n return NIGHT_CLEAR_SKY;\n case \"02d\":\n return DAY_FEW_CLOUDS;\n case \"02n\":\n return NIGHT_FEW_CLOUDS;\n case \"03d\":\n return SCATTERED_CLOUDS;\n case \"03n\":\n return SCATTERED_CLOUDS;\n case \"04d\":\n return BROKEN_CLOUDS;\n case \"04n\":\n return BROKEN_CLOUDS;\n case \"09d\":\n return SHOWER_RAIN;\n case \"09n\":\n return SHOWER_RAIN;\n case \"10d\":\n return DAY_RAIN;\n case \"10n\":\n return NIGHT_RAIN;\n case \"11d\":\n return THUNDERSTORM;\n case \"11n\":\n return THUNDERSTORM;\n case \"13d\":\n return SNOW;\n case \"13n\":\n return SNOW;\n case \"50d\":\n return MIST;\n case \"50n\":\n return MIST;\n default:\n return WEATHER_VANE;\n }\n};\n\nexport default iconPicker;\n"
},
{
"alpha_fraction": 0.49833887815475464,
"alphanum_fraction": 0.5045087933540344,
"avg_line_length": 29.985294342041016,
"blob_id": "9cfc154abeebf8f32a1e2a398c3c5ce74365f70e",
"content_id": "51d0b7c6f87becb2af4ecf652c9f16c034e93d4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2107,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 68,
"path": "/client/src/components/views/plants/details/Specifications.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst PlantSpecifications = ({ specifications }) => {\n const {\n average_height,\n growth_form,\n growth_habit,\n growth_rate,\n ligneous_type,\n maximum_height,\n nitrogen_fixation,\n shape_and_orientation,\n toxicity,\n } = specifications;\n\n return (\n <div className=\"mb-8 md:mb-12\">\n <p className=\"text-2xl font-bold uppercase\">Specifications</p>\n <hr className=\"border-2 border-gray-300 mb-4 md:mt-2\" />\n <div className=\"grid grid-cols-2 md:grid-cols-3 gap-4\">\n <div>\n <p className=\"font-bold\">Average height: </p>\n <p>\n {average_height.cm !== null ? average_height.cm + \" cm\" : \"N/A\"}\n </p>\n </div>\n <div>\n <p className=\"font-bold\">Growth form: </p>\n <p>{growth_form !== null ? growth_form : \"N/A\"}</p>\n </div>\n <div>\n <p className=\"font-bold\">Growth habit: </p>\n <p>{growth_habit !== null ? growth_habit : \"N/A\"}</p>\n </div>\n <div>\n <p className=\"font-bold\">Growth rate: </p>\n <p>{growth_rate !== null ? growth_rate : \"N/A\"}</p>\n </div>\n <div>\n <p className=\"font-bold\">Ligneous type: </p>\n <p>{ligneous_type !== null ? ligneous_type : \"N/A\"}</p>\n </div>\n <div>\n <p className=\"font-bold\">Maximum height: </p>\n <p>\n {maximum_height.cm !== null ? maximum_height.cm + \" cm\" : \"N/A\"}\n </p>\n </div>\n <div>\n <p className=\"font-bold\">Nitrogen fixation: </p>\n <p>{nitrogen_fixation !== null ? nitrogen_fixation : \"N/A\"}</p>\n </div>\n <div>\n <p className=\"font-bold\">Shape and orientation: </p>\n <p>\n {shape_and_orientation !== null ? shape_and_orientation : \"N/A\"}\n </p>\n </div>\n <div>\n <p className=\"font-bold\">Toxicity: </p>\n <p>{toxicity !== null ? toxicity : \"N/A\"}</p>\n </div>\n </div>\n </div>\n );\n};\n\nexport default PlantSpecifications;\n"
},
{
"alpha_fraction": 0.5262585878372192,
"alphanum_fraction": 0.5588554739952087,
"avg_line_length": 30.022472381591797,
"blob_id": "a76ad937876f9a7297963e66d4e729ed85e7d2c8",
"content_id": "4f220e5d298c5ce98032d448b654217b8feaab3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2761,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 89,
"path": "/client/src/components/views/dashboard/index.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect } from \"react\";\nimport axios from \"axios\";\n\n/* Import layout components */\nimport Container from \"components/layout/Container\";\n\n/* Import Dashboard components */\nimport TempChart from \"components/views/dashboard/TempChart\";\nimport HumidChart from \"components/views/dashboard/HumidChart\";\nimport SoilLevel from \"components/views/dashboard/SoilLevel\";\n\nconst Dashboard = () => {\n const [temp, setTemp] = useState([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);\n const [humid, setHumid] = useState([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);\n const [moist, setMoist] = useState(\"\");\n\n useEffect(() => {\n const setData = (data, state, setState) => {\n let now = new Date();\n\n if (state.length === 10) {\n state.splice(0, 1);\n setState(state.concat({ ...data, date: now }));\n }\n };\n\n const fetchTempAndHumid = setInterval(() => {\n axios\n .get(`http://192.168.1.104:5000/api/temp_and_humid`, {\n \"Content-Type\": \"application/json\",\n })\n .then((res) => {\n setData(res.data.temperature, temp, setTemp);\n setData(res.data.humidity, humid, setHumid);\n\n console.log(temp);\n console.log(humid);\n });\n }, 10000);\n\n const fetchMoist = setInterval(() => {\n axios\n .get(`http://192.168.1.104:5000/api/moisture`, {\n \"Content-Type\": \"application/json\",\n })\n .then((res) => {\n console.log(moist);\n setMoist(res.data.moisture.description);\n });\n }, 10000);\n\n return () => {\n clearInterval(fetchMoist);\n clearInterval(fetchTempAndHumid);\n };\n }, [temp, humid, moist]);\n\n return (\n <Container>\n <div className=\"xxl:px-6\">\n <div className=\"mt-6 mb-2\">\n <p className=\"text-2xl font-bold uppercase\">Soil level</p>\n <hr className=\"border-2 border-gray-300 mt-1\" />\n <SoilLevel level={moist} />\n </div>\n\n <div className=\"xxl:flex\">\n <div className=\"mt-6 mb-2 lg:h-full w-full lg:first:pr-4\">\n <p className=\"text-2xl font-bold uppercase\">Temperature</p>\n <hr className=\"border-2 border-gray-300 mt-1\" />\n <div className=\"h-screen lg:h- md:h-graphLG w-full relative\">\n <TempChart temp={temp} />\n </div>\n </div>\n\n <div className=\"mt-10 mb-6 lg:h-full w-full lg:last:pl-4\">\n <p className=\"text-2xl font-bold uppercase\">Humidity</p>\n <hr className=\"border-2 border-gray-300 mt-1\" />\n <div className=\"h-screen lg:h- md:h-graphLG w-full relative\">\n <HumidChart humid={humid} />\n </div>\n </div>\n </div>\n </div>\n </Container>\n );\n};\n\nexport default Dashboard;\n"
},
{
"alpha_fraction": 0.522719144821167,
"alphanum_fraction": 0.5409660339355469,
"avg_line_length": 31.882352828979492,
"blob_id": "534dc1ce06d1465ea4bdceb8185ca5458a67c9b6",
"content_id": "ed3d3971d55e5c462d59bac5ab14fe370bb017cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2795,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 85,
"path": "/client/src/components/layout/navigation/index.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport * as ROUTES from \"utils/routes\";\nimport {\n BrowserRouter as Router,\n Switch,\n Route,\n NavLink,\n} from \"react-router-dom\";\n\n/* Views */\nimport Dashboard from \"components/views/dashboard\";\nimport Weather from \"components/views/weather\";\nimport PlantSearch from \"components/views/plants/search\";\nimport PlantDetails from \"components/views/plants/details\";\n\n/* Icons */\nimport HomeIcon from \"icons/HomeIcon\";\nimport WeatherIcon from \"icons/WeatherIcon\";\nimport PlantIcon from \"icons/PlantIcon\";\n\nconst Navigation = () => {\n return (\n <Router>\n <div className=\"flex flex-col-reverse h-full w-full bg-gray-200 lg:flex-row \">\n <div className=\"fixed lg:relative w-full bg-gray-100 rounded-t shadow-2xl-top lg:p-4 lg:shadow-lg lg:w-24 lg:h-full\">\n <p className=\"hidden lg:block font-black text-gray-800 text-4xl text-center mb-4\">\n GH\n </p>\n <ul className=\"grid grid-cols-3 lg:grid-cols-none lg:grid-rows-3 lg:gap-2\">\n <li className=\"text-sm\">\n <NavLink\n exact\n to={ROUTES.DASHBOARD}\n className=\"flex flex-col items-center justify-center text-gray-500 py-2 hover:text-gray-800\"\n activeClassName=\"text-gray-800\"\n >\n <HomeIcon />\n <span className=\"mt-1 lg:text-xs\">Dashboard</span>\n </NavLink>\n </li>\n <li className=\"text-sm\">\n <NavLink\n exact\n to={ROUTES.WEATHER}\n className=\"flex flex-col items-center justify-center text-gray-500 py-2 hover:text-gray-800\"\n activeClassName=\"text-gray-800\"\n >\n <WeatherIcon />\n <span className=\"mt-1 lg:text-xs\">Weather</span>\n </NavLink>\n </li>\n <li className=\"text-sm\">\n <NavLink\n exact\n to={ROUTES.PLANTS}\n className=\"flex flex-col items-center justify-center text-gray-500 py-2 hover:text-gray-800\"\n activeClassName=\"text-gray-800\"\n >\n <PlantIcon />\n <span className=\"mt-1 lg:text-xs\">Plants</span>\n </NavLink>\n </li>\n </ul>\n </div>\n\n <Switch>\n <Route exact path={ROUTES.DASHBOARD}>\n <Dashboard />\n </Route>\n <Route exact path={ROUTES.WEATHER}>\n <Weather />\n </Route>\n <Route exact path={ROUTES.PLANTS}>\n <PlantSearch />\n </Route>\n <Route exact path={ROUTES.PLANT_DETAILS}>\n <PlantDetails />\n </Route>\n </Switch>\n </div>\n </Router>\n );\n};\n\nexport default Navigation;\n"
},
{
"alpha_fraction": 0.6177914142608643,
"alphanum_fraction": 0.6257668733596802,
"avg_line_length": 28.10714340209961,
"blob_id": "ec0d1af94590c46d5ae9851dc6aba18e66f44181",
"content_id": "0bc3a4d052f15291276a28bab6c005d0c55975af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1630,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 56,
"path": "/client/src/components/views/weather/index.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React, { Fragment, useState, useEffect } from \"react\";\nimport axios from \"axios\";\n\n/* Layout components */\nimport Container from \"components/layout/Container\";\nimport Loading from \"components/layout/Loading\";\nimport Divider from \"components/layout/Divider\";\n\n/* Weather components */\nimport CurrentWeather from \"components/views/weather/CurrentWeather\";\nimport HourlyForecast from \"components/views/weather/HourlyForecast\";\nimport DailyForecast from \"components/views/weather/DailyForecast\";\n\nconst Plants = () => {\n const [weatherData, setWeatherData] = useState({});\n\n useEffect(() => {\n const fetchData = async () => {\n /* latitude and longitude for Amsterdam */\n const lat = \"52.3667\";\n const lon = \"4.8945\";\n const units = \"metric\";\n\n const response = await axios.get(\n `https://api.openweathermap.org/data/2.5/onecall?lat=${lat}&lon=${lon}&units=${units}&\n exclude=minutely&appid=${process.env.REACT_APP_WEATHER_API_KEY}`,\n { \"Content-Type\": \"application/json\" }\n );\n\n setWeatherData(response.data);\n };\n\n fetchData();\n }, []);\n\n return (\n <Container>\n {weatherData.current === undefined || weatherData.hourly === null ? (\n <div className=\"flex flex-col h-full\">\n <Loading />\n </div>\n ) : (\n <Fragment>\n <CurrentWeather data={weatherData.current} />\n <Divider />\n <HourlyForecast data={weatherData.hourly} />\n <Divider />\n <DailyForecast data={weatherData.daily} />\n <Divider />\n </Fragment>\n )}\n </Container>\n );\n};\n\nexport default Plants;\n"
},
{
"alpha_fraction": 0.4744420349597931,
"alphanum_fraction": 0.4907607436180115,
"avg_line_length": 33.724998474121094,
"blob_id": "b8f2913d2442d5c07348651d994cf3fc4dbf9ccc",
"content_id": "fefd7758bccba7292b1bc8e6c4d3cad779d38fa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4167,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 120,
"path": "/client/src/components/views/plants/search/index.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React, { useState, useEffect, useCallback } from \"react\";\nimport { Link } from \"react-router-dom\";\nimport axios from \"axios\";\n\n/* Layout components */\nimport Container from \"components/layout/Container\";\nimport Loading from \"components/layout/Loading\";\n\nconst PlantSearch = () => {\n const [plantData, setPlantData] = useState([]);\n const [query, setQuery] = useState(\"aloe\");\n const [token, setToken] = useState(\"\");\n\n const fetchData = useCallback(async () => {\n const PROXY_URL = \"https://cors-anywhere.herokuapp.com/\";\n const response = await axios.get(\n `${PROXY_URL}https://trefle.io/api/v1/plants/search?q=${query}`,\n {\n headers: {\n Authorization: `Bearer ${token}`,\n \"Access-Control-Allow-Origin\": \"*\",\n \"Content-Type\": \"application/json\",\n },\n }\n );\n\n setPlantData(response.data.data);\n }, [query, token]);\n\n useEffect(() => {\n const fetchToken = async () => {\n const response = await axios.get(`http://localhost:8080/api/auth/`, {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Content-Type\": \"application/json\",\n });\n\n setToken(response.data.authToken);\n };\n\n fetchToken();\n fetchData();\n }, [token, fetchData]);\n\n const handleChange = (event) => {\n setQuery(event.target.value);\n };\n\n return (\n <div className=\"flex flex-col h-full w-full\">\n <div className=\"bg-gray-900 rounded-b-xl flex flex-col items-center justify-center h-24 mb-8 lg:mb-12\">\n <h1 className=\"font-bold text-3xl leading-none mt-8 lg:mt-12 text-white\">\n Explore\n </h1>\n <div className=\"flex mt-6 w-4/5\">\n <input\n className=\"appearance-none w-full shadow rounded-l py-2 lg:py-4 px-3 lg:px-9 lg:text-lg text-gray-800 leading-tight focus:outline-none\"\n type=\"text\"\n value={query}\n placeholder={query}\n onChange={handleChange}\n />\n <button\n className=\"bg-gray-700 hover:bg-gray-600 text-white rounded-r shadow px-3\"\n onClick={() => fetchData()}\n >\n Search\n </button>\n </div>\n </div>\n <Container>\n <div className=\"flex flex-col overflow-y-auto h-full lg:px-16\">\n {plantData.length === 0 ? (\n <Loading />\n ) : (\n <div className=\"grid gap-2 grid-cols-1 lg:grid-cols-2 xl:grid-cols-3 no-scroll\">\n {plantData.map((plant) => (\n <Link\n key={plant.id}\n to={`/plants/${plant.slug}`}\n className=\"last:mb-4\"\n >\n <div className=\"flex flex-row items-center justify-start bg-white shadow-md h-24 w-full p-2 border-2 border-white hover:border-gray-500\">\n <img\n className=\"h-20 w-20 object-cover bg-gray-200\"\n src={plant.image_url}\n alt=\"\"\n />\n <div className=\"flex flex-col items-start pl-4\">\n <p className=\"text-gray-800 md:text-lg font-medium text-lgbreak-words truncate\">\n {plant.scientific_name}\n </p>\n <p className=\"text-gray-600 text-sm\">\n <span className=\"hidden md:inline-block\">\n Commonly known as \n </span>\n <strong>\n {plant.common_name === null\n ? \"N/A\"\n : plant.common_name}\n </strong>\n </p>\n <p className=\"text-gray-600 text-sm\">\n <span className=\"hidden md:inline-block\">\n Is a species of the \n </span>\n <strong>{plant.family} family</strong>\n </p>\n </div>\n </div>\n </Link>\n ))}\n </div>\n )}\n </div>\n </Container>\n </div>\n );\n};\n\nexport default PlantSearch;\n"
},
{
"alpha_fraction": 0.62890625,
"alphanum_fraction": 0.64453125,
"avg_line_length": 17.285715103149414,
"blob_id": "570d206342f0751fc6769a8f9a58bd75989b76bc",
"content_id": "42079e457dc802798cf128cd0bd1ae7a9f97b898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 256,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 14,
"path": "/server/app.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "const express = require(\"express\");\nconst cors = require(\"cors\");\nconst app = express();\nconst authRoutes = require(\"./routes/auth\");\n\napp.use(\n cors({\n origin: \"http://localhost:3000\",\n })\n);\n\napp.use(\"/api/auth\", authRoutes);\n\nmodule.exports = app;\n"
},
{
"alpha_fraction": 0.4763062000274658,
"alphanum_fraction": 0.5249088406562805,
"avg_line_length": 25.54838752746582,
"blob_id": "e770989ef7767f836609b49d3784fa154370c935",
"content_id": "db09d90ee713af396f547663c3c458931d896fb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 823,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 31,
"path": "/client/src/components/views/dashboard/SoilLevel.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\n\nconst SoilLevel = ({ level }) => {\n return (\n <div className=\"flex flex-row items-center justify-between py-4\">\n <p\n className={`border-b-4 border-gray-200 text-lg font-bold uppercase text-red-700 ${\n level === \"dry\" ? \"border-red-700\" : \"text-gray-800\"\n }`}\n >\n Dry\n </p>\n <p\n className={`border-b-4 border-gray-200 text-lg font-bold uppercase text-orange-600 ${\n level === \"moist\" ? \"border-orange-600\" : \"text-gray-800\"\n }`}\n >\n Moist\n </p>\n <p\n className={`border-b-4 border-gray-200 text-lg font-bold uppercase text-green-700 ${\n level === \"wet\" ? \"border-green-700\" : \"text-gray-800\"\n }`}\n >\n Wet\n </p>\n </div>\n );\n};\n\nexport default SoilLevel;\n"
},
{
"alpha_fraction": 0.5560040473937988,
"alphanum_fraction": 0.5832492709159851,
"avg_line_length": 27.314285278320312,
"blob_id": "517e8f64bc6dec455f5593c19727e0a61cdcfc40",
"content_id": "dee707d575ace5e6cfe258ca6e91f8c7d1042d20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 35,
"path": "/client/src/components/views/weather/HourlyForecast.js",
"repo_name": "KahKitZheng/greenhouse",
"src_encoding": "UTF-8",
"text": "import React from \"react\";\nimport iconPicker from \"utils/iconPicker\";\n\nconst convertTimestamp = (timestamp) => {\n const dateObject = new Date(timestamp * 1000);\n const humanDateFormat = dateObject.toLocaleString(\"nl-NL\", {\n hour: \"2-digit\",\n minute: \"2-digit\",\n });\n\n return humanDateFormat;\n};\n\nconst HourlyForecast = ({ data }) => {\n return (\n <div className=\"grid grid-flow-col overflow-x-auto no-scroll lg:mx-6\">\n {data.slice(0, 24).map((hour) => (\n <div\n key={hour.dt}\n className=\"w-12 mr-6 lg:mr-10 last:mr-0 font-semibold flex flex-col items-center justify-center\"\n >\n <p className=\"text-md text-gray-600\">{convertTimestamp(hour.dt)}</p>\n <img\n className=\"h-16 w-16\"\n src={iconPicker(hour.weather[0].icon)}\n alt=\"\"\n />\n <p className=\"text-sm text-gray-600\">{Math.round(hour.temp)} °C</p>\n </div>\n ))}\n </div>\n );\n};\n\nexport default HourlyForecast;\n"
}
] | 18 |
goddess5321/photo-trio | https://github.com/goddess5321/photo-trio | f0931fcf1b7ff0741c71d4e4c98cfc6d833e2733 | 4d4a328a109f9c407df89890948ee0696c5d9fdc | a00933606802e5a1a1f3b1712a97d8e046a0f4cb | refs/heads/master | 2021-09-19T18:33:38.402666 | 2018-07-30T13:09:27 | 2018-07-30T13:09:27 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5646802186965942,
"alphanum_fraction": 0.5821220874786377,
"avg_line_length": 21.557376861572266,
"blob_id": "b26a16b764400ac92fe321da2ba7a954e58c0b40",
"content_id": "25416e7de61522ec4feda88ec4e8d7d33cf6b50f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2998,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 122,
"path": "/core.py",
"repo_name": "goddess5321/photo-trio",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport hashlib\nimport logging\n\nimport trio\nimport asks\n\nasks.init(\"trio\")\n\n\nURLS_DATA = \"data.txt\"\nPICS_FILENAME_LENGTH = 16\nPICS_EXT = \".jpg\"\nPICS_DIR = \"pics\"\n# 每次请求延迟(秒)\nDELAY_TIME = 0.35\n# 最大并发数,尽量不要设置得过大\nMAX_CONCURRENCY = 64\n# 最多重试次数\nMAX_RETRY = 5\n# 队列容量\nMAX_QSIZE = 180000\n# 日志等级\nLOG_LEVEL = logging.INFO\nUSER_AGENT = (\n \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36\"\n)\n\nQ = trio.Queue(MAX_QSIZE)\n\n\ndef get_logger():\n \"\"\"\n 获取 logger 实例\n \"\"\"\n formatter = logging.Formatter(\"%(asctime)s - %(message)s\")\n logger = logging.getLogger(\"monitor\")\n logger.setLevel(LOG_LEVEL)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n return logger\n\n\nLOGGER = get_logger()\n\n\ndef get_headers(url):\n \"\"\"\n 根据对应 url 返回 headers\n \"\"\"\n if url.startswith(\"http://i.meizitu.net/\"):\n return {\"User-Agent\": USER_AGENT, \"Referer\": \"http://www.mzitu.com\"}\n if url.startswith(\"http://img.mmjpg.com/\"):\n return {\"User-Agent\": USER_AGENT, \"Referer\": \"http://www.mmjpg.com\"}\n\n\ndef prepare():\n \"\"\"\n 准备工作\n \"\"\"\n # 如果文件夹不存在,则创建文件夹\n if not os.path.exists(PICS_DIR):\n os.mkdir(PICS_DIR)\n\n # 获取所有 url\n with open(URLS_DATA, \"r\", encoding=\"utf8\") as f:\n for u in f.readlines():\n Q.put_nowait((u.strip(), MAX_RETRY))\n\n\nasync def download(sem, url):\n \"\"\"\n 异步获取请求数据\n\n :param sem: Semaphore 实例\n :param url: 请求链接\n \"\"\"\n # 实际 url 以及 重试次数\n url, rt = url\n try:\n # 使用 hash 构建文件名,唯一对应 hash 值\n file_name = hashlib.sha224(url.encode(\"utf8\")).hexdigest()[\n :PICS_FILENAME_LENGTH\n ] + PICS_EXT\n file_path = os.path.join(PICS_DIR, file_name)\n if os.path.exists(file_path):\n LOGGER.info(\"Ignore: {} has existed\".format(file_path))\n return\n await trio.sleep(DELAY_TIME)\n async with sem:\n resp = await asks.get(url, headers=get_headers(url))\n img = resp.content\n async with await trio.open_file(file_path, mode=\"ab\") as f:\n await f.write(img)\n LOGGER.info(\"Save: {}\".format(file_path))\n except Exception as e:\n if rt > 0:\n await Q.put((url, rt - 1))\n LOGGER.error(\"Url: {} download failed\".format(url))\n LOGGER.error(e)\n\n\nasync def run():\n \"\"\"\n 运行主函数\n \"\"\"\n # 创建可复用 Semaphore 实例,减少开销\n sem = trio.Semaphore(MAX_CONCURRENCY)\n async with trio.open_nursery() as nursery:\n while not Q.empty():\n nursery.start_soon(download, sem, await Q.get())\n\n\nif __name__ == \"__main__\":\n prepare()\n trio.run(run)\n"
},
{
"alpha_fraction": 0.6727089881896973,
"alphanum_fraction": 0.7220543622970581,
"avg_line_length": 25.13157844543457,
"blob_id": "44e4d53732db0fce1ca55072534f0cb4cd1cf552",
"content_id": "72d1d665459fd78e83efa08fdd78fb7cd4f5e78f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1255,
"license_type": "permissive",
"max_line_length": 161,
"num_lines": 38,
"path": "/README.md",
"repo_name": "goddess5321/photo-trio",
"src_encoding": "UTF-8",
"text": "# 美女写真图爬虫 trio 版\n\n*其他版本*\n\n* [photo-asyncio 版](https://github.com/chenjiandongx/photo-asyncio)\n* [photo-go 版](https://github.com/chenjiandongx/photo-go)\n* [photo-gevent 版](https://github.com/chenjiandongx/photo-gevent)\n\n### trio/asks\n> 忘了 requests 吧,asks 就足以满足你。 -- 沃·兹基硕德\n\n[Trio](https://github.com/python-trio/trio) 是一个基于 asyncio 的封装库,旨在使异步代码更容易编写,而 [asks](https://github.com/theelous3/asks) 则是 Trio 界的 requests,目前来看除了不支持代理其他都好像很 ok。\n\n\n### 如何运行\n\n#### 图片数据\n图片地址数据保存在了 `data.txt`,共 17w+ 张照片,图片的数据是我从 [mmjpg](https://github.com/chenjiandongx/mmjpg) 和 [mzitu](https://github.com/chenjiandongx/mzitu) 里提取出来的。\n```bash\n$ wc -l data.txt\n178075 data.txt\n```\n\n#### 运行代码\n```bash\n$ git clone https://github.com/chenjiandongx/photo-trio.git\n$ cd photo-trio\n$ pip install -r requirements.txt # 安装依赖\n$ python core.py\n```\n\n#### 运行效果\n\n\n\n## License\n\nMIT [©chenjiandongx](https://github.com/chenjiandongx)\n"
}
] | 2 |
Naxalov/tinydb_backend21 | https://github.com/Naxalov/tinydb_backend21 | 926f06507e5e928862ef636876006f0cbd399c54 | 2ecba5ad3734a3ce8d7e53eb9e42448947594b3f | f6aa53d8993655a62f6b95c03d26c9fdbb9b4a56 | refs/heads/master | 2023-03-05T19:22:43.431634 | 2021-02-17T13:37:06 | 2021-02-17T13:37:06 | 338,786,286 | 0 | 5 | null | null | null | null | null | [
{
"alpha_fraction": 0.6224489808082581,
"alphanum_fraction": 0.6360543966293335,
"avg_line_length": 16.352941513061523,
"blob_id": "666a6f33199968ccfef716bd95b3b941473a2eb0",
"content_id": "bb02710c3e96bbd051893a20993fc741566f7e8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 17,
"path": "/read_db.py",
"repo_name": "Naxalov/tinydb_backend21",
"src_encoding": "UTF-8",
"text": "from tinydb import TinyDB,Query\ndb = TinyDB('db.json')\n# Document\n# db.truncate()\n\nq = Query()\ndb.remove(q.user_id==1)\ndb.remove(q.user_id==2)\ndb.remove(q.user_id==8)\n\ndata = db.all()\n# user = db.search(q.user_id>2)\n# print(user)\n\nfor i in data:\n print(i['user_id'])\n print(i['username'])"
},
{
"alpha_fraction": 0.6272493600845337,
"alphanum_fraction": 0.6786632537841797,
"avg_line_length": 21.882352828979492,
"blob_id": "bc84d8344562e5d24c0903c996e27059f3aa4deb",
"content_id": "eada966cc09fd1241a17091b97129222156ae362",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 17,
"path": "/main.py",
"repo_name": "Naxalov/tinydb_backend21",
"src_encoding": "UTF-8",
"text": "from tinydb import TinyDB\ndb = TinyDB('db.json')\n# Document\n\n# db.truncate()\nuser1 = {'user_id':1,'username':'username1'}\nuser2 = {'user_id':2,'username':'username2'}\nuser3 = {'user_id':3,'username':'username3'}\nuser4 = {'user_id':4,'username':'username4'}\nuser5 = {'user_id':5,'username':'username5'}\n\n\ndb.insert(user1)\ndb.insert(user2)\ndb.insert(user3)\ndb.insert(user4)\ndb.insert(user5)\n"
},
{
"alpha_fraction": 0.5263158082962036,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 9,
"blob_id": "b6520c830694aa8255bc9bba2419c6da89405ef9",
"content_id": "890459d51ac86e2503ddb828c1ebf8ce13103b78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 19,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 2,
"path": "/readme.md",
"repo_name": "Naxalov/tinydb_backend21",
"src_encoding": "UTF-8",
"text": "# Hello World\n# 123"
},
{
"alpha_fraction": 0.6036585569381714,
"alphanum_fraction": 0.6219512224197388,
"avg_line_length": 23.185184478759766,
"blob_id": "0c1284149c58c64d4925ff08cfdecdaf28138e1c",
"content_id": "71c14e5c8edbd6519b1f5fc8854a21c798bb0374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 27,
"path": "/smartphone/main.py",
"repo_name": "Naxalov/tinydb_backend21",
"src_encoding": "UTF-8",
"text": "from tinydb import TinyDB\n\ndef insert_db(data_csv:str)->list:\n products = data_csv.split('\\n')\n # print(products)\n collection = []\n doc_key = products[0].split(',')[1:]\n for row in products[1:]:\n doc_ = {}\n for i,x in enumerate(row.split(',')[1:]):\n doc_[doc_key[i]] = x\n collection.append(doc_)\n\n\n return collection\n\ndb = TinyDB('db.json')\n\ntable1 = db.table('products')\ntable2 = db.table('specifications')\n\ndata1 = open('products.csv').read()\ndata2 = open('specifications.csv').read()\n# print(products)\ndb.truncate()\ntable1.insert_multiple( insert_db(data1))\ntable2.insert_multiple( insert_db(data2))\n\n\n\n"
}
] | 4 |
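Reading back the named tables that `smartphone/main.py` fills is symmetric to writing them; a short sketch (the document keys come from the CSV header rows, so the commented query uses a hypothetical `price` column):

```python
from tinydb import TinyDB, Query

db = TinyDB('db.json')
products = db.table('products')

print(len(products))        # number of rows parsed from products.csv
print(products.all()[:3])   # first few documents

Field = Query()
# any header-derived key can be queried; 'price' is illustrative only
# print(products.search(Field.price == '999'))
```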
Proca-Consul/rm_godel_numberings | https://github.com/Proca-Consul/rm_godel_numberings | a177a18e1e47bba7942f25b3feacbb9e7794b834 | 743c17df9a0c141f33799c2b6e46241d6e3be2ae | ef2ea4c1283b2d7fec7ca6979120fe039069843d | refs/heads/master | 2021-01-23T03:12:48.232645 | 2017-03-24T10:51:17 | 2017-03-24T10:51:17 | 86,057,853 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7186978459358215,
"alphanum_fraction": 0.7429048418998718,
"avg_line_length": 45,
"blob_id": "7d6c1bfd7daefad7cd869c07566b74ae5a7db666",
"content_id": "426adb11a9a05057b12d71a296f7e8e8e26eb47e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1198,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 26,
"path": "/README.md",
"repo_name": "Proca-Consul/rm_godel_numberings",
"src_encoding": "UTF-8",
"text": "\n# [Register Machines] Godel's Numberings program decoder\n\nRegister Machines came in the mid '50s as an alternative model of computation to Turing Machines.\nThey operate on natural numbers, stored in (finitely many) registers R0 ... Rn.\nA program takes the form of a finite list of instructions `<label>: <body>` where each body can be one of the following:\n1. Rn+ => Li: add 1 to the content of Rn and jump to instruction Li;\n2. Rn- => Li, Lj: if the content of Rn is > 0, subtract 1 and jump to Li, else jump to Lj (branch instruction);\n3. HALT: stop the execution.\n \nVia Godel's numberings, it is possible to encode each instruction in a number and the so-obtained list of numbers\nin a single one, representing the given program.\n\nThis script is meant to be a support tool for the Register Machines section in the Models Of Computation module \n@Imperial College London, Computing.\nSimply run `python decode.py <program encoding>` to obtain the full program expansion.\n\n## Example\n\nThe natural number 2<sup>46</sup> x 20483 can represent the encoding of a program.\nCalling `decode.py` on the encoding gives back the following:\n```\nL0: R0- => L2, L1\nL1: HALT\nL2: R0- => L0, L1\nL3: R0+ => L0\n```\n\n"
},
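The README's definitions also make the encoding direction a short fold of the two pairing functions, which is a handy cross-check for `decode.py` below (a standalone sketch, not part of the repo):

```python
def pair(x, y):   # <<x, y>> = 2^x * (2y + 1)
    return (2 ** x) * (2 * y + 1)

def pair0(x, y):  # <x, y> = 2^x * (2y + 1) - 1
    return pair(x, y) - 1

def encode_body(instr):
    # ('halt',), ('inc', i, j) for Ri+ => Lj, ('dec', i, j, k) for Ri- => Lj, Lk
    if instr[0] == 'halt':
        return 0
    if instr[0] == 'inc':
        _, i, j = instr
        return pair(2 * i, j)
    _, i, j, k = instr
    return pair(2 * i + 1, pair0(j, k))

def encode_program(instrs):
    code = 0
    for body in reversed([encode_body(ins) for ins in instrs]):
        code = pair(body, code)  # list encoding: (x : rest) = <<x, rest>>
    return code

# The README's example program encodes back to 2^46 x 20483:
prog = [('dec', 0, 2, 1), ('halt',), ('dec', 0, 0, 1), ('inc', 0, 0)]
assert encode_program(prog) == 2 ** 46 * 20483
```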
{
"alpha_fraction": 0.35061728954315186,
"alphanum_fraction": 0.37654322385787964,
"avg_line_length": 21.80281639099121,
"blob_id": "ca14933681cd5bee1a38275723c1d87bb81f522e",
"content_id": "3d9c12ea8fd9688bcf2dc7135ed7c8f32b9ede3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1620,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 71,
"path": "/decode.py",
"repo_name": "Proca-Consul/rm_godel_numberings",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport math\n\n# The entire encoding is based\n# on Godel's numberings:\n#\n# | <<x, y>> = 2^x (2y + 1) \n# forall x, y natural = |\n# | <x, y> = 2^x (2y + 1) - 1\n#\n###################################################\n\n# Decode prog(e) as list:\n# \n# | [] = 0\n# list = | \n# | (x : rest) = <<x, rest>>\n# \ndef decode_list(program):\n if program == 0:\n return []\n \n body_x = 0\n while (program % 2 == 0):\n program = program / 2\n body_x = body_x + 1\n \n rest = (program - 1) / 2\n \n prog_list = []\n prog_list.append(body_x)\n return prog_list + decode_list(rest)\n\n# Decode body(x):\n# \n# | [Ri+ -> Lj] = <<2i, j>>\n# body(x) = | [Ri- -> Lj, Lk] = <<2i + 1,<j, k>>>\n# | [HALT] = 0\n# \ndef decode_body(ind, body_x):\n if body_x == 0:\n body_enc = 'L' + str(ind) + ': HALT'\n return body_enc \n y = 0\n while (body_x % 2 == 0):\n body_x = body_x / 2\n y = y + 1\n \n z = (body_x - 1) / 2\n if (y % 2 == 0):\n body_enc = 'L' + str(ind) + ': R' + str(y / 2) + '+ => L' + str(z)\n return body_enc \n else:\n i = (y - 1) / 2\n z = z + 1\n\n j = 0\n while (z % 2 == 0):\n z = z / 2\n j = j + 1\n\n k = (z - 1) / 2\n body_enc = 'L' + str(ind) + ': R' + str(i) \n body_enc = body_enc + '- => L' + str(j) + ', L' + str(k)\n return body_enc\n\n\nlist_ = decode_list(int (sys.argv[1]))\nfor i in range(len(list_)):\n print decode_body(i, list_[i])\n\n"
}
] | 2 |
zackhy/statistical-machine-learning | https://github.com/zackhy/statistical-machine-learning | 7a15aedae610ca5fa703e07dab7261a143772a21 | f8dcd75138a6e77a483814e6c6b084073e03830f | b388a22de7f051742ad74171f8a75167b298bd48 | refs/heads/master | 2020-03-19T19:08:03.213737 | 2018-11-06T04:53:22 | 2018-11-06T04:53:22 | 136,840,985 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5197568535804749,
"alphanum_fraction": 0.5501520037651062,
"avg_line_length": 19.5625,
"blob_id": "cf0f9ce2a001a018190deb04ee5598ec9901fb2b",
"content_id": "3075699139f46773c48b3f1e73060f567fbbd15e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 16,
"path": "/models/helper.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef sigmoid(n):\n return 1 / (1 + np.exp(-n))\n\n\ndef log_loss(probs, y_true):\n probs = np.array(probs)\n y_true = np.array(y_true)\n\n term_1 = np.dot(y_true, np.log(probs))\n term_2 = np.dot(1 - y_true, np.log(1 - probs))\n\n return - (1 / len(y_true)) * (term_1 + term_2)\n"
},
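`log_loss` above is the averaged binary cross-entropy; a standalone sanity check (the function body is copied from `helper.py`, the numbers are toy values):

```python
import numpy as np

def log_loss(probs, y_true):
    probs = np.array(probs)
    y_true = np.array(y_true)
    term_1 = np.dot(y_true, np.log(probs))
    term_2 = np.dot(1 - y_true, np.log(1 - probs))
    return -(1 / len(y_true)) * (term_1 + term_2)

probs = np.array([0.9, 0.2, 0.7])
y = np.array([1, 0, 1])

# the same loss written as a mean over per-sample cross-entropies
expected = -np.mean(y * np.log(probs) + (1 - y) * np.log(1 - probs))
assert np.isclose(log_loss(probs, y), expected)
```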
{
"alpha_fraction": 0.6755994558334351,
"alphanum_fraction": 0.6967560052871704,
"avg_line_length": 28.54166603088379,
"blob_id": "22b707251bb8554bca607f089d1bd0e6ee7d9417",
"content_id": "b8f7fa71b898bb36c03452fb3b93907635c806d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 709,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 24,
"path": "/test.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom models.naive_bayes import BernoulliNB as MyBNB\nfrom models.naive_bayes import MultinomialNB as MyMNB\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\n\nX = np.random.randint(5, size=(6, 100))\ny = np.array([1, 2, 3, 4, 4, 5])\n\nmy_clf = MyBNB().fit(X, y)\nmy_score = my_clf.score(X, y)\n\nclf = BernoulliNB().fit(X, y)\nscore = clf.score(X, y)\n\nprint('My Bernoulli score: {:.3f}\\nSklearn Bernoulli score: {:.3f}'.format(my_score, score))\n\nmy_clf = MyMNB().fit(X, y)\nmy_score = my_clf.score(X, y)\n\nclf = MultinomialNB().fit(X, y)\nscore = clf.score(X, y)\n\nprint('My Multinomial score: {:.3f}\\nSklearn Multinomial score: {:.3f}'.format(my_score, score))\n"
},
{
"alpha_fraction": 0.5897436141967773,
"alphanum_fraction": 0.5960413813591003,
"avg_line_length": 36.6779670715332,
"blob_id": "07a48ee71b7b6ff731027bb599d6fce0731ca5c1",
"content_id": "36276cd09a2dd85e35af847d14a37f301cc95bb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4446,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 118,
"path": "/models/naive_bayes.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom models.base import Base\nfrom models.utils import input_check, target_check, fitted_check\n\n\nclass BernoulliNB(Base):\n \"\"\"Bernoulli Naive Bayes Classifier that implements the fit(X, y) and predict(T) methods\"\"\"\n\n def fit(self, X, y):\n \"\"\"\n Fit the Bernoulli Naive Bayes Classifier with input data X and target y\n :param X: Input data. An array-like object. Shape = (n_samples, n_features)\n :param y: Target. An array-like object. Shape = (n_samples, )\n :return: The fitted Bernoulli Naive Bayes Classifier\n \"\"\"\n X = input_check(X)\n y = target_check(y)\n if np.min(X) < 0:\n raise ValueError('Input features should be greater than or equal to 0')\n\n # Convert the features to binary\n if np.max(X) > 1:\n X[X > 1] = 1\n\n self.uniq_classes_, num_docs = np.unique(y, return_counts=True)\n self.num_features_ = X.shape[1]\n\n # Compute prior probability for each class\n self.prior_prob_ = np.array([n / len(y) for n in num_docs])\n\n # Compute document frequencies for each term given a class\n doc_freq = np.vstack([(np.sum(X[y==c, :], axis=0)) for c in self.uniq_classes_])\n\n # Compute conditional probability for each term given a class.\n self.cond_prob_ = (doc_freq + 1) / (num_docs.reshape(-1, 1) + 2)\n\n self.fitted = True\n\n return self\n\n @fitted_check\n def predict(self, X):\n \"\"\"\n Use the fitted Bernoulli Naive Bayes Classifier to make predictions\n :param X: Input data. An array-like object. Shape = (n_samples, n_features)\n :return: Predictions. A 1d numpy array. Shape = (n_samples, )\n \"\"\"\n X = input_check(X)\n if X.shape[1] != self.num_features_:\n raise ValueError('Input X should have a shape of (,{})'.format(self.num_features_))\n\n preds = []\n for t in X:\n # Compute posterior probability\n post_prob = np.log(self.prior_prob_)\n likelihood = np.log(np.power(self.cond_prob_, t)) + np.log(np.power((1-self.cond_prob_), (1-t)))\n post_prob += np.sum(likelihood, axis=1)\n preds.append(np.argmax(post_prob))\n\n return np.array(self.uniq_classes_[preds])\n\n\nclass MultinomialNB(Base):\n \"\"\"Multinomial Naive Bayes Classifier that implements the fit(X, y) and predict(T) methods\"\"\"\n\n def fit(self, X, y):\n \"\"\"\n Fit the Multinomial Naive Bayes Classifier with input data X and target y\n :param X: Input data. An array-like object. Shape = (n_samples, n_features)\n :param y: Target. An array-like object. Shape = (n_samples, )\n :return: The fitted Multinomial Naive Bayes Classifier\n \"\"\"\n X = input_check(X)\n y = target_check(y)\n if np.min(X) < 0:\n raise ValueError('Input features should be greater than or equal to 0')\n\n self.unique_classes_, num_docs = np.unique(y, return_counts=True)\n self.num_features_ = X.shape[1]\n\n # Compute the prior probability\n self.prior_prob_ = np.array([(n / len(y)) for n in num_docs])\n\n # Compute the term frequencies for each term given a class\n term_freq = np.vstack([np.sum(X[y == c, :], axis=0) for c in self.unique_classes_])\n # Add one to avoid zero\n term_freq = term_freq + 1\n\n # Compute the total term frequencies for each class\n tot_freq = np.sum(term_freq, axis=1)\n\n # Compute the conditional probability\n self.cond_prob_ = term_freq / tot_freq.reshape(-1, 1)\n\n self.fitted = True\n\n return self\n\n @fitted_check\n def predict(self, X):\n \"\"\"\n Use the fitted Multinomial Naive Bayes Classifier to make predictions\n :param X: Input data. An array-like object. Shape = (n_samples, n_features)\n :return: Predictions. A 1d numpy array. 
Shape = (n_samples, )\n \"\"\"\n X = input_check(X)\n if X.shape[1] != self.num_features_:\n raise ValueError('Input X should have a shape of (?,{})'.format(self.num_features_))\n\n preds = []\n for t in X:\n # Compute posterior probability\n post_prob = np.log(self.prior_prob_)\n post_prob += np.sum(np.log(np.power(self.cond_prob_, t)), axis=1)\n preds.append(np.argmax(post_prob))\n\n return np.array(self.unique_classes_[preds])\n"
},
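The Bernoulli likelihood used in `BernoulliNB.predict` scores p for a present term and (1 - p) for an absent one, i.e. p^x (1-p)^(1-x) per feature; in log space that is x*log(p) + (1-x)*log(1-p). A tiny standalone check with made-up probabilities:

```python
import numpy as np

cond_prob = np.array([0.8, 0.1, 0.5])  # P(term present | class), toy values
x = np.array([1, 0, 1])                # binary feature vector

log_lik = np.sum(x * np.log(cond_prob) + (1 - x) * np.log(1 - cond_prob))
direct = np.log(0.8 * 0.9 * 0.5)       # present, absent, present
assert np.isclose(log_lik, direct)
```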
{
"alpha_fraction": 0.5786435604095459,
"alphanum_fraction": 0.5800865888595581,
"avg_line_length": 25.653846740722656,
"blob_id": "cfbf95b2a208607e4da12532e9d4d3555bb7e0d6",
"content_id": "be2dfb8e847dd0dea8c504729f4cd3db50ec1bd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 26,
"path": "/models/base.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom models.utils import input_check, target_check, fitted_check\n\n\nclass Base(object):\n def __int__(self):\n pass\n\n def fit(self, X, y):\n return NotImplementedError\n\n def predict(self, T):\n return NotImplementedError\n\n @fitted_check\n def score(self, X, y_true):\n \"\"\"\n :param X: Input data. An array-like object. Shape = (n_samples, n_features)\n :param y_true: Target. An array-like object. Shape = (n_samples, )\n :return: Accuracy score.\n \"\"\"\n X = input_check(X)\n y_true = target_check(y_true)\n preds = self.predict(X)\n return (preds == y_true).mean()\n"
},
{
"alpha_fraction": 0.5487148761749268,
"alphanum_fraction": 0.5517035126686096,
"avg_line_length": 26.42622947692871,
"blob_id": "3068011afbe43c3d87f042bd3baa0a21dfae747c",
"content_id": "af97098228f0ea3176b7a217f490fe6dcf229ad1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1673,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 61,
"path": "/models/logistic_regression.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom models.base import Base\nfrom models.helper import sigmoid, log_loss\nfrom models.utils import input_check, target_check, fitted_check\n\n\nclass LogisticRegression(Base):\n \"\"\"Implement a simple logistic regression\"\"\"\n def __init__(self, learning_rate, max_iter):\n self.learning_rate = learning_rate\n self.max_iter = max_iter\n\n def fit(self, X, y):\n \"\"\"Fit the model using stochastic gradient descent\"\"\"\n X = input_check(X)\n y = target_check(y)\n\n n_features = X.shape[1]\n\n # Initialize weights\n coef = np.zeros(n_features)\n intercept = 0\n loss = log_loss(sigmoid(np.matmul(X, coef) + intercept), y)\n\n # Stochastic gradient descent\n while self.max_iter > 0:\n for x, y_true in zip(X, y):\n # Calculate prediction\n z = np.dot(x, coef) + intercept\n y_pred = sigmoid(z)\n\n error = y_pred - y_true\n\n # Calculate gradient\n gradient = x * error\n\n # Update weights\n coef = coef - self.learning_rate * gradient\n intercept = intercept - self.learning_rate * error\n\n self.max_iter -= 1\n\n loss = log_loss(sigmoid(np.matmul(X, coef) + intercept), y)\n\n self.coef_ = coef\n self.intercept_ = intercept\n self.log_loss_ = loss\n\n self.fitted = True\n\n return self\n\n @fitted_check\n def predict(self, X):\n X = input_check(X)\n z = np.matmul(X, self.coef_) + self.intercept_\n\n y_pred = sigmoid(z)\n\n return np.round(y_pred)\n"
},
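One SGD step from `fit` worked out numerically: for the binary cross-entropy the gradient with respect to the weights reduces to (y_pred - y_true) * x, which is exactly the `error * x` update above (standalone sketch, toy numbers):

```python
import numpy as np

x = np.array([1.0, 2.0])
y_true = 1.0
coef, intercept, lr = np.zeros(2), 0.0, 0.1

y_pred = 1 / (1 + np.exp(-(np.dot(x, coef) + intercept)))  # sigmoid(0) = 0.5
error = y_pred - y_true                                    # -0.5
coef -= lr * error * x
intercept -= lr * error
print(coef, intercept)  # [0.05 0.1 ] 0.05
```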
{
"alpha_fraction": 0.5912208557128906,
"alphanum_fraction": 0.5980795621871948,
"avg_line_length": 23.299999237060547,
"blob_id": "fd1fef482cd3d947c8899451d26614aea922ce36",
"content_id": "7db33f29ccdaead31ff83594247e3a3e21c1d945",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 729,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 30,
"path": "/models/utils.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\n\n\ndef fitted_check(func):\n\n def wrapper(self, *args, **kw):\n if not hasattr(self, 'fitted'):\n raise AttributeError(\"This model instance is not fitted yet. Call 'fit' first.\")\n return func(self, *args, **kw)\n\n return wrapper\n\n\ndef input_check(X):\n if not isinstance(X, np.ndarray):\n X = np.array(X)\n if X.ndim != 2:\n raise ValueError('Input X should be a 2d array-like object. Shape = (n_samples, n_features)')\n\n return X\n\n\ndef target_check(y):\n if not isinstance(y, np.ndarray):\n y = np.array(y)\n if y.ndim != 1:\n raise ValueError('Input y should be a 1d array-like object. Shape = (n_samples, )')\n\n return y\n"
},
{
"alpha_fraction": 0.6096361875534058,
"alphanum_fraction": 0.6135693192481995,
"avg_line_length": 28.91176414489746,
"blob_id": "361ff77d98e3abeb47dce9abc44995e14f21f681",
"content_id": "d4da4e68802cd2176fabe041f1b6b3421e2a55a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 34,
"path": "/models/metrics.py",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport numpy as np\n\ndef confusion_matrix(y_true, y_pred):\n \"\"\"\n :param y_true: True targets. An array-like object. Shape = (n_samples, )\n :param y_pred: Predicted values. An array-like object. Shape = (n_samples, )\n :return: Consufison matrix.\n \"\"\"\n y_true = np.array(y_true)\n y_pred = np.array(y_pred)\n\n if y_true.shape != y_pred.shape:\n raise ValueError('y_true and y_pred must have the same shape.')\n\n labels = np.unique(y_true)\n pred_labels = np.unique(y_pred)\n\n if not np.isin(pred_labels, labels).all():\n raise ValueError('All the labels in y_pred must be in y_true')\n\n label_to_index = dict((l, i) for i, l in enumerate(labels))\n\n # Convert labels to index\n y_true = [label_to_index.get(l) for l in y_true]\n y_pred = [label_to_index.get(l) for l in y_pred]\n\n # Confustion matrix\n cm = np.zeros((len(labels), len(labels)), dtype=np.int32)\n\n for row, col in zip(y_true, y_pred):\n cm[row][col] += 1\n\n return cm\n"
},
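The convention implemented above (rows are true labels, columns are predictions, both ordered by `np.unique`) on a toy example, recomputed standalone:

```python
import numpy as np

y_true = ['cat', 'dog', 'cat', 'dog']
y_pred = ['cat', 'cat', 'cat', 'dog']

labels = np.unique(y_true)                    # ['cat' 'dog']
index = {l: i for i, l in enumerate(labels)}

cm = np.zeros((2, 2), dtype=np.int32)
for t, p in zip(y_true, y_pred):
    cm[index[t], index[p]] += 1

print(cm)  # [[2 0]
           #  [1 1]]  true 'dog' predicted as 'cat' once
```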
{
"alpha_fraction": 0.7537688612937927,
"alphanum_fraction": 0.7688442468643188,
"avg_line_length": 16.909090042114258,
"blob_id": "e8dae0e78bb05de453b45e6d059dff62f941dd02",
"content_id": "9db3ce7bc281a082c199cbfc487d37a98b96c008",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 199,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/README.md",
"repo_name": "zackhy/statistical-machine-learning",
"src_encoding": "UTF-8",
"text": "# statistical-machine-learning\n\nRealize some machine learning algorithms from scratch in Python. \n\nImplemented: \n\n1. Bernoulli Naive Bayes \n\n2. Multinomial Naive Bayes \n\n3. Logistic Regression \n"
}
] | 8 |
touretzkyds/cozmo-tools | https://github.com/touretzkyds/cozmo-tools | ccdce25c93200aaa9e36a968ca26ad2366e68524 | 60e4574be171daa99e137d4383776123127b3e76 | 64690a72a2eb6ce485ccff5631bdcbc4e3afc933 | refs/heads/master | 2023-06-09T03:09:55.277088 | 2023-05-31T02:52:01 | 2023-05-31T02:52:01 | 73,160,495 | 92 | 47 | null | 2016-11-08T07:27:37 | 2022-01-11T02:37:31 | 2022-01-20T05:28:05 | Python | [
{
"alpha_fraction": 0.6311250925064087,
"alphanum_fraction": 0.6548042893409729,
"avg_line_length": 47.7066650390625,
"blob_id": "961a67e42a3e1b7281b0749a84205d7a87d12b09",
"content_id": "61e41644a85929c58ee4c84a718c59dbc8c2cf06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7306,
"license_type": "no_license",
"max_line_length": 141,
"num_lines": 150,
"path": "/cozmo_fsm/examples/Boo.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Peek-A-Boo game inspired by the Boo game of Pablo Barros.\n\n This version is coded using the cozmo_fsm package and illustrates\n features such as repetitive polling, nested state machines, and a\n completion transition that uses one completing source node to\n terminate a second source (MoveHead) that doesn't complete.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass WaitForPlayer(StateMachineProgram):\n \"\"\"Wait for player's face to appear and remain visible for a little while.\"\"\"\n def start(self,event=None):\n self.set_polling_interval(0.2)\n self.faces_found = 0 # initialize before polling starts\n super().start(event)\n\n def poll(self):\n if self.robot.world.visible_face_count() == 0: return\n self.faces_found += 1\n if self.faces_found > 3:\n self.post_completion()\n\nclass WaitForHide(StateNode):\n \"\"\"Wait for player's face to disappear and remain not visible for a little while.\"\"\"\n def start(self,event=None):\n self.set_polling_interval(0.2)\n self.faces_not_found = 0 # initialize before polling starts\n super().start(event)\n\n def poll(self):\n if self.robot.world.visible_face_count() > 0: return\n self.faces_not_found += 1\n if self.faces_not_found > 2:\n self.post_completion()\n\nclass HeadAndLiftGesture(StateNode):\n \"\"\"Move head and lift simultaneously. Finish when head movement completes.\"\"\"\n def setup(self):\n \"\"\"\n launch: StateNode() =N=> {move_head, move_lift}\n \n move_head: SetHeadAngle(cozmo.robot.MAX_HEAD_ANGLE)\n move_lift: MoveLift(-3)\n \n {move_head, move_lift} =C(1)=> ParentCompletes()\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:10:20 2020:\n \n launch = StateNode() .set_name(\"launch\") .set_parent(self)\n move_head = SetHeadAngle(cozmo.robot.MAX_HEAD_ANGLE) .set_name(\"move_head\") .set_parent(self)\n move_lift = MoveLift(-3) .set_name(\"move_lift\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(launch) .add_destinations(move_head,move_lift)\n \n completiontrans1 = CompletionTrans(1) .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(move_head,move_lift) .add_destinations(parentcompletes1)\n \n return self\n\nclass Boo(StateNode):\n def setup(self):\n \"\"\"\n launch: Say(\"Let's play\")\n =C=> SetHeadAngle(30)\n =C=> player_appears\n \n player_appears: WaitForPlayer()\n =C=> AnimationNode('anim_freeplay_reacttoface_identified_01_head_angle_40')\n =C=> SetHeadAngle(cozmo.robot.MIN_HEAD_ANGLE)\n =C=> SetHeadAngle(cozmo.robot.MAX_HEAD_ANGLE)\n =C=> player_hides\n \n player_hides: WaitForHide()\n =C=> AnimationNode('anim_hiking_observe_01')\n =C=> HeadAndLiftGesture()\n =C=> player_reappears\n \n player_reappears: WaitForPlayer()\n =C=> AnimationNode('anim_freeplay_reacttoface_like_01')\n =C=> HeadAndLiftGesture()\n =C=> Say(\"play again\")\n =C=> SetHeadAngle(30)\n =C=> player_hides\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:10:20 2020:\n \n launch = Say(\"Let's play\") .set_name(\"launch\") .set_parent(self)\n setheadangle1 = SetHeadAngle(30) .set_name(\"setheadangle1\") .set_parent(self)\n player_appears = WaitForPlayer() .set_name(\"player_appears\") .set_parent(self)\n animationnode1 = AnimationNode('anim_freeplay_reacttoface_identified_01_head_angle_40') .set_name(\"animationnode1\") .set_parent(self)\n setheadangle2 = SetHeadAngle(cozmo.robot.MIN_HEAD_ANGLE) .set_name(\"setheadangle2\") .set_parent(self)\n setheadangle3 = 
SetHeadAngle(cozmo.robot.MAX_HEAD_ANGLE) .set_name(\"setheadangle3\") .set_parent(self)\n player_hides = WaitForHide() .set_name(\"player_hides\") .set_parent(self)\n animationnode2 = AnimationNode('anim_hiking_observe_01') .set_name(\"animationnode2\") .set_parent(self)\n headandliftgesture1 = HeadAndLiftGesture() .set_name(\"headandliftgesture1\") .set_parent(self)\n player_reappears = WaitForPlayer() .set_name(\"player_reappears\") .set_parent(self)\n animationnode3 = AnimationNode('anim_freeplay_reacttoface_like_01') .set_name(\"animationnode3\") .set_parent(self)\n headandliftgesture2 = HeadAndLiftGesture() .set_name(\"headandliftgesture2\") .set_parent(self)\n say1 = Say(\"play again\") .set_name(\"say1\") .set_parent(self)\n setheadangle4 = SetHeadAngle(30) .set_name(\"setheadangle4\") .set_parent(self)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(launch) .add_destinations(setheadangle1)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(setheadangle1) .add_destinations(player_appears)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(player_appears) .add_destinations(animationnode1)\n \n completiontrans5 = CompletionTrans() .set_name(\"completiontrans5\")\n completiontrans5 .add_sources(animationnode1) .add_destinations(setheadangle2)\n \n completiontrans6 = CompletionTrans() .set_name(\"completiontrans6\")\n completiontrans6 .add_sources(setheadangle2) .add_destinations(setheadangle3)\n \n completiontrans7 = CompletionTrans() .set_name(\"completiontrans7\")\n completiontrans7 .add_sources(setheadangle3) .add_destinations(player_hides)\n \n completiontrans8 = CompletionTrans() .set_name(\"completiontrans8\")\n completiontrans8 .add_sources(player_hides) .add_destinations(animationnode2)\n \n completiontrans9 = CompletionTrans() .set_name(\"completiontrans9\")\n completiontrans9 .add_sources(animationnode2) .add_destinations(headandliftgesture1)\n \n completiontrans10 = CompletionTrans() .set_name(\"completiontrans10\")\n completiontrans10 .add_sources(headandliftgesture1) .add_destinations(player_reappears)\n \n completiontrans11 = CompletionTrans() .set_name(\"completiontrans11\")\n completiontrans11 .add_sources(player_reappears) .add_destinations(animationnode3)\n \n completiontrans12 = CompletionTrans() .set_name(\"completiontrans12\")\n completiontrans12 .add_sources(animationnode3) .add_destinations(headandliftgesture2)\n \n completiontrans13 = CompletionTrans() .set_name(\"completiontrans13\")\n completiontrans13 .add_sources(headandliftgesture2) .add_destinations(say1)\n \n completiontrans14 = CompletionTrans() .set_name(\"completiontrans14\")\n completiontrans14 .add_sources(say1) .add_destinations(setheadangle4)\n \n completiontrans15 = CompletionTrans() .set_name(\"completiontrans15\")\n completiontrans15 .add_sources(setheadangle4) .add_destinations(player_hides)\n \n return self\n"
},
{
"alpha_fraction": 0.7742615938186646,
"alphanum_fraction": 0.7763713002204895,
"avg_line_length": 44.14285659790039,
"blob_id": "8d304e1f881fef7ee5879a1a9d7f53788093dd25",
"content_id": "28f9163c6db4259335e203fd2833182efaeeb5a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 21,
"path": "/README.md",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "# cozmo-tools\n\nSee the INSTALL.txt file for installation instructions.\n\nFor a radically different approach to Cozmo programming more suited to beginners, try Calypso at https://Calypso.software\n\n## Tools for programming Anki's Cozmo robot via the Python SDK.\n\n* __simple_cli__ provides a _Command Line Interface_ for the Cozmo SDK\nso you can evaluate expressions in the context of an active SDK connection\nto a robot. It also provides a variety of visualization tools, such as a\ncamera viewer, worldmap viewer, particle viewer, and path viewer.\nRun it by typing: `python3 simple_cli`\n\n* __cozmo_fsm__ is a Finite State Machine package for Cozmo programming.\n\n* __genfsm__ is a preprocessor that converts .fsm files written in\nthe cozmo_fsm notation to .py files that are ready to run.\n\n__Note__: you can install most of the Python dependencies by simply running `pip3 install -r requirements.txt`,\nbut see the INSTALL.txt file for some exceptions.\n"
},
{
"alpha_fraction": 0.7516778707504272,
"alphanum_fraction": 0.7986577153205872,
"avg_line_length": 48.66666793823242,
"blob_id": "81d8f58316f4fc4449f6a561f4eb793427e08ba6",
"content_id": "e38768e68334b04fa23504851b157899985ccf8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 3,
"path": "/aruco/sheets/aruco_4x4_100_44mm/README.txt",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "These markers are 44 mm square and are designed to be printed on\n2 inch square adhesive labels. Marker 00 appears after Marker 99\non the last sheet.\n"
},
{
"alpha_fraction": 0.561977744102478,
"alphanum_fraction": 0.5626741051673889,
"avg_line_length": 32.130767822265625,
"blob_id": "bac5cc92ca266e7c958484f9cb6c34d40a1a65bb",
"content_id": "9915884be5633ce52e616deb4ca44f2d292b2e6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4308,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 130,
"path": "/cozmo_fsm/events.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The base Event class is imported from evbase.py.\n All other events are defined here.\n\"\"\"\n\nimport cozmo\n\nfrom .evbase import Event\n\nclass CompletionEvent(Event):\n \"\"\"Signals completion of a state node's action.\"\"\"\n pass\n\n\nclass SuccessEvent(Event):\n \"\"\"Signals success of a state node's action.\"\"\"\n def __init__(self,details=None):\n super().__init__()\n self.details = details\n\n\nclass FailureEvent(Event):\n \"\"\"Signals failure of a state node's action.\"\"\"\n def __init__(self,details=None):\n super().__init__()\n self.details = details\n\n def __repr__(self):\n if isinstance(self.details, cozmo.action.Action):\n reason = self.details.failure_reason[0]\n else:\n reason = self.details\n return '<%s for %s: %s>' % (self.__class__.__name__, self.source.name, reason)\n\n\nclass DataEvent(Event):\n \"\"\"Signals a data item broadcasted by the node.\"\"\"\n def __init__(self,data):\n super().__init__()\n self.data = data\n\n\nclass TextMsgEvent(Event):\n \"\"\"Signals a text message broadcasted to the state machine.\"\"\"\n def __init__(self,string,words=None,result=None):\n super().__init__()\n self.string = string\n self.words = words or string.split(None)\n self.result = result\n\nclass SpeechEvent(Event):\n \"\"\"Results of speech recognition process.\"\"\"\n def __init__(self,string,words=None,result=None):\n super().__init__()\n self.string = string\n self.words = words\n self.result = result\n\nclass PilotEvent(Event):\n \"\"\"Results of a pilot request.\"\"\"\n def __init__(self,status,**args):\n super().__init__()\n self.status = status\n self.args = args\n\n def __repr__(self):\n try:\n src_string = self.source.name\n except:\n src_string = repr(self.source)\n return '<%s %s from %s>' % (self.__class__.__name__, self.status.__name__, src_string)\n\n\n#________________ Cozmo-generated events ________________\n\nclass CozmoGeneratedEvent(Event):\n def __init__(self,source,params):\n super().__init__()\n self.source = source\n self.params = params\n # Note regarding generator(): we're going to curry this function\n # to supply EROUTER and EVENT_CLASS as the first two arguments.\n def generator(EROUTER, EVENT_CLASS, cozmo_event, obj=None, **kwargs):\n our_event = EVENT_CLASS(obj,kwargs)\n EROUTER.post(our_event)\n\nclass TapEvent(CozmoGeneratedEvent):\n cozmo_evt_type = cozmo.objects.EvtObjectTapped\n\nclass FaceEvent(CozmoGeneratedEvent):\n cozmo_evt_type = cozmo.faces.EvtFaceAppeared\n\nclass ObservedMotionEvent(CozmoGeneratedEvent):\n cozmo_evt_type = cozmo.camera.EvtRobotObservedMotion\n\n def __repr__(self):\n top = self.params['has_top_movement']\n left = self.params['has_left_movement']\n right = self.params['has_right_movement']\n movement = ''\n if top:\n pos = self.params['top_img_pos']\n movement = movement + ('' if (movement=='') else ' ') + \\\n ('top:(%d,%d)' % (pos.x,pos.y))\n if left:\n pos = self.params['left_img_pos']\n movement = movement + ('' if (movement=='') else ' ') + \\\n ('left:(%d,%d)' % (pos.x,pos.y))\n if right:\n pos = self.params['right_img_pos']\n movement = movement + ('' if (movement=='') else ' ') + \\\n ('right:(%d,%d)' % (pos.x,pos.y))\n if movement == '':\n pos = self.params['img_pos']\n movement = movement + ('' if (movement=='') else ' ') + \\\n ('broad:(%d,%d)' % (pos.x,pos.y))\n return '<%s %s>' % (self.__class__.__name__, movement)\n\n\nclass UnexpectedMovementEvent(CozmoGeneratedEvent):\n cozmo_evt_type = cozmo.robot.EvtUnexpectedMovement\n\n def __repr__(self):\n side = 
self.params['movement_side']\n # side.id == 0 means the movement_side is \"unknown\"\n # Occurs when reaction triggers are disabled (as is normally the case).\n side_string = ' '+side.name if side.id > 0 else ''\n return '<%s %s%s>' % (self.__class__.__name__,\n self.params['movement_type'].name,\n side_string)\n\n"
},
{
"alpha_fraction": 0.6531531810760498,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 41.935482025146484,
"blob_id": "b70c3bde63c9c187c67e6354e432cc0b368f1a2f",
"content_id": "a1327f219b85d3230f0ea2b615dcff4114be0dc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 31,
"path": "/cozmo_fsm/TODO.txt",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "Remove arucos from particle landmark lists if the aruco is part of a\nwall. No need to maintain separate mu/sigma info for wall arucos.\nShould still allow solo arucos to be used as landmarks if they are not\npart of a wall definition.\n\nThis will speed up the particle filter because we won't be calling\nprocess_landmark on all the wall arucos for every particle.\n\n**** Current bug: if we kidnap the robot so pf state is 'lost',\nmake_particles_from_landmarks is punting because it only looks for\nsolo aruco landmarks now; doesn't construct walls.\n\n\n================================================================\n\nBug: If we see markers 43 and 44 we create Wall-43. If we then pick\nup the robot so it delocalizes, and put it down so that it only sees\nmarker 45, it will not add the marker or re-localize, even though\nit should know where it is because marker 45 gives it its\nposition with respect to Wall-43.\n\n================================================================\n\nShould replace integer Aruco marker ids with 'Aruco-###' strings\nthroughout.\n\n================================================================\n\nIntersection of two line segments needed for path planner:\nhttps://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect\nhttp://www.cs.swan.ac.uk/~cssimon/line_intersection.html\n\n"
},
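For the segment-intersection item at the end of the TODO, the parametric test from the linked references comes down to a few lines; a standalone sketch (one common formulation, not code from this repo):

```python
def segment_intersection(p1, p2, p3, p4):
    """Intersection point of segments p1-p2 and p3-p4, or None."""
    (x1, y1), (x2, y2), (x3, y3), (x4, y4) = p1, p2, p3, p4
    denom = (x4 - x3) * (y1 - y2) - (x1 - x2) * (y4 - y3)
    if denom == 0:
        return None  # parallel or collinear
    s = ((y3 - y4) * (x1 - x3) + (x4 - x3) * (y1 - y3)) / denom
    t = ((y1 - y2) * (x1 - x3) + (x2 - x1) * (y1 - y3)) / denom
    if 0 <= s <= 1 and 0 <= t <= 1:
        return (x1 + s * (x2 - x1), y1 + s * (y2 - y1))
    return None

print(segment_intersection((0, 0), (2, 2), (0, 2), (2, 0)))  # (1.0, 1.0)
```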
{
"alpha_fraction": 0.5840504765510559,
"alphanum_fraction": 0.6190476417541504,
"avg_line_length": 38.6136360168457,
"blob_id": "fa712b97e3a85fd42444868ba5c90371ba850a86",
"content_id": "e1ddc58b50be068267c108c9508c16f367837039",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1743,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 44,
"path": "/cozmo_fsm/examples/CV_GoodFeatures.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n CV_GoodFeatures demonstrates the Shi and Tomasi (1994) feature\n extractor built in to OpenCV.\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom cozmo_fsm import *\n\nclass CV_GoodFeatures(StateMachineProgram):\n def __init__(self):\n super().__init__(aruco=False, cam_viewer=False, annotate_sdk=False)\n\n def start(self):\n self.colors = np.random.randint(0,255,(101,3),dtype=np.int)\n dummy = numpy.array([[0]],dtype='uint8')\n super().start()\n\n cv2.namedWindow('features')\n cv2.imshow('features',dummy)\n cv2.createTrackbar('maxCorners','features',50,100,lambda self: None)\n cv2.createTrackbar('qualityLevel','features',10,1000,lambda self: None)\n cv2.createTrackbar('minDistance','features',5,50,lambda self: None)\n\n def user_image(self,image,gray):\n cv2.waitKey(1)\n maxCorners = max(1,cv2.getTrackbarPos('maxCorners','features'))\n quality = max(1,cv2.getTrackbarPos('qualityLevel','features'))\n cv2.setTrackbarPos('qualityLevel', 'features', quality) # don't allow zero\n minDist = max(1,cv2.getTrackbarPos('minDistance','features'))\n cv2.setTrackbarPos('minDistance', 'features', minDist) # don't allow zero\n qualityLevel = quality / 1000\n corners = cv2.goodFeaturesToTrack(gray, maxCorners, qualityLevel, minDist)\n (x,y,_) = image.shape\n image = cv2.resize(image,(y*2,x*2))\n i = 0\n for corner in corners:\n x,y = corner.ravel()\n x = int(x*2); y = int(y*2)\n color_index = (x+y) % self.colors.shape[0]\n color = self.colors[color_index].tolist()\n cv2.circle(image, (x, y), 4, color, -1)\n i += 1\n cv2.imshow('features',image)\n"
},
{
"alpha_fraction": 0.5742297172546387,
"alphanum_fraction": 0.597572386264801,
"avg_line_length": 33.54838562011719,
"blob_id": "3ca9a62eb24fdf32cbc5ca3b8be56c2bd82551e5",
"content_id": "d89dfdac5f2959d43a3685f9b06f7a768bc57534",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1071,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 31,
"path": "/cozmo_fsm/examples/Greet.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The Greet demo illustrates the use of CompletionTrans and TimerTrans\n transitions.\n\n Behavior: Cozmo starts out by saying 'Greetings, human!'. After his\n speech has completed, he waits 5 seconds, then says 'Bye-bye now'.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass Greet(StateMachineProgram):\n def setup(self):\n \"\"\"\n say: Say('Greetings, human!') =C=>\n wait: StateNode() =T(5)=>\n say2: Say('Bye-bye now.')\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:12:53 2020:\n \n say = Say('Greetings, human!') .set_name(\"say\") .set_parent(self)\n wait = StateNode() .set_name(\"wait\") .set_parent(self)\n say2 = Say('Bye-bye now.') .set_name(\"say2\") .set_parent(self)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(say) .add_destinations(wait)\n \n timertrans1 = TimerTrans(5) .set_name(\"timertrans1\")\n timertrans1 .add_sources(wait) .add_destinations(say2)\n \n return self\n"
},
{
"alpha_fraction": 0.46350669860839844,
"alphanum_fraction": 0.4836409389972687,
"avg_line_length": 35.121212005615234,
"blob_id": "3671a7801f9c09ba6ae2deff2d4898603f9ff6ce",
"content_id": "f0941787a6cdc9db99291bf9116326cdc1e4cc34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2384,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 66,
"path": "/cozmo_fsm/examples/CV_OpticalFlow.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n CV_OpticalFlow demonstrates the Lucas and Kanade optical flow\n algorithm built in to OpenCV.\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom cozmo_fsm import *\n\nclass CV_OpticalFlow(StateMachineProgram):\n def __init__(self):\n super().__init__(aruco=False, particle_filter=False, cam_viewer=False,\n annotate_sdk = False)\n\n def start(self):\n self.feature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\n self.lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS |\n cv2.TERM_CRITERIA_COUNT,\n 10, 0.03) )\n\n self.colors = np.random.randint(0, 255, (100,3), dtype=np.int)\n\n self.prev_gray = None\n self.good_new = None\n self.mask = None\n\n super().start()\n cv2.namedWindow('OpticalFlow')\n\n def user_image(self,image,gray):\n cv2.waitKey(1)\n if self.prev_gray is None:\n self.prev_gray = gray\n self.prev_feat = cv2.goodFeaturesToTrack(gray, mask=None,\n **self.feature_params)\n return\n new_feat, st, err = \\\n cv2.calcOpticalFlowPyrLK(self.prev_gray, gray,\n self.prev_feat, None, **self.lk_params)\n if new_feat is None:\n self.good_new = None\n return\n self.good_new = new_feat[st==1]\n self.good_old = self.prev_feat[st==1]\n self.prev_gray = gray\n self.prev_feat = self.good_new.reshape(-1,1,2)\n\n (x,y,_) = image.shape\n image = cv2.resize(image,(y*2,x*2))\n if self.mask is None:\n self.mask = np.zeros_like(image)\n\n for i,(new,old) in enumerate(zip(self.good_new, self.good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n self.mask = cv2.line(self.mask, (a+a,b+b), (c+c,d+d),\n self.colors[i].tolist(), 2)\n cv2.circle(image,(a+a,b+b),5,self.colors[i].tolist(),-1)\n image = cv2.add(image,self.mask)\n cv2.imshow('OpticalFlow', image)\n"
},
{
"alpha_fraction": 0.6270661354064941,
"alphanum_fraction": 0.6384297609329224,
"avg_line_length": 22.047618865966797,
"blob_id": "49a00cfed4f0b318c52b6fbe61f7967990333e6c",
"content_id": "df003a31e172ef6113a8a12b15af9c0dde958580",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 968,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 42,
"path": "/cozmo_fsm/trace.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Constants for defining tracing levels.\n\"\"\"\n\nclass TRACE:\n def __init__(self):\n self._trace_level = 0\n\n @property\n def trace_level(self): return TRACE._trace_level\n @trace_level.setter\n def trace_level(self,val):\n TRACE._trace_level = val\n \n @property\n def no_tracing(self): return 0\n @property\n def statenode_start(self): return 1\n @property\n def statenode_startstop(self): return 2\n @property\n def transition_fire(self): return 3\n @property\n def transition_startstop(self): return 4\n @property\n def listener_invocation(self): return 5\n @property\n def polling(self): return 6\n @property\n def await_satisfied(self): return 7\n @property\n def event_posted(self): return 8\n @property\n def task_cancel(self): return 9\n\nTRACE = TRACE()\n\ndef tracefsm(level=None):\n if level is not None:\n type(TRACE).trace_level = level\n else:\n return TRACE.trace_level\n"
},
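Typical use of the trace constants above (assumes the cozmo SDK and cozmo-tools are installed, since importing the package pulls in its dependencies):

```python
from cozmo_fsm.trace import TRACE, tracefsm

tracefsm(TRACE.transition_fire)  # level 3: report transitions as they fire
print(tracefsm())                # read back the current level -> 3
```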
{
"alpha_fraction": 0.5975090861320496,
"alphanum_fraction": 0.6002430319786072,
"avg_line_length": 34.021278381347656,
"blob_id": "b6427a8897ff7c0f294293f2a55db61b36058b81",
"content_id": "595ff35ee29d73823ecb7737b07d0298039ca12e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3292,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 94,
"path": "/cozmo_fsm/pilot0.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTo avoid circular dependencies between pilot.fsm, doorpass.fsm, and\npath_planner.py, we put some pilot classes here so everyone can import\nthem.\n\n\"\"\"\n\nfrom .base import *\nfrom .rrt import *\nfrom .events import PilotEvent\n\nclass PilotCheckStart(StateNode):\n \"Fails if rrt planner indicates start_collides\"\n\n def start(self, event=None):\n super().start(event)\n (pose_x, pose_y, pose_theta) = self.robot.world.particle_filter.pose\n start_node = RRTNode(x=pose_x, y=pose_y, q=pose_theta)\n try:\n self.robot.world.rrt.plan_path(start_node,start_node)\n except StartCollides as e:\n print('PilotCheckStart: Start collides!',e)\n self.post_event(PilotEvent(StartCollides, args=e.args))\n self.post_failure()\n return\n except Exception as e:\n print('PilotCheckStart: Unexpected planner exception',e)\n self.post_failure()\n return\n self.post_success()\n\n\nclass PilotCheckStartDetail(StateNode):\n \"Posts collision object if rrt planner indicates start_collides\"\n\n def start(self, event=None):\n super().start(event)\n (pose_x, pose_y, pose_theta) = self.robot.world.particle_filter.pose\n start_node = RRTNode(x=pose_x, y=pose_y, q=pose_theta)\n try:\n self.robot.world.rrt.plan_path(start_node,start_node)\n except StartCollides as e:\n print('PilotCheckStartDetail: Start collides!',e)\n self.post_event(PilotEvent(StartCollides, args=e.args))\n self.post_data(e.args)\n return\n except Exception as e:\n print('PilotCheckStartDetail: Unexpected planner exception',e)\n self.post_failure()\n return\n self.post_success()\n\n#---------------- Navigation Plan ----------------\n\nclass NavStep():\n DRIVE = \"drive\"\n DOORPASS = \"doorpass\"\n BACKUP = \"backup\"\n\n def __init__(self, type, param):\n \"\"\"For DRIVE and BACKUP types, param is a list of RRTNode instances. The\n reason we group these into a list instead of having one node per step is that\n the DriveContinuous function is going to be interpolating over the entire sequence.\n For a DOORPASS step the param is the door object.\"\"\"\n self.type = type\n self.param = param\n\n def __repr__(self):\n if self.type == NavStep.DOORPASS:\n pstring = self.param.id\n elif self.type == NavStep.DRIVE:\n psteps = [(round(node.x,1),round(node.y,1)) for node in self.param]\n pstring = repr(psteps)\n else: # NavStep.BACKUP and anything else\n pstring = repr(self.param)\n if len(pstring) > 40:\n pstring = pstring[0:20] + ' ...' + pstring[-20:]\n return '<NavStep %s %s>' % (self.type, pstring)\n\n\nclass NavPlan():\n def __init__(self, steps=[]):\n self.steps = steps\n\n def __repr__(self):\n steps = [(('doorpass(%s)' % s.param.id) if s.type == NavStep.DOORPASS else s.type) for s in self.steps]\n return '<NavPlan %s>' % repr(steps)\n\n def extract_path(self):\n nodes = []\n for step in self.steps:\n if step.type in (NavStep.DRIVE, NavStep.BACKUP):\n nodes += step.param\n return nodes\n"
},
{
"alpha_fraction": 0.5950539112091064,
"alphanum_fraction": 0.5958165526390076,
"avg_line_length": 37.40585708618164,
"blob_id": "f25c5574b646fb333037002bf153c05ada5e950d",
"content_id": "a2d36453b8a2df2ca5b53e3d366148647b8d3a06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9179,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 239,
"path": "/cozmo_fsm/evbase.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The Event Router and Event Listener\n\n This file implements an event router scheme modeled after the\n one in Tekkotsu.\n\n\"\"\"\n\nimport functools\nfrom multiprocessing import Queue\n\nimport cozmo\n\nfrom .trace import TRACE\n\n#________________ Event base class ________________\n\nclass Event:\n \"\"\"Base class for all events.\"\"\"\n def __init__(self, source=None):\n self.source = source\n\n cozmo_evt_type = None\n\n def generator(self,erouter,cozmo_evt): pass\n\n def __repr__(self):\n try:\n src_string = self.source.name\n except:\n src_string = repr(self.source)\n return '<%s from %s>' % (self.__class__.__name__, src_string)\n\n#________________ Event Router ________________\n\nclass EventRouter:\n \"\"\"An event router drives the state machine.\"\"\"\n def __init__(self):\n # dispatch_table: event_class -> (source,listener)...\n self.dispatch_table = dict()\n # listener_registry: listener -> (event_class, source)...\n self.listener_registry = dict()\n # wildcard registry: true if listener is a wildcard (should run last)\n self.wildcard_registry = dict()\n # event generator objects\n self.event_generators = dict()\n # running processes\n self.processes = dict() # id -> node\n self.interprocess_queue = Queue()\n\n def start(self):\n self.clear()\n self.poll_processes()\n\n def clear(self):\n self.dispatch_table.clear()\n self.listener_registry.clear()\n self.wildcard_registry.clear()\n self.event_generators.clear()\n self.processes.clear()\n self.interprocess_queue.close()\n self.interprocess_queue = Queue()\n\n def add_listener(self, listener, event_class, source):\n if not issubclass(event_class, Event):\n raise TypeError('% is not an Event' % event_type)\n source_dict = self.dispatch_table.get(event_class)\n if source_dict is None:\n source_dict = dict()\n # start a cozmo event handler if this event type requires one\n if event_class.cozmo_evt_type:\n coztype = event_class.cozmo_evt_type\n if not issubclass(coztype, cozmo.event.Event):\n raise ValueError('%s cozmo_evt_type %s not a subclass of cozmo.event.Event' % (event_type, coztype))\n world = self.robot.world\n # supply the erouter and event type\n gen = functools.partial(event_class.generator, self, event_class)\n self.event_generators[event_class] = gen\n world.add_event_handler(coztype,gen) \n handlers = source_dict.get(source, [])\n handlers.append(listener.handle_event)\n source_dict[source] = handlers\n self.dispatch_table[event_class] = source_dict\n reg_entry = self.listener_registry.get(listener,[])\n reg_entry.append((event_class,source))\n self.listener_registry[listener] = reg_entry\n\n # Transitions like =Hear('foo')=> must use None as the source\n # value because they do the matching themselves instead of relying\n # on the event router. 
So to distinguish a wildcard =Hear=>\n # transition, which must be invoked last, from all the other Hear\n # transitions, we must register it specially.\n def add_wildcard_listener(self, listener, event_class, source):\n self.add_listener(listener, event_class, source)\n self.wildcard_registry[listener.handle_event] = True\n\n def remove_listener(self, listener, event_class, source):\n try:\n del self.wildcard_registry[listener.handle_event]\n except: pass\n if not issubclass(event_class, Event):\n raise TypeError('% is not an Event' % event_class)\n source_dict = self.dispatch_table.get(event_class)\n if source_dict is None: return\n handlers = source_dict.get(source)\n if handlers is None: return\n try:\n handlers.remove(listener.handle_event)\n except: pass\n if handlers == []:\n del source_dict[source]\n if len(source_dict) == 0: # no one listening for this event\n del self.dispatch_table[event_class]\n # remove the cozmo SDK event handler if there was one\n if event_class.cozmo_evt_type:\n coztype = event_class.cozmo_evt_type\n world = self.robot.world\n gen = self.event_generators[event_class]\n world.remove_event_handler(coztype, gen)\n del self.event_generators[event_class]\n\n def remove_all_listener_entries(self, listener):\n for event_class, source in self.listener_registry.get(listener,[]):\n self.remove_listener(listener, event_class, source)\n try:\n del self.listener_registry[listener]\n except: pass\n\n def _get_listeners(self,event):\n source_dict = self.dispatch_table.get(type(event), None)\n if source_dict is None: # no listeners for this event type\n return []\n source_matches = source_dict.get(event.source, [])\n match_handlers = []\n # TapEvent can be wildcarded even though the source is never None\n wildcard_matches = source_dict.get(None, []) if event.source is not None else []\n wildcard_handlers = []\n for handler in source_matches + wildcard_matches:\n if self.wildcard_registry.get(handler,False) is True:\n wildcard_handlers.append(handler)\n else:\n match_handlers.append(handler)\n # wildcard handlers must come last in the list\n return match_handlers + wildcard_handlers\n\n def post(self,event):\n if not isinstance(event,Event):\n raise TypeError('%s is not an Event' % event)\n listeners = self._get_listeners(event)\n cnt = 0\n for listener in listeners:\n cnt += 1\n if TRACE.trace_level >= TRACE.listener_invocation:\n print('TRACE%d:' % TRACE.listener_invocation, listener.__self__, 'receiving', event)\n self.robot.loop.call_soon(listener,event)\n \n def add_process_node(self, node):\n self.processes[id(node)] = node\n\n def delete_process_node(self, node):\n node_id = id(node)\n if node_id in self.processes:\n del self.processes[node_id]\n # print('Deleted id',node_id,'for',node)\n else:\n print('*** ERROR in delete_process_node: node',node_id,'not in process dict!')\n\n POLLING_INTERVAL = 0.1\n\n def poll_processes(self):\n while not self.interprocess_queue.empty():\n (id,event) = self.interprocess_queue.get()\n if id in self.processes:\n node = self.processes[id]\n event.source = node\n print('Node %s returned %s' % (node,event))\n self.post(event)\n else:\n print('*** ERROR in poll_processes: node',id,'not in process dict!', self.processes)\n self.robot.loop.call_later(self.POLLING_INTERVAL, self.poll_processes)\n\n#________________ Event Listener ________________\n\nclass EventListener:\n \"\"\"Parent class for both StateNode and Transition.\"\"\"\n def __init__(self):\n rep = object.__repr__(self)\n self.name = rep[1+rep.rfind(' '):-1] # name defaults to 
hex address\n self.running = False\n if not hasattr(self,'polling_interval'):\n self.polling_interval = None\n self.poll_handle = None\n self._robot = robot_for_loading\n\n def __repr__(self):\n return '<%s %s>' % (self.__class__.__name__, self.name)\n\n def set_name(self,name):\n if not isinstance(name,str):\n raise ValueError('name must be a string, not %s' % name)\n self.name = name\n return self\n\n def start(self):\n self.running = True\n if self.polling_interval:\n self.poll_handle = \\\n self.robot.loop.call_later(self.polling_interval, self._next_poll)\n else:\n self.poll_handle = None\n\n def stop(self):\n if not self.running: return\n self.running = False\n if self.poll_handle: self.poll_handle.cancel()\n self.robot.erouter.remove_all_listener_entries(self)\n\n def handle_event(self, event):\n pass\n\n def set_polling_interval(self,interval):\n if isinstance(interval, (int,float)):\n self.polling_interval = interval\n else:\n raise TypeError('interval must be a number')\n\n def _next_poll(self):\n \"\"\"Called to schedule the next polling interval and then poll the node.\"\"\"\n # Schedule the next poll first because self.poll may cancel it.\n if self.running and self.polling_interval:\n self.poll_handle = \\\n self.robot.loop.call_later(self.polling_interval, self._next_poll)\n self.poll()\n\n def poll(self):\n \"\"\"Dummy polling function in case sublass neglects to supply one.\"\"\"\n if TRACE.trace_level >= TRACE.polling:\n print('TRACE%d: polling' % TRACE.polling, self)\n print('%s has no poll() method' % self)\n"
},
{
"alpha_fraction": 0.5280412435531616,
"alphanum_fraction": 0.5606589913368225,
"avg_line_length": 39.328857421875,
"blob_id": "61628bd3d328f7679fde0b934da070c05735777f",
"content_id": "0eec8ec8ac323b5d8a2f82cd95496e4eaf9e8fd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6009,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 149,
"path": "/cozmo_fsm/perched.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from numpy import matrix, array, ndarray, sqrt, arctan2, pi\nimport threading\nfrom time import sleep\nfrom .geometry import wrap_angle\n\ntry:\n import cv2\n import cv2.aruco as aruco\nexcept: pass\n\n# Known camera parameters\n\n# Microsoft HD ( Calibrated to death )\nmicrosoft_HD_webcam_cameraMatrix = matrix([[1148.00, -3, 641.0],\n [0.000000, 1145.0, 371.0],\n [0.000000, 0.000000, 1.000000]])\nmicrosoft_HD_webcam_distCoeffs = array([0.211679, -0.179776, 0.041896, 0.040334, 0.000000])\n\nclass Cam():\n def __init__(self,cap,x,y,z,phi, theta):\n self.cap = cap\n self.x = x\n self.y = y\n self.z = z\n self.phi = phi\n self.theta = theta\n\n def __repr__(self):\n return '<Cam (%.2f, %.2f, %.2f)> @ %.2f' % \\\n (self.x, self.y, self.z,self.phi*180/pi)\n\nclass PerchedCameraThread(threading.Thread):\n def __init__(self, robot):\n threading.Thread.__init__(self)\n self.robot = robot\n self.use_perched_cameras = False\n self.perched_cameras = []\n # Set camera parameters. (Current code assumes same parameters for all cameras connected to a computer.)\n self.cameraMatrix = microsoft_HD_webcam_cameraMatrix\n self.distCoeffs = microsoft_HD_webcam_distCoeffs\n self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)\n self.parameters = aruco.DetectorParameters_create()\n # camera landmarks from local cameras\n self.cameras = {}\n # camera landamrks from network (sent from server)\n self.camera_pool = {}\n\n def run(self):\n while(True):\n if self.use_perched_cameras:\n self.process_image()\n # Computer overloaded if not given break\n sleep(0.01)\n else:\n break\n\n def start_perched_camera_thread(self,cameras=[]):\n if not isinstance(cameras,list):\n cameras = [cameras]\n\n if self.robot.aruco_id == -1:\n self.robot.aruco_id = int(input(\"Please enter the aruco id of the robot:\"))\n self.robot.world.server.camera_landmark_pool[self.robot.aruco_id]={}\n self.use_perched_cameras=True\n self.perched_cameras = []\n for x in cameras:\n cap = cv2.VideoCapture(x)\n if cap.isOpened():\n self.perched_cameras.append(cap)\n else:\n raise RuntimeError(\"Could not open camera %s.\" % repr(x))\n for cap in self.perched_cameras:\n # hack to set highest resolution\n cap.set(3,4000)\n cap.set(4,4000)\n self.robot.world.particle_filter.sensor_model.use_perched_cameras = True\n print(\"Particle filter now using perched cameras\")\n self.start()\n\n def stop_perched_camera_thread(self):\n self.use_perched_cameras=False\n sleep(1)\n for cap in self.perched_cameras:\n cap.release()\n self.robot.world.particle_filter.sensor_model.use_perched_cameras = False\n print(\"Particle filter stopped using perched cameras\")\n\n def check_camera(self,camera):\n cap = cv2.VideoCapture(camera)\n for j in range(10):\n # hack to clear buffer\n for i in range(5):\n cap.grab()\n ret, frame = cap.read()\n if not ret:\n print('Failed to get camera frame from camera %s.' 
% camera )\n return\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)\n gray = cv2.aruco.drawDetectedMarkers(gray, corners, ids)\n cv2.imshow(\"Camera:\"+str(camera),gray)\n cv2.waitKey(1)\n cap.release()\n cv2.destroyAllWindows()\n\n def rotationMatrixToEulerAngles(self, R) :\n sy = sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular :\n x = arctan2(R[2,1] , R[2,2])\n y = arctan2(-R[2,0], sy)\n z = arctan2(R[1,0], R[0,0])\n else :\n x = arctan2(-R[1,2], R[1,1])\n y = arctan2(-R[2,0], sy)\n z = 0\n \n return array([x, y, z])\n\n def process_image(self):\n # Dict with key: aruco id with values as cameras that can see the marker\n self.temp_cams = {} # Necessary, else self.cameras is empty most of the time\n for cap in self.perched_cameras:\n # Clearing Buffer by grabbing five frames\n for i in range(5):\n cap.grab()\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=self.parameters)\n\n if type(ids) is ndarray:\n vecs = aruco.estimatePoseSingleMarkers(corners, 50, self.cameraMatrix, self.distCoeffs)\n rvecs, tvecs = vecs[0], vecs[1]\n for i in range(len(ids)):\n rotationm, jcob = cv2.Rodrigues(rvecs[i])\n # transform to robot coordinate frame\n transformed = matrix(rotationm).T*(-matrix(tvecs[i]).T)\n phi = self.rotationMatrixToEulerAngles(rotationm.T)\n if ids[i][0] in self.temp_cams:\n self.temp_cams[ids[i][0]][str(cap)]=Cam(str(cap),transformed[0][0,0],\n transformed[1][0,0],transformed[2][0,0],wrap_angle(phi[2]-pi/2), wrap_angle(phi[0]+pi/2))\n else:\n self.temp_cams[ids[i][0]]={str(cap):Cam(str(cap),transformed[0][0,0],\n transformed[1][0,0],transformed[2][0,0],wrap_angle(phi[2]-pi/2), wrap_angle(phi[0]+pi/2))}\n self.cameras = self.temp_cams\n\n # Only server clears the pool\n if self.robot.world.is_server:\n self.camera_pool = self.temp_cams\n"
},
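perched.py recovers each perched webcam's pose by inverting the marker pose that ArUco reports in camera coordinates: if R and t place the marker in the camera frame, the camera origin sits at -R^T t in the marker frame, and the camera's heading comes from the Euler decomposition of R^T. Below is a numpy-only sketch of both steps, runnable without OpenCV; the 30-degree test rotation and 500 mm offset are made-up values, not calibration data.

import numpy as np

def camera_in_marker_frame(R, t):
    # Invert [R|t] (marker pose in camera coords) to get the camera origin
    # in marker coords, matching matrix(rotationm).T * (-matrix(tvecs).T) above.
    return -R.T @ t

def rotation_to_euler(R):
    # Same X-Y-Z Euler extraction as rotationMatrixToEulerAngles above,
    # including the identical gimbal-lock fallback when sy is near zero.
    sy = np.sqrt(R[0, 0]**2 + R[1, 0]**2)
    if sy > 1e-6:
        return np.array([np.arctan2(R[2, 1], R[2, 2]),
                         np.arctan2(-R[2, 0], sy),
                         np.arctan2(R[1, 0], R[0, 0])])
    return np.array([np.arctan2(-R[1, 2], R[1, 1]),
                     np.arctan2(-R[2, 0], sy),
                     0.0])

# Made-up test pose: marker yawed 30 degrees, 500 mm in front of the camera.
a = np.radians(30)
R = np.array([[np.cos(a), -np.sin(a), 0.0],
              [np.sin(a),  np.cos(a), 0.0],
              [0.0,        0.0,       1.0]])
t = np.array([0.0, 0.0, 500.0])
print(camera_in_marker_frame(R, t))        # camera position in marker coordinates
print(np.degrees(rotation_to_euler(R.T)))  # camera heading, as used in process_image()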
{
"alpha_fraction": 0.5624809861183167,
"alphanum_fraction": 0.5843127965927124,
"avg_line_length": 48.18921661376953,
"blob_id": "a4e7c33a329be78e579ab0fc996731890c00ce9f",
"content_id": "9d9c21aff408448161f480c788c7ad74fc66ae12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98526,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 2003,
"path": "/cozmo_fsm/pickup.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo.util import Pose\nfrom cozmo.objects import LightCube\n\nfrom .nodes import *\nfrom .transitions import *\nfrom .geometry import wrap_angle, line_equation, line_intersection\nfrom .geometry import ORIENTATION_UPRIGHT, ORIENTATION_INVERTED, ORIENTATION_SIDEWAYS, ORIENTATION_TILTED\nfrom .geometry import get_orientation_state, same_orientation, get_pattern_number\nfrom .cozmo_kin import center_of_rotation_offset\nfrom .pilot import PilotToPose, PilotCheckStart, ParentPilotEvent, InvalidPose, PilotFrustration\nfrom .rrt import StartCollides, GoalCollides, MaxIterations\nfrom .worldmap import LightCubeObj\n\nfrom math import sin, cos, atan2, pi, sqrt\nimport copy\n\n\nclass GoToCube(StateNode):\n def __init__(self, cube=None):\n self.object = cube\n super().__init__()\n self.side_index = 0\n self.try_number = 0\n self.try_max = 3\n self.roll_cube = False\n self.roll_target = None\n\n def start(self, event=None):\n # self.object will normally be set up by the parent of this node\n if isinstance(self.object, LightCube):\n self.wmobject = self.object.wm_obj\n elif isinstance(self.object, LightCubeObj):\n self.wmobject = self.object\n self.object = self.object.sdk_obj\n else:\n raise ValueError(self.object)\n self.children['looker'].object = self.object\n self.sides = []\n self.side = None\n self.side_index = 0\n self.try_number = 0\n self.try_max = 3\n super().start(event)\n if self.wmobject.pose_confidence < 0:\n print('GoToCube: cube has invalid pose!', self.wmobject, self.object.pose)\n self.post_event(PilotEvent(InvalidPose))\n self.post_failure()\n\n def pick_side(self, offset, use_world_map):\n # If use_world_map is True, use the LightCubeObj pose.\n # If False, use the LightCube's pose as determined by vision.\n cube = self.object\n if use_world_map:\n wmobj = self.object.wm_obj\n x = wmobj.x\n y = wmobj.y\n ang = wmobj.theta\n orientation = wmobj.orientation\n print('Pick side from', self.object.wm_obj)\n else:\n x = cube.pose.position.x\n y = cube.pose.position.y\n orientation, _, _, z = get_orientation_state(cube.pose.rotation.q0_q1_q2_q3)\n if orientation == ORIENTATION_SIDEWAYS:\n ang = wrap_angle(z)\n else:\n ang = cube.pose.rotation.angle_z.radians\n (rx, ry, rtheta) = self.get_robot_cor(use_world_map=use_world_map)\n dist = LightCubeObj.light_cube_size[0]/2 + offset\n sideTop = [ (x + cos(ang)*dist), (y + sin(ang)*dist), wrap_angle(ang + pi) ]\n sideBottom = [ (x - cos(ang)*dist), (y - sin(ang)*dist), wrap_angle(ang) ]\n sideA = [ (x + sin(ang)*dist), (y - cos(ang)*dist), wrap_angle(ang + pi/2) ]\n sideB = [ (x - sin(ang)*dist), (y + cos(ang)*dist), wrap_angle(ang - pi/2) ]\n sides = (sideTop, sideBottom, sideA, sideB)\n\n if orientation == ORIENTATION_SIDEWAYS:\n if self.roll_cube:\n # if the orientation is sideways, only two valid sides to roll cube to upright/inverted\n if self.roll_target == ORIENTATION_UPRIGHT or self.roll_target == ORIENTATION_INVERTED:\n self.try_max = 1\n sorted_sides = (sideBottom, sideTop) if self.roll_target == ORIENTATION_UPRIGHT else (sideTop, sideBottom)\n else:\n sides_vertical = sorted((sideTop, sideBottom), key=lambda pt: ((pt[0]-rx)**2 + (pt[1]-ry)**2))\n sides_horizontal = sorted((sideA, sideB), key=lambda pt: ((pt[0]-rx)**2 + (pt[1]-ry)**2))\n sorted_sides = sides_vertical + sides_horizontal\n else:\n # if the orientation is sideways, only one valid side to pickup cube\n self.try_max = 0\n side = sideBottom\n d = sqrt((side[0]-rx)**2 + (side[1]-ry)**2)\n print(' side 0: %5.1f mm %5.1f , %5.1f @ %5.1f deg.' 
%\n (d, side[0], side[1], side[2]*180/pi))\n return side\n else:\n sorted_sides = sorted(sides, key=lambda pt: ((pt[0]-rx)**2 + (pt[1]-ry)**2))\n\n for i in range(len(sorted_sides)):\n side = sorted_sides[i]\n d = sqrt((side[0]-rx)**2 + (side[1]-ry)**2)\n print(' side %d: %5.1f mm %5.1f , %5.1f @ %5.1f deg.' %\n (i, d, side[0], side[1], side[2]*180/pi))\n print('Go to side %i' % self.side_index)\n return sorted_sides[self.side_index]\n\n def pick_another_side(self):\n if self.try_number >= self.try_max:\n # Have tried all sides, reset counter\n print('Have tried all sides.')\n self.try_number = 0\n self.side_index = 0\n self.try_max = 3\n return False\n else:\n self.try_number += 1\n self.side_index += 1\n print('Haven\\'t tried out all sides of cube. Going to try the side', self.side_index)\n return True\n\n def get_robot_pose(self, use_world_map):\n if use_world_map:\n rx = self.robot.world.particle_filter.pose[0]\n ry = self.robot.world.particle_filter.pose[1]\n rtheta = self.robot.world.particle_filter.pose[2]\n else:\n rx = self.robot.pose.position.x\n ry = self.robot.pose.position.y\n rtheta = self.robot.pose.rotation.angle_z.radians\n return (rx, ry, rtheta)\n\n def get_robot_cor(self, use_world_map):\n \"Get robot center of rotation and current heading\"\n (rx, ry, rtheta) = self.get_robot_pose(use_world_map=use_world_map)\n cx = rx + center_of_rotation_offset*cos(rtheta)\n cy = ry + center_of_rotation_offset*sin(rtheta)\n return (cx, cy, rtheta)\n\n def get_robot_line(self, use_world_map):\n (rx, ry, rtheta) = self.get_robot_pose(use_world_map)\n (cx, cy, ctheta) = self.get_robot_cor(use_world_map)\n return line_equation((rx,ry), (cx,cy))\n\n def get_cube_line(self, use_world_map):\n if use_world_map:\n ox = self.parent.wmobject.x\n oy = self.parent.wmobject.y\n else:\n ox = self.parent.object.pose.position.x\n oy = self.parent.object.pose.position.y\n (sx, sy, stheta) = self.side\n return line_equation((ox,oy), (sx,sy))\n\n def measure_dockedness(self, side, use_world_map):\n \"\"\"Returns distance and relative angle to specified docking pose.\"\"\"\n (rx, ry, rtheta) = self.get_robot_cor(use_world_map)\n (ox, oy, otheta) = side\n dist = sqrt((rx-ox)**2 + (ry-oy)**2)\n relative_angle = abs(wrap_angle(rtheta-otheta) % (pi/2)) * (180/pi)\n return (dist, relative_angle)\n\n\n class PilotToSide(PilotToPose):\n def __init__(self):\n super().__init__(None, verbose=True)\n\n def start(self, event=None):\n cube = self.parent.object\n (x, y, theta) = self.parent.pick_side(100, use_world_map=True)\n self.target_pose = Pose(x, y, self.robot.pose.position.z,\n angle_z=Angle(radians=wrap_angle(theta)))\n (px,py,pq) = self.robot.world.particle_filter.pose\n print('PilotToSide: planned path from (%.1f, %.1f) @ %.1f deg. to pickup point (%.1f, %.1f) @ %.1f deg.' 
%\n (px, py, pq*180/pi,\n self.target_pose.position.x, self.target_pose.position.y,\n self.target_pose.rotation.angle_z.degrees))\n super().start(event)\n\n\n class ReportPosition(StateNode):\n def __init__(self,id=None):\n super().__init__()\n self.id_string = id + ': ' if id else ''\n\n def start(self,event=None):\n super().start(event)\n cube = self.parent.object\n vis = 'visible' if cube.is_visible else 'not visible'\n cx = cube.pose.position.x\n cy = cube.pose.position.y\n rx = self.robot.pose.position.x\n ry = self.robot.pose.position.y\n dx = cx - rx\n dy = cy - ry\n dist = sqrt(dx*dx + dy*dy)\n bearing = wrap_angle(atan2(dy,dx) - self.robot.pose.rotation.angle_z.radians) * 180/pi\n print('%scube %s at (%5.1f,%5.1f) robot at (%5.1f,%5.1f) dist=%5.1f rel. brg=%5.1f' %\n (self.id_string, vis, cx, cy, rx, ry, dist, bearing))\n\n\n class TurnToCube(Turn):\n def __init__(self, check_vis=False):\n self.check_vis = check_vis\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n cube = self.parent.object\n if self.check_vis and not cube.is_visible:\n print('** TurnToCube %s could not see the cube.' % self.name)\n self.angle = None\n super().start(event)\n self.post_failure()\n print('TurnToCube %s posted failure' % self.name)\n else:\n (sx, sy, _) = self.parent.pick_side(0, use_world_map=False)\n (cx, cy, ctheta) = self.parent.get_robot_cor(False)\n dx = sx - cx\n dy = sy - cy\n dist = sqrt(dx*dx + dy*dy)\n self.angle = Angle(degrees = wrap_angle(atan2(dy,dx) - ctheta) * 180/pi)\n if abs(self.angle.degrees) <= 2:\n self.angle = degrees(0)\n if abs(self.angle.degrees) > 60:\n print('********>> BIG TURN in',self)\n print('TurnToCube %s: cube at (%5.1f,%5.1f) robot cor at (%5.1f,%5.1f) dist=%5.1f turn angle=%5.1f' %\n (self.name, sx, sy, cx, cy, dist, self.angle.degrees))\n super().start(event)\n\n\n class CheckAlmostDocked(StateNode):\n # *** TODO: convert to iterate through all feasible sides\n def start(self, event=None):\n if self.running: return\n super().start(event)\n cube = self.parent.object\n side = self.parent.pick_side(0, use_world_map=True)\n self.parent.side = side\n (dist, relative_angle) = self.parent.measure_dockedness(side,True)\n max_distance_from_dock_point = 150 # millimeters\n max_angle_from_dock_heading = 10 # degrees\n max_angle_for_sideways = 135 # degrees\n\n if isinstance(cube, LightCube):\n orientation = cube.wm_obj.orientation\n elif isinstance(cube, LightCubeObj):\n orientation = cube.sdk_obj.orientation\n\n # Re-calculate relative_angle for sideways cube\n if orientation == ORIENTATION_SIDEWAYS:\n (rx, ry, rtheta) = self.parent.get_robot_cor(True)\n ctheta = side[2]\n dtheta = (rtheta - ctheta) if (rtheta>ctheta) else (ctheta-rtheta)\n relative_angle = abs(dtheta/pi*180)\n # print('robot: %.1f deg; cube: %.1f deg; delta: %.1f deg.' %(rtheta/pi*180, ctheta/pi*180, relative_angle))\n\n if dist < max_distance_from_dock_point:\n if relative_angle < max_angle_from_dock_heading and cube.is_visible:\n print('CheckAlmostDocked is satisfied. dist=%.1f mm angle=%.1f deg.' %\n (dist, relative_angle))\n self.post_completion()\n else:\n if orientation == ORIENTATION_SIDEWAYS:\n if relative_angle < max_angle_for_sideways and cube.is_visible:\n print('CheckAlmostDocked: bad angle. (dist=%.1f mm) angle=%.1f deg.' %\n (dist, relative_angle))\n self.post_success()\n else:\n print('CheckAlmostDocked: use the Pilot to path plan. orientation=%s. dist=%.1f mm. angle=%.1f deg.' 
%\n (orientation, dist, relative_angle))\n self.post_failure()\n elif not cube.is_visible:\n print('CheckAlmostDocked: cube not visible')\n self.post_success()\n else:\n print('CheckAlmostDocked: bad angle. (dist=%.1f mm) angle=%.1f deg.' %\n (dist, relative_angle))\n self.post_success()\n else:\n print('CheckAlmostDocked: too far away. dist=%.1f mm. (angle=%.1f deg.)' %\n (dist, relative_angle))\n self.post_failure()\n\n\n class ForwardToCube(Forward):\n def __init__(self, offset):\n self.offset = offset\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n cube = self.parent.object\n dx = cube.pose.position.x - self.robot.pose.position.x\n dy = cube.pose.position.y - self.robot.pose.position.y\n dist = sqrt(dx*dx + dy*dy) - self.offset\n if (dist < 0):\n print('***** ForwardToCube %s negative distance: %.1f mm' % (self.name,dist))\n self.distance = Distance(dist)\n print('ForwardToCube %s: distance %.1f mm' % (self.name, self.distance.distance_mm))\n super().start(event)\n\n\n class ManualDock1(Forward):\n def report(self,rx,ry,rtheta,sx,sy,stheta,intx,inty,int_brg):\n print('ManualDock1: robot cor at %.1f , %.1f @ %.1f deg. side at %.1f , %.1f @ %.1f deg.' %\n (rx, ry, 180*rtheta/pi, sx, sy, stheta*180/pi))\n print(' int at %.1f , %.1f bearing=%.1f deg. dist=%.1f mm ' %\n (intx,inty,int_brg*180/pi,self.distance.distance_mm))\n\n def start(self,event=None):\n rline = self.parent.get_robot_line(use_world_map=True)\n cline = self.parent.get_cube_line(use_world_map=True)\n (intx, inty) = line_intersection(rline, cline)\n (rx, ry,rtheta) = self.parent.get_robot_cor(use_world_map=True)\n # Is intersection point ahead of or behind us?\n intersection_bearing = wrap_angle(atan2(inty-ry, intx-rx)-rtheta)\n (sx, sy, stheta) = self.parent.side\n if abs(intersection_bearing) > pi/2: # Intersection is behind us\n print('ManualDock1: Intersection is behind us.')\n dist = min(75, sqrt((rx-intx)**2 + (ry-inty)**2))\n self.distance = distance_mm(-dist)\n self.report(rx,ry,rtheta,sx,sy,stheta,intx,inty,intersection_bearing)\n super().start(event)\n return\n else: # Intersection is ahead of us\n dx = sx - intx\n dy = sy - inty\n dtheta = abs(wrap_angle(atan2(dy,dx) - stheta))\n dist_to_side = sqrt(dx**2 + dy**2)\n min_dist_to_side = 60 # mm from cor\n max_dist_to_side = 120 # mm from cor\n print('ManualDock1: Intersection ahead is %.1f mm from side and dtheta=%.1f deg.' 
%\n (dist_to_side, dtheta*180/pi))\n alignment_threshold = 5 # degrees\n aligned = abs(wrap_angle(rtheta-stheta)) < alignment_threshold*pi/180\n if ((dist_to_side >= min_dist_to_side) or aligned) and \\\n (dist_to_side <= max_dist_to_side) and \\\n (dtheta < pi/20): # make sure intersection is on near side of cube\n # Intersection ahead is at an acceptable distance from the chosen side\n print('ManualDock1: move forward to intersection.')\n self.distance = distance_mm(sqrt((rx-intx)**2 + (ry-inty)**2))\n self.report(rx,ry,rtheta,sx,sy,stheta,intx,inty,intersection_bearing)\n super().start(event)\n return\n else:\n # Intersection ahead is past the target, or too close or too far from it, so\n # pick a new point on cline at a reasonable distance and turn to that\n print('ManualDock: pick new intersection point')\n good_dist = 70 # mmm from cor\n tx = sx + good_dist * cos(stheta+pi)\n ty = sy + good_dist * sin(stheta+pi)\n turn_angle = wrap_angle(atan2(ty-ry,tx-rx)-rtheta)\n min_turn_angle = 2 * pi/180\n if abs(turn_angle) > min_turn_angle:\n self.distance = distance_mm(0)\n self.report(rx,ry,rtheta,sx,sy,stheta,intx,inty,intersection_bearing)\n print('ManualDock1: turn to point at %.1f , %.1f turn_angle=%.1f deg.' %\n (tx, ty, turn_angle*180/pi))\n super().start(event)\n self.post_data(Angle(radians=turn_angle))\n return\n else:\n dist = sqrt((rx-tx)**2 + (ry-ty)**2)\n self.distance = distance_mm(dist)\n self.report(rx,ry,rtheta,sx,sy,stheta,intx,inty,intersection_bearing)\n print('ManualDock1: Alignment is close enough.')\n super().start(event)\n return\n\n class ManualDock2(Turn):\n def start(self,event=None):\n (rx,ry,rtheta) = self.parent.get_robot_cor(use_world_map=True)\n (ox,oy,otheta) = self.parent.side\n #bearing = atan2(oy-ry, ox-rx)\n #turn_angle = wrap_angle(bearing-rtheta)\n turn_angle = wrap_angle(otheta-rtheta)\n self.angle = Angle(radians=turn_angle)\n print('ManualDock2: otheta=%.1f deg. heading=%.1f deg turn_angle=%.1f deg.' %\n (otheta*180/pi, rtheta*180/pi, turn_angle*180/pi))\n super().start(event)\n\n\n class InvalidatePose(StateNode):\n def start(self,event=None):\n super().start(event)\n self.parent.wmobject.pose_confidence = -1\n\n\n class ChooseAnotherSide(StateNode):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n if self.parent.pick_another_side():\n self.post_success()\n else:\n self.post_failure()\n\n\n class CheckCubePoseValid(StateNode):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n\n if isinstance(self.parent.object, LightCube):\n cube_id = self.parent.object.wm_obj.id\n elif isinstance(self.parent.object, LightCubeObj):\n cube_id = self.parent.object.id\n else:\n raise ValueError(self.parent.object)\n\n try:\n wmobject = self.robot.world.world_map.objects[cube_id]\n except: # worldmap was cleared just before we were called\n self.post_failure()\n return\n if self.parent.robot.is_picked_up:\n self.parent.robot.stop_all_motors()\n print('CheckCubePoseValid: robot is picked up.')\n self.post_failure()\n elif wmobject.pose_confidence < 0:\n print('CheckCubePoseValid %s: %s has invalid pose!' % (self.name, cube_id))\n self.post_failure()\n else:\n #print('CheckCubePoseValid %s: valid pose for %s.' 
% (self.name, cube_id))\n self.post_completion()\n\n\n class ReadyToRoll(StateNode):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n if self.parent.roll_cube:\n print('get ready to roll')\n self.post_success()\n else:\n self.post_failure()\n\n def setup(self):\n # # GoToCube machine\n # \n # droplift: SetLiftHeight(0)\n # droplift =C=> after_drop\n # droplift =F=> after_drop\n # \n # after_drop: StateNode() =N=> {looker, waitlift, monitor_cube_pose}\n # \n # looker: LookAtObject()\n # \n # waitlift: StateNode() =T(1)=> # allow time for vision to set up world map\n # check_almost_docked\n # \n # monitor_cube_pose: self.CheckCubePoseValid()\n # monitor_cube_pose =C=> StateNode() =T(1)=> monitor_cube_pose\n # monitor_cube_pose =F=> ParentFails()\n # \n # check_almost_docked: self.CheckAlmostDocked() # sets up parent.side\n # check_almost_docked =C=> turn_to_cube2 # we're good to dock right now\n # check_almost_docked =S=> setup_manual_dock # we're close: skip the Pilot and set up dock manually\n # check_almost_docked =F=> pilot_check_start # not close: use the Pilot to path plan\n # \n # setup_manual_dock: Forward(-10)\n # setup_manual_dock =C=> manual_dock1\n # setup_manual_dock =F=> failure\n # \n # manual_dock1: self.ManualDock1()\n # manual_dock1 =D=> Turn() =C=> Print('turned. wait...') =N=> manual_dock1\n # manual_dock1 =C=> Print('wait...') =N=> manual_dock2\n # manual_dock1 =T(10)=> Print('Cannot manual dock from here') =N=> pilot_check_start # temporary\n # \n # manual_dock2: self.ManualDock2()\n # manual_dock2 =C=> Print('wait...') =N=> turn_to_cube1\n # manual_dock2 =F=> failure\n # \n # pilot_check_start: PilotCheckStart()\n # pilot_check_start =S=> Print('Start collision check passed.') =N=> go_side\n # # TODO: instead of blindly backing up, find the best direction to move.\n # pilot_check_start =F=> Print('Backing up to escape start collision...') =N=> backup_for_escape\n # \n # backup_for_escape: Forward(-80)\n # backup_for_escape =C=> pilot_check_start2\n # backup_for_escape =F=> failure\n # \n # # Second chance to avoid StartCollides. 
There is no third chance.\n # pilot_check_start2: PilotCheckStart()\n # pilot_check_start2 =S=> Print('Start collision re-check passed.') =N=> go_side\n # pilot_check_start2 =PILOT(StartCollides)=> check_start2_pilot: ParentPilotEvent() =N=> failure\n # pilot_check_start2 =F=> failure\n # \n # go_side: self.PilotToSide()\n # go_side =PILOT(GoalCollides)=> failure\n # go_side =PILOT(MaxIterations)=> failure\n # go_side =PILOT=> go_side_pilot: ParentPilotEvent() =N=> failure\n # go_side =F=> failure\n # go_side =C=> self.ReportPosition('go_side_deccel')\n # =T(0.75)=> self.ReportPosition('go_side_stopped')\n # =N=> turn_to_cube1\n # \n # turn_to_cube1: self.TurnToCube(check_vis=True) =C=>\n # self.ReportPosition('turn_to_cube1_deccel')\n # =T(0.75)=> self.ReportPosition('turn_to_cube1_stopped')\n # =N=> Print('wait to approach...') =N=> approach\n # turn_to_cube1 =F=> recover1\n # \n # recover1: Forward(-50)\n # recover1 =C=> turn_to_cube2\n # recover1 =F=> failure\n # \n # approach: self.ForwardToCube(60) =C=> StateNode() =T(0.75)=>\n # self.ReportPosition('approach') =T(0.75)=>\n # self.ReportPosition('approach') =N=> turn_to_cube_1a\n # \n # turn_to_cube_1a: self.TurnToCube(check_vis=False) =C=> Print('wait...') =N=> exit_to_roll\n # turn_to_cube_1a =F=> failure\n # \n # approach =F=> failure\n # \n # exit_to_roll: self.ReadyToRoll()\n # exit_to_roll =F=> forward_to_cube_1a\n # exit_to_roll =S=> Forward(-5) =C=> SetLiftHeight(1) =C=> forward_to_cube_1a\n # \n # forward_to_cube_1a: self.ForwardToCube(15) =C=> success\n # \n # turn_to_cube2: self.TurnToCube(check_vis=True)\n # turn_to_cube2 =F=> Print(\"TurnToCube2: Cube Lost\") =N=> self.InvalidatePose() =N=> failure\n # turn_to_cube2 =C=> forward_to_cube2\n # \n # forward_to_cube2: self.ForwardToCube(60)\n # forward_to_cube2 =C=> turn_to_cube3\n # forward_to_cube2 =F=> failure\n # \n # turn_to_cube3: self.TurnToCube(check_vis=False) # can't fail\n # turn_to_cube3 =C=> exit_to_roll2\n # turn_to_cube3 =F=> failure\n # \n # exit_to_roll2: self.ReadyToRoll()\n # exit_to_roll2 =F=> forward_to_cube3\n # exit_to_roll2 =S=> Forward(-5) =C=> SetLiftHeight(1) =C=> forward_to_cube3\n # \n # forward_to_cube3: self.ForwardToCube(20) =C=> success\n # \n # success: Print('GoToSide has succeeded.') =N=> ParentCompletes()\n # \n # failure: Print('GoToSide has failed.') =N=> check_cube_pose\n # \n # check_cube_pose: self.CheckCubePoseValid()\n # check_cube_pose =C=> choose_another_side\n # check_cube_pose =F=> ParentFails()\n # \n # choose_another_side: self.ChooseAnotherSide()\n # choose_another_side =F=> ParentFails()\n # choose_another_side =S=> droplift\n \n # Code generated by genfsm on Sat Feb 25 01:50:19 2023:\n \n droplift = SetLiftHeight(0) .set_name(\"droplift\") .set_parent(self)\n after_drop = StateNode() .set_name(\"after_drop\") .set_parent(self)\n looker = LookAtObject() .set_name(\"looker\") .set_parent(self)\n waitlift = StateNode() .set_name(\"waitlift\") .set_parent(self)\n monitor_cube_pose = self.CheckCubePoseValid() .set_name(\"monitor_cube_pose\") .set_parent(self)\n statenode1 = StateNode() .set_name(\"statenode1\") .set_parent(self)\n parentfails1 = ParentFails() .set_name(\"parentfails1\") .set_parent(self)\n check_almost_docked = self.CheckAlmostDocked() .set_name(\"check_almost_docked\") .set_parent(self)\n setup_manual_dock = Forward(-10) .set_name(\"setup_manual_dock\") .set_parent(self)\n manual_dock1 = self.ManualDock1() .set_name(\"manual_dock1\") .set_parent(self)\n turn1 = Turn() .set_name(\"turn1\") .set_parent(self)\n 
print1 = Print('turned. wait...') .set_name(\"print1\") .set_parent(self)\n print2 = Print('wait...') .set_name(\"print2\") .set_parent(self)\n print3 = Print('Cannot manual dock from here') .set_name(\"print3\") .set_parent(self)\n manual_dock2 = self.ManualDock2() .set_name(\"manual_dock2\") .set_parent(self)\n print4 = Print('wait...') .set_name(\"print4\") .set_parent(self)\n pilot_check_start = PilotCheckStart() .set_name(\"pilot_check_start\") .set_parent(self)\n print5 = Print('Start collision check passed.') .set_name(\"print5\") .set_parent(self)\n print6 = Print('Backing up to escape start collision...') .set_name(\"print6\") .set_parent(self)\n backup_for_escape = Forward(-80) .set_name(\"backup_for_escape\") .set_parent(self)\n pilot_check_start2 = PilotCheckStart() .set_name(\"pilot_check_start2\") .set_parent(self)\n print7 = Print('Start collision re-check passed.') .set_name(\"print7\") .set_parent(self)\n check_start2_pilot = ParentPilotEvent() .set_name(\"check_start2_pilot\") .set_parent(self)\n go_side = self.PilotToSide() .set_name(\"go_side\") .set_parent(self)\n go_side_pilot = ParentPilotEvent() .set_name(\"go_side_pilot\") .set_parent(self)\n reportposition1 = self.ReportPosition('go_side_deccel') .set_name(\"reportposition1\") .set_parent(self)\n reportposition2 = self.ReportPosition('go_side_stopped') .set_name(\"reportposition2\") .set_parent(self)\n turn_to_cube1 = self.TurnToCube(check_vis=True) .set_name(\"turn_to_cube1\") .set_parent(self)\n reportposition3 = self.ReportPosition('turn_to_cube1_deccel') .set_name(\"reportposition3\") .set_parent(self)\n reportposition4 = self.ReportPosition('turn_to_cube1_stopped') .set_name(\"reportposition4\") .set_parent(self)\n print8 = Print('wait to approach...') .set_name(\"print8\") .set_parent(self)\n recover1 = Forward(-50) .set_name(\"recover1\") .set_parent(self)\n approach = self.ForwardToCube(60) .set_name(\"approach\") .set_parent(self)\n statenode2 = StateNode() .set_name(\"statenode2\") .set_parent(self)\n reportposition5 = self.ReportPosition('approach') .set_name(\"reportposition5\") .set_parent(self)\n reportposition6 = self.ReportPosition('approach') .set_name(\"reportposition6\") .set_parent(self)\n turn_to_cube_1a = self.TurnToCube(check_vis=False) .set_name(\"turn_to_cube_1a\") .set_parent(self)\n print9 = Print('wait...') .set_name(\"print9\") .set_parent(self)\n exit_to_roll = self.ReadyToRoll() .set_name(\"exit_to_roll\") .set_parent(self)\n forward1 = Forward(-5) .set_name(\"forward1\") .set_parent(self)\n setliftheight1 = SetLiftHeight(1) .set_name(\"setliftheight1\") .set_parent(self)\n forward_to_cube_1a = self.ForwardToCube(15) .set_name(\"forward_to_cube_1a\") .set_parent(self)\n turn_to_cube2 = self.TurnToCube(check_vis=True) .set_name(\"turn_to_cube2\") .set_parent(self)\n print10 = Print(\"TurnToCube2: Cube Lost\") .set_name(\"print10\") .set_parent(self)\n invalidatepose1 = self.InvalidatePose() .set_name(\"invalidatepose1\") .set_parent(self)\n forward_to_cube2 = self.ForwardToCube(60) .set_name(\"forward_to_cube2\") .set_parent(self)\n turn_to_cube3 = self.TurnToCube(check_vis=False) .set_name(\"turn_to_cube3\") .set_parent(self)\n exit_to_roll2 = self.ReadyToRoll() .set_name(\"exit_to_roll2\") .set_parent(self)\n forward2 = Forward(-5) .set_name(\"forward2\") .set_parent(self)\n setliftheight2 = SetLiftHeight(1) .set_name(\"setliftheight2\") .set_parent(self)\n forward_to_cube3 = self.ForwardToCube(20) .set_name(\"forward_to_cube3\") .set_parent(self)\n success = Print('GoToSide has 
succeeded.') .set_name(\"success\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n failure = Print('GoToSide has failed.') .set_name(\"failure\") .set_parent(self)\n check_cube_pose = self.CheckCubePoseValid() .set_name(\"check_cube_pose\") .set_parent(self)\n parentfails2 = ParentFails() .set_name(\"parentfails2\") .set_parent(self)\n choose_another_side = self.ChooseAnotherSide() .set_name(\"choose_another_side\") .set_parent(self)\n parentfails3 = ParentFails() .set_name(\"parentfails3\") .set_parent(self)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(droplift) .add_destinations(after_drop)\n \n failuretrans1 = FailureTrans() .set_name(\"failuretrans1\")\n failuretrans1 .add_sources(droplift) .add_destinations(after_drop)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(after_drop) .add_destinations(looker,waitlift,monitor_cube_pose)\n \n timertrans1 = TimerTrans(1) .set_name(\"timertrans1\")\n timertrans1 .add_sources(waitlift) .add_destinations(check_almost_docked)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(monitor_cube_pose) .add_destinations(statenode1)\n \n timertrans2 = TimerTrans(1) .set_name(\"timertrans2\")\n timertrans2 .add_sources(statenode1) .add_destinations(monitor_cube_pose)\n \n failuretrans2 = FailureTrans() .set_name(\"failuretrans2\")\n failuretrans2 .add_sources(monitor_cube_pose) .add_destinations(parentfails1)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(check_almost_docked) .add_destinations(turn_to_cube2)\n \n successtrans1 = SuccessTrans() .set_name(\"successtrans1\")\n successtrans1 .add_sources(check_almost_docked) .add_destinations(setup_manual_dock)\n \n failuretrans3 = FailureTrans() .set_name(\"failuretrans3\")\n failuretrans3 .add_sources(check_almost_docked) .add_destinations(pilot_check_start)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(setup_manual_dock) .add_destinations(manual_dock1)\n \n failuretrans4 = FailureTrans() .set_name(\"failuretrans4\")\n failuretrans4 .add_sources(setup_manual_dock) .add_destinations(failure)\n \n datatrans1 = DataTrans() .set_name(\"datatrans1\")\n datatrans1 .add_sources(manual_dock1) .add_destinations(turn1)\n \n completiontrans5 = CompletionTrans() .set_name(\"completiontrans5\")\n completiontrans5 .add_sources(turn1) .add_destinations(print1)\n \n nulltrans2 = NullTrans() .set_name(\"nulltrans2\")\n nulltrans2 .add_sources(print1) .add_destinations(manual_dock1)\n \n completiontrans6 = CompletionTrans() .set_name(\"completiontrans6\")\n completiontrans6 .add_sources(manual_dock1) .add_destinations(print2)\n \n nulltrans3 = NullTrans() .set_name(\"nulltrans3\")\n nulltrans3 .add_sources(print2) .add_destinations(manual_dock2)\n \n timertrans3 = TimerTrans(10) .set_name(\"timertrans3\")\n timertrans3 .add_sources(manual_dock1) .add_destinations(print3)\n \n nulltrans4 = NullTrans() .set_name(\"nulltrans4\")\n nulltrans4 .add_sources(print3) .add_destinations(pilot_check_start)\n \n completiontrans7 = CompletionTrans() .set_name(\"completiontrans7\")\n completiontrans7 .add_sources(manual_dock2) .add_destinations(print4)\n \n nulltrans5 = NullTrans() .set_name(\"nulltrans5\")\n nulltrans5 .add_sources(print4) .add_destinations(turn_to_cube1)\n \n failuretrans5 = FailureTrans() 
.set_name(\"failuretrans5\")\n failuretrans5 .add_sources(manual_dock2) .add_destinations(failure)\n \n successtrans2 = SuccessTrans() .set_name(\"successtrans2\")\n successtrans2 .add_sources(pilot_check_start) .add_destinations(print5)\n \n nulltrans6 = NullTrans() .set_name(\"nulltrans6\")\n nulltrans6 .add_sources(print5) .add_destinations(go_side)\n \n failuretrans6 = FailureTrans() .set_name(\"failuretrans6\")\n failuretrans6 .add_sources(pilot_check_start) .add_destinations(print6)\n \n nulltrans7 = NullTrans() .set_name(\"nulltrans7\")\n nulltrans7 .add_sources(print6) .add_destinations(backup_for_escape)\n \n completiontrans8 = CompletionTrans() .set_name(\"completiontrans8\")\n completiontrans8 .add_sources(backup_for_escape) .add_destinations(pilot_check_start2)\n \n failuretrans7 = FailureTrans() .set_name(\"failuretrans7\")\n failuretrans7 .add_sources(backup_for_escape) .add_destinations(failure)\n \n successtrans3 = SuccessTrans() .set_name(\"successtrans3\")\n successtrans3 .add_sources(pilot_check_start2) .add_destinations(print7)\n \n nulltrans8 = NullTrans() .set_name(\"nulltrans8\")\n nulltrans8 .add_sources(print7) .add_destinations(go_side)\n \n pilottrans1 = PilotTrans(StartCollides) .set_name(\"pilottrans1\")\n pilottrans1 .add_sources(pilot_check_start2) .add_destinations(check_start2_pilot)\n \n nulltrans9 = NullTrans() .set_name(\"nulltrans9\")\n nulltrans9 .add_sources(check_start2_pilot) .add_destinations(failure)\n \n failuretrans8 = FailureTrans() .set_name(\"failuretrans8\")\n failuretrans8 .add_sources(pilot_check_start2) .add_destinations(failure)\n \n pilottrans2 = PilotTrans(GoalCollides) .set_name(\"pilottrans2\")\n pilottrans2 .add_sources(go_side) .add_destinations(failure)\n \n pilottrans3 = PilotTrans(MaxIterations) .set_name(\"pilottrans3\")\n pilottrans3 .add_sources(go_side) .add_destinations(failure)\n \n pilottrans4 = PilotTrans() .set_name(\"pilottrans4\")\n pilottrans4 .add_sources(go_side) .add_destinations(go_side_pilot)\n \n nulltrans10 = NullTrans() .set_name(\"nulltrans10\")\n nulltrans10 .add_sources(go_side_pilot) .add_destinations(failure)\n \n failuretrans9 = FailureTrans() .set_name(\"failuretrans9\")\n failuretrans9 .add_sources(go_side) .add_destinations(failure)\n \n completiontrans9 = CompletionTrans() .set_name(\"completiontrans9\")\n completiontrans9 .add_sources(go_side) .add_destinations(reportposition1)\n \n timertrans4 = TimerTrans(0.75) .set_name(\"timertrans4\")\n timertrans4 .add_sources(reportposition1) .add_destinations(reportposition2)\n \n nulltrans11 = NullTrans() .set_name(\"nulltrans11\")\n nulltrans11 .add_sources(reportposition2) .add_destinations(turn_to_cube1)\n \n completiontrans10 = CompletionTrans() .set_name(\"completiontrans10\")\n completiontrans10 .add_sources(turn_to_cube1) .add_destinations(reportposition3)\n \n timertrans5 = TimerTrans(0.75) .set_name(\"timertrans5\")\n timertrans5 .add_sources(reportposition3) .add_destinations(reportposition4)\n \n nulltrans12 = NullTrans() .set_name(\"nulltrans12\")\n nulltrans12 .add_sources(reportposition4) .add_destinations(print8)\n \n nulltrans13 = NullTrans() .set_name(\"nulltrans13\")\n nulltrans13 .add_sources(print8) .add_destinations(approach)\n \n failuretrans10 = FailureTrans() .set_name(\"failuretrans10\")\n failuretrans10 .add_sources(turn_to_cube1) .add_destinations(recover1)\n \n completiontrans11 = CompletionTrans() .set_name(\"completiontrans11\")\n completiontrans11 .add_sources(recover1) .add_destinations(turn_to_cube2)\n \n failuretrans11 = 
FailureTrans() .set_name(\"failuretrans11\")\n failuretrans11 .add_sources(recover1) .add_destinations(failure)\n \n completiontrans12 = CompletionTrans() .set_name(\"completiontrans12\")\n completiontrans12 .add_sources(approach) .add_destinations(statenode2)\n \n timertrans6 = TimerTrans(0.75) .set_name(\"timertrans6\")\n timertrans6 .add_sources(statenode2) .add_destinations(reportposition5)\n \n timertrans7 = TimerTrans(0.75) .set_name(\"timertrans7\")\n timertrans7 .add_sources(reportposition5) .add_destinations(reportposition6)\n \n nulltrans14 = NullTrans() .set_name(\"nulltrans14\")\n nulltrans14 .add_sources(reportposition6) .add_destinations(turn_to_cube_1a)\n \n completiontrans13 = CompletionTrans() .set_name(\"completiontrans13\")\n completiontrans13 .add_sources(turn_to_cube_1a) .add_destinations(print9)\n \n nulltrans15 = NullTrans() .set_name(\"nulltrans15\")\n nulltrans15 .add_sources(print9) .add_destinations(exit_to_roll)\n \n failuretrans12 = FailureTrans() .set_name(\"failuretrans12\")\n failuretrans12 .add_sources(turn_to_cube_1a) .add_destinations(failure)\n \n failuretrans13 = FailureTrans() .set_name(\"failuretrans13\")\n failuretrans13 .add_sources(approach) .add_destinations(failure)\n \n failuretrans14 = FailureTrans() .set_name(\"failuretrans14\")\n failuretrans14 .add_sources(exit_to_roll) .add_destinations(forward_to_cube_1a)\n \n successtrans4 = SuccessTrans() .set_name(\"successtrans4\")\n successtrans4 .add_sources(exit_to_roll) .add_destinations(forward1)\n \n completiontrans14 = CompletionTrans() .set_name(\"completiontrans14\")\n completiontrans14 .add_sources(forward1) .add_destinations(setliftheight1)\n \n completiontrans15 = CompletionTrans() .set_name(\"completiontrans15\")\n completiontrans15 .add_sources(setliftheight1) .add_destinations(forward_to_cube_1a)\n \n completiontrans16 = CompletionTrans() .set_name(\"completiontrans16\")\n completiontrans16 .add_sources(forward_to_cube_1a) .add_destinations(success)\n \n failuretrans15 = FailureTrans() .set_name(\"failuretrans15\")\n failuretrans15 .add_sources(turn_to_cube2) .add_destinations(print10)\n \n nulltrans16 = NullTrans() .set_name(\"nulltrans16\")\n nulltrans16 .add_sources(print10) .add_destinations(invalidatepose1)\n \n nulltrans17 = NullTrans() .set_name(\"nulltrans17\")\n nulltrans17 .add_sources(invalidatepose1) .add_destinations(failure)\n \n completiontrans17 = CompletionTrans() .set_name(\"completiontrans17\")\n completiontrans17 .add_sources(turn_to_cube2) .add_destinations(forward_to_cube2)\n \n completiontrans18 = CompletionTrans() .set_name(\"completiontrans18\")\n completiontrans18 .add_sources(forward_to_cube2) .add_destinations(turn_to_cube3)\n \n failuretrans16 = FailureTrans() .set_name(\"failuretrans16\")\n failuretrans16 .add_sources(forward_to_cube2) .add_destinations(failure)\n \n completiontrans19 = CompletionTrans() .set_name(\"completiontrans19\")\n completiontrans19 .add_sources(turn_to_cube3) .add_destinations(exit_to_roll2)\n \n failuretrans17 = FailureTrans() .set_name(\"failuretrans17\")\n failuretrans17 .add_sources(turn_to_cube3) .add_destinations(failure)\n \n failuretrans18 = FailureTrans() .set_name(\"failuretrans18\")\n failuretrans18 .add_sources(exit_to_roll2) .add_destinations(forward_to_cube3)\n \n successtrans5 = SuccessTrans() .set_name(\"successtrans5\")\n successtrans5 .add_sources(exit_to_roll2) .add_destinations(forward2)\n \n completiontrans20 = CompletionTrans() .set_name(\"completiontrans20\")\n completiontrans20 .add_sources(forward2) 
.add_destinations(setliftheight2)\n \n completiontrans21 = CompletionTrans() .set_name(\"completiontrans21\")\n completiontrans21 .add_sources(setliftheight2) .add_destinations(forward_to_cube3)\n \n completiontrans22 = CompletionTrans() .set_name(\"completiontrans22\")\n completiontrans22 .add_sources(forward_to_cube3) .add_destinations(success)\n \n nulltrans18 = NullTrans() .set_name(\"nulltrans18\")\n nulltrans18 .add_sources(success) .add_destinations(parentcompletes1)\n \n nulltrans19 = NullTrans() .set_name(\"nulltrans19\")\n nulltrans19 .add_sources(failure) .add_destinations(check_cube_pose)\n \n completiontrans23 = CompletionTrans() .set_name(\"completiontrans23\")\n completiontrans23 .add_sources(check_cube_pose) .add_destinations(choose_another_side)\n \n failuretrans19 = FailureTrans() .set_name(\"failuretrans19\")\n failuretrans19 .add_sources(check_cube_pose) .add_destinations(parentfails2)\n \n failuretrans20 = FailureTrans() .set_name(\"failuretrans20\")\n failuretrans20 .add_sources(choose_another_side) .add_destinations(parentfails3)\n \n successtrans6 = SuccessTrans() .set_name(\"successtrans6\")\n successtrans6 .add_sources(choose_another_side) .add_destinations(droplift)\n \n return self\n\nclass SetCarrying(StateNode):\n def __init__(self,objparam=None):\n self.objparam = objparam\n self.object = None\n super().__init__()\n\n def start(self, event=None):\n if self.objparam is not None:\n self.object = self.objparam\n else:\n self.object = self.parent.object\n if isinstance(self.object, LightCube):\n self.wmobject = self.object.wm_obj\n elif isinstance(self.object, LightCubeObj):\n self.wmobject = self.object\n self.object = self.object.sdk_obj\n else:\n raise ValueError(self.object)\n self.robot.carrying = self.wmobject\n self.robot.fetching = None\n self.wmobject.update_from_sdk = False\n self.wmobject.pose_confidence = +1\n super().start(event)\n self.post_completion()\n\nclass SetNotCarrying(StateNode):\n def start(self,event=None):\n self.robot.carrying = None\n self.robot.fetching = None\n super().start(event)\n self.post_completion()\n\nclass CheckCarrying(StateNode):\n def start(self, event=None):\n super().start(event)\n if self.robot.carrying:\n self.post_success()\n else:\n self.post_failure()\n\nclass SetFetching(StateNode):\n \"Prevents pose invalidation if we bump the cube while trying to pick it up.\"\n def __init__(self,objparam=None):\n self.objparam = objparam\n self.object = None\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n if self.objparam is not None:\n self.object = self.objparam\n else:\n self.object = self.parent.object\n if isinstance(self.object, LightCube):\n try:\n self.wmobject = self.object.wm_obj\n except:\n self.wmobject = None\n elif isinstance(self.object, LightCubeObj):\n self.wmobject = self.object\n self.object = self.object.sdk_obj\n else:\n raise ValueError(self.object)\n if self.wmobject:\n self.robot.fetching = self.wmobject\n self.post_completion()\n else:\n self.post_failure()\n\n\nclass SetNotFetching(StateNode):\n def start(self,event=None):\n super().start(event)\n self.robot.fetching = None\n self.post_completion()\n\n\nclass PickUpCube(StateNode):\n \"\"\"Pick up a cube using our own dock and verify routines.\n Set self.object to indicate the cube to be picked up.\"\"\"\n\n class VerifyPickup(StateNode):\n def probe_column(self, im, col, row_start, row_end):\n \"\"\"\n Probe one column of the image, looking for the top horizontal\n black bar of the cube marker. 
This bar should be 23-32 pixels\n thick. Use adaptive thresholding by sorting the pixels and\n finding the darkest ones to set the black threshold.\n \"\"\"\n pixels = [float(im[r,col,0]) for r in range(row_start,row_end)]\n #print('Column ',col,':',sep='')\n #[print('%4d' % i,end='') for i in pixels]\n pixels.sort()\n npix = len(pixels)\n bindex = 1\n bsum = pixels[0]\n bmax = pixels[0]\n bcnt = 1\n windex = npix-2\n wsum = pixels[npix-1]\n wmin = pixels[npix-1]\n wcnt = 1\n while bindex < windex:\n if abs(bmax-pixels[bindex]) < abs(wmin-pixels[windex]):\n i = bindex\n bindex += 1\n else:\n i = windex\n windex -= 1\n bmean = bsum / bcnt\n wmean = wsum / wcnt\n val = pixels[i]\n if abs(val-bmean) < abs(val-wmean):\n bsum += val\n bcnt += 1\n bmax = max(bmax,val)\n else:\n wsum += val\n wcnt +=1\n wmin = min(wmin,val)\n black_thresh = bmax\n index = row_start\n nrows = im.shape[0]\n black_run_length = 0\n # initial white run\n while index < nrows and im[index,col,0] > black_thresh:\n index += 1\n if index == nrows: return -1\n while index < nrows and im[index,col,0] <= black_thresh:\n black_run_length += 1\n index +=1\n if index >= nrows-5:\n retval = -1\n else:\n retval = black_run_length\n #print(' col=%3d wmin=%5.1f wmean=%5.1f bmean=%5.1f black_thresh=%5.1f run_length=%d' %\n # (col, wmin, wmean, bmean, black_thresh, black_run_length))\n return retval\n\n def start(self,event=None):\n super().start(event)\n im = np.array(self.robot.world.latest_image.raw_image)\n min_length = 20\n max_length = 32\n bad_runs = 0\n print('Verifying pickup. hangle=%4.1f deg. langle=%4.1f deg. lheight=%4.1f mm' %\n (self.robot.head_angle.degrees, self.robot.lift_angle.degrees,\n self.robot.lift_height.distance_mm))\n columns = (100, 110, 120, 200, 210, 220) # avoid the screw in the center\n for col in columns: # range(100,220,20):\n run_length = self.probe_column(im, col, 8, 100)\n if run_length < min_length or run_length > max_length:\n bad_runs += 1\n print(' Number of bad_runs:', bad_runs)\n if bad_runs < 2:\n self.post_success()\n else:\n self.post_failure()\n\n # end of class VerifyPickup\n\n # PickUpCube methods\n\n def __init__(self, cube=None):\n self.cube = cube\n super().__init__()\n\n def picked_up_handler(self):\n print(\"PickUpCube aborting because robot was picked up.\")\n self.post_failure()\n\n def start(self, event=None):\n if isinstance(self.cube, LightCube):\n self.object = self.cube\n try:\n self.wmobject = self.object.wm_obj\n except:\n self.wmobject = None\n elif isinstance(self.cube, LightCubeObj):\n self.wmobject = self.cube\n self.object = self.cube.sdk_obj\n elif isinstance(self.object, LightCube):\n try:\n self.wmobject = self.object.wm_obj\n except:\n self.wmobject = None\n elif isinstance(self.object, LightCubeObj):\n self.wmobject = self.object\n self.object = self.object.sdk_obj\n else:\n raise ValueError(self.object)\n super().start(event)\n if not (self.object.pose and self.object.pose.is_valid and\n self.wmobject and not self.wmobject.pose_confidence < 0):\n print('PickUpCube: cube has invalid pose!', self.object, self.object.pose)\n self.post_event(PilotEvent(InvalidPose))\n self.post_failure()\n return\n self.children['goto_cube'].object = self.object\n print('Picking up',self.wmobject)\n\n def setup(self):\n # # PickUpCube machine\n # \n # fetch: SetFetching() =C=> check_carry\n # \n # check_carry: CheckCarrying()\n # check_carry =S=> DropObject() =C=> goto_cube\n # check_carry =F=> goto_cube\n # \n # goto_cube: GoToCube()\n # goto_cube =C=> AbortHeadAction() =T(0.1) => 
# clear head track\n # {raise_lift, raise_head}\n # goto_cube =PILOT=> goto_cube_pilot: ParentPilotEvent() =N=> pilot_frustrated\n # goto_cube =F=> pilot_frustrated\n # \n # raise_lift: SetLiftHeight(0.4)\n # \n # raise_head: SetHeadAngle(5)\n # raise_head =C=> wait_head: AbortHeadAction() =C=> StateNode() =T(0.2)=> raise_head2\n # raise_head =F=> wait_head\n # raise_head2: SetHeadAngle(0, num_retries=2)\n # raise_head2 =F=> verify\n # \n # {raise_lift, raise_head2} =C=> verify\n # \n # verify: self.VerifyPickup()\n # verify =S=> set_carrying\n # verify =F=> StateNode() =T(0.5)=> verify2\n # \n # verify2: self.VerifyPickup()\n # verify2 =S=> set_carrying\n # verify2 =F=> StateNode() =T(0.2)=> frustrated # Time delay before animation\n # \n # # verify3 is dead code\n # verify3: self.VerifyPickup()\n # verify3 =S=> set_carrying\n # verify3 =F=> frustrated\n # \n # set_carrying: SetCarrying() =N=> StateNode() =T(0.2)=> satisfied # Time delay before animation\n # \n # satisfied: AnimationTriggerNode(trigger=cozmo.anim.Triggers.ReactToBlockPickupSuccess,\n # ignore_body_track=True,\n # ignore_head_track=True,\n # ignore_lift_track=True)\n # satisfied =C=> {final_raise, drop_head}\n # satisfied =F=> StopAllMotors() =T(1)=> {final_raise, drop_head} # in case of tracks locked error\n # \n # final_raise: SetLiftHeight(1.0)\n # drop_head: SetHeadAngle(0)\n # \n # {final_raise, drop_head} =C=> ParentCompletes()\n # \n # frustrated: AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n # ignore_body_track=True,\n # ignore_head_track=True,\n # ignore_lift_track=True) =C=> missed_cube\n # frustrated =F=> StopAllMotors() =T(1)=> missed_cube\n # \n # missed_cube: SetNotCarrying() =C=> Forward(-5) =C=> {drop_lift, drop_head_low}\n # \n # drop_lift: SetLiftHeight(0)\n # drop_lift =C=> backupmore\n # drop_lift =F=> backupmore\n # \n # backupmore: Forward(-5)\n # \n # drop_head_low: SetHeadAngle(-20)\n # \n # {backupmore, drop_head_low} =C=> fail\n # \n # pilot_frustrated: PilotFrustration() =C=> fail\n # \n # fail: SetNotFetching() =C=> ParentFails()\n # \n \n # Code generated by genfsm on Sat Feb 25 01:50:19 2023:\n \n fetch = SetFetching() .set_name(\"fetch\") .set_parent(self)\n check_carry = CheckCarrying() .set_name(\"check_carry\") .set_parent(self)\n dropobject1 = DropObject() .set_name(\"dropobject1\") .set_parent(self)\n goto_cube = GoToCube() .set_name(\"goto_cube\") .set_parent(self)\n abortheadaction1 = AbortHeadAction() .set_name(\"abortheadaction1\") .set_parent(self)\n goto_cube_pilot = ParentPilotEvent() .set_name(\"goto_cube_pilot\") .set_parent(self)\n raise_lift = SetLiftHeight(0.4) .set_name(\"raise_lift\") .set_parent(self)\n raise_head = SetHeadAngle(5) .set_name(\"raise_head\") .set_parent(self)\n wait_head = AbortHeadAction() .set_name(\"wait_head\") .set_parent(self)\n statenode3 = StateNode() .set_name(\"statenode3\") .set_parent(self)\n raise_head2 = SetHeadAngle(0, num_retries=2) .set_name(\"raise_head2\") .set_parent(self)\n verify = self.VerifyPickup() .set_name(\"verify\") .set_parent(self)\n statenode4 = StateNode() .set_name(\"statenode4\") .set_parent(self)\n verify2 = self.VerifyPickup() .set_name(\"verify2\") .set_parent(self)\n statenode5 = StateNode() .set_name(\"statenode5\") .set_parent(self)\n verify3 = self.VerifyPickup() .set_name(\"verify3\") .set_parent(self)\n set_carrying = SetCarrying() .set_name(\"set_carrying\") .set_parent(self)\n statenode6 = StateNode() .set_name(\"statenode6\") .set_parent(self)\n satisfied = 
AnimationTriggerNode(trigger=cozmo.anim.Triggers.ReactToBlockPickupSuccess,\n ignore_body_track=True,\n ignore_head_track=True,\n ignore_lift_track=True) .set_name(\"satisfied\") .set_parent(self)\n stopallmotors1 = StopAllMotors() .set_name(\"stopallmotors1\") .set_parent(self)\n final_raise = SetLiftHeight(1.0) .set_name(\"final_raise\") .set_parent(self)\n drop_head = SetHeadAngle(0) .set_name(\"drop_head\") .set_parent(self)\n parentcompletes2 = ParentCompletes() .set_name(\"parentcompletes2\") .set_parent(self)\n frustrated = AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n ignore_body_track=True,\n ignore_head_track=True,\n ignore_lift_track=True) .set_name(\"frustrated\") .set_parent(self)\n stopallmotors2 = StopAllMotors() .set_name(\"stopallmotors2\") .set_parent(self)\n missed_cube = SetNotCarrying() .set_name(\"missed_cube\") .set_parent(self)\n forward3 = Forward(-5) .set_name(\"forward3\") .set_parent(self)\n drop_lift = SetLiftHeight(0) .set_name(\"drop_lift\") .set_parent(self)\n backupmore = Forward(-5) .set_name(\"backupmore\") .set_parent(self)\n drop_head_low = SetHeadAngle(-20) .set_name(\"drop_head_low\") .set_parent(self)\n pilot_frustrated = PilotFrustration() .set_name(\"pilot_frustrated\") .set_parent(self)\n fail = SetNotFetching() .set_name(\"fail\") .set_parent(self)\n parentfails4 = ParentFails() .set_name(\"parentfails4\") .set_parent(self)\n \n completiontrans24 = CompletionTrans() .set_name(\"completiontrans24\")\n completiontrans24 .add_sources(fetch) .add_destinations(check_carry)\n \n successtrans7 = SuccessTrans() .set_name(\"successtrans7\")\n successtrans7 .add_sources(check_carry) .add_destinations(dropobject1)\n \n completiontrans25 = CompletionTrans() .set_name(\"completiontrans25\")\n completiontrans25 .add_sources(dropobject1) .add_destinations(goto_cube)\n \n failuretrans21 = FailureTrans() .set_name(\"failuretrans21\")\n failuretrans21 .add_sources(check_carry) .add_destinations(goto_cube)\n \n completiontrans26 = CompletionTrans() .set_name(\"completiontrans26\")\n completiontrans26 .add_sources(goto_cube) .add_destinations(abortheadaction1)\n \n timertrans8 = TimerTrans(0.1) .set_name(\"timertrans8\")\n timertrans8 .add_sources(abortheadaction1) .add_destinations(raise_lift,raise_head)\n \n pilottrans5 = PilotTrans() .set_name(\"pilottrans5\")\n pilottrans5 .add_sources(goto_cube) .add_destinations(goto_cube_pilot)\n \n nulltrans20 = NullTrans() .set_name(\"nulltrans20\")\n nulltrans20 .add_sources(goto_cube_pilot) .add_destinations(pilot_frustrated)\n \n failuretrans22 = FailureTrans() .set_name(\"failuretrans22\")\n failuretrans22 .add_sources(goto_cube) .add_destinations(pilot_frustrated)\n \n completiontrans27 = CompletionTrans() .set_name(\"completiontrans27\")\n completiontrans27 .add_sources(raise_head) .add_destinations(wait_head)\n \n completiontrans28 = CompletionTrans() .set_name(\"completiontrans28\")\n completiontrans28 .add_sources(wait_head) .add_destinations(statenode3)\n \n timertrans9 = TimerTrans(0.2) .set_name(\"timertrans9\")\n timertrans9 .add_sources(statenode3) .add_destinations(raise_head2)\n \n failuretrans23 = FailureTrans() .set_name(\"failuretrans23\")\n failuretrans23 .add_sources(raise_head) .add_destinations(wait_head)\n \n failuretrans24 = FailureTrans() .set_name(\"failuretrans24\")\n failuretrans24 .add_sources(raise_head2) .add_destinations(verify)\n \n completiontrans29 = CompletionTrans() .set_name(\"completiontrans29\")\n completiontrans29 .add_sources(raise_lift,raise_head2) 
.add_destinations(verify)\n \n successtrans8 = SuccessTrans() .set_name(\"successtrans8\")\n successtrans8 .add_sources(verify) .add_destinations(set_carrying)\n \n failuretrans25 = FailureTrans() .set_name(\"failuretrans25\")\n failuretrans25 .add_sources(verify) .add_destinations(statenode4)\n \n timertrans10 = TimerTrans(0.5) .set_name(\"timertrans10\")\n timertrans10 .add_sources(statenode4) .add_destinations(verify2)\n \n successtrans9 = SuccessTrans() .set_name(\"successtrans9\")\n successtrans9 .add_sources(verify2) .add_destinations(set_carrying)\n \n failuretrans26 = FailureTrans() .set_name(\"failuretrans26\")\n failuretrans26 .add_sources(verify2) .add_destinations(statenode5)\n \n timertrans11 = TimerTrans(0.2) .set_name(\"timertrans11\")\n timertrans11 .add_sources(statenode5) .add_destinations(frustrated)\n \n successtrans10 = SuccessTrans() .set_name(\"successtrans10\")\n successtrans10 .add_sources(verify3) .add_destinations(set_carrying)\n \n failuretrans27 = FailureTrans() .set_name(\"failuretrans27\")\n failuretrans27 .add_sources(verify3) .add_destinations(frustrated)\n \n nulltrans21 = NullTrans() .set_name(\"nulltrans21\")\n nulltrans21 .add_sources(set_carrying) .add_destinations(statenode6)\n \n timertrans12 = TimerTrans(0.2) .set_name(\"timertrans12\")\n timertrans12 .add_sources(statenode6) .add_destinations(satisfied)\n \n completiontrans30 = CompletionTrans() .set_name(\"completiontrans30\")\n completiontrans30 .add_sources(satisfied) .add_destinations(final_raise,drop_head)\n \n failuretrans28 = FailureTrans() .set_name(\"failuretrans28\")\n failuretrans28 .add_sources(satisfied) .add_destinations(stopallmotors1)\n \n timertrans13 = TimerTrans(1) .set_name(\"timertrans13\")\n timertrans13 .add_sources(stopallmotors1) .add_destinations(final_raise,drop_head)\n \n completiontrans31 = CompletionTrans() .set_name(\"completiontrans31\")\n completiontrans31 .add_sources(final_raise,drop_head) .add_destinations(parentcompletes2)\n \n completiontrans32 = CompletionTrans() .set_name(\"completiontrans32\")\n completiontrans32 .add_sources(frustrated) .add_destinations(missed_cube)\n \n failuretrans29 = FailureTrans() .set_name(\"failuretrans29\")\n failuretrans29 .add_sources(frustrated) .add_destinations(stopallmotors2)\n \n timertrans14 = TimerTrans(1) .set_name(\"timertrans14\")\n timertrans14 .add_sources(stopallmotors2) .add_destinations(missed_cube)\n \n completiontrans33 = CompletionTrans() .set_name(\"completiontrans33\")\n completiontrans33 .add_sources(missed_cube) .add_destinations(forward3)\n \n completiontrans34 = CompletionTrans() .set_name(\"completiontrans34\")\n completiontrans34 .add_sources(forward3) .add_destinations(drop_lift,drop_head_low)\n \n completiontrans35 = CompletionTrans() .set_name(\"completiontrans35\")\n completiontrans35 .add_sources(drop_lift) .add_destinations(backupmore)\n \n failuretrans30 = FailureTrans() .set_name(\"failuretrans30\")\n failuretrans30 .add_sources(drop_lift) .add_destinations(backupmore)\n \n completiontrans36 = CompletionTrans() .set_name(\"completiontrans36\")\n completiontrans36 .add_sources(backupmore,drop_head_low) .add_destinations(fail)\n \n completiontrans37 = CompletionTrans() .set_name(\"completiontrans37\")\n completiontrans37 .add_sources(pilot_frustrated) .add_destinations(fail)\n \n completiontrans38 = CompletionTrans() .set_name(\"completiontrans38\")\n completiontrans38 .add_sources(fail) .add_destinations(parentfails4)\n \n return self\n\nclass DropObject(StateNode):\n\n class 
SetObject(StateNode):\n def start(self,event=None):\n super().start(event)\n self.parent.object = self.robot.carrying\n\n class CheckCubeVisible(StateNode):\n def start(self,event=None):\n super().start(event)\n for cube in self.robot.world.light_cubes.values():\n if cube and cube.is_visible:\n self.post_completion()\n return\n self.post_failure()\n\n def setup(self):\n # # DropObject machine\n # \n # SetLiftHeight(0) =C=> check_carrying\n # \n # check_carrying: CheckCarrying()\n # check_carrying =F=> {backup, lookdown}\n # check_carrying =S=> self.SetObject() =N=>\n # SetNotCarrying() =N=> SetFetching() =N=> {backup, lookdown}\n # \n # backup: Forward(-15)\n # \n # # Robots differ on head angle alignment, so try a shallow angle,\n # # and if we don't see the cube, try a steeper one.\n # lookdown: SetHeadAngle(-12)\n # lookdown =F=> head_angle_wait # Shouldn't fail, but just in case\n # \n # {backup, lookdown} =C=> head_angle_wait\n # \n # head_angle_wait: StateNode() =T(0.5)=> check_visible\n # \n # check_visible: self.CheckCubeVisible()\n # check_visible =C=> wrap_up\n # check_visible =F=> lookdown2\n # \n # # Try a lower head angle, but keep going even if we don't see the object\n # lookdown2: SetHeadAngle(-20)\n # lookdown2 =F=> wrap_up # Shouldn't fail, but just in case\n # lookdown2 =T(0.5)=> wrap_up\n # \n # wrap_up: SetNotFetching() =N=> ParentCompletes()\n \n # Code generated by genfsm on Sat Feb 25 01:50:19 2023:\n \n setliftheight3 = SetLiftHeight(0) .set_name(\"setliftheight3\") .set_parent(self)\n check_carrying = CheckCarrying() .set_name(\"check_carrying\") .set_parent(self)\n setobject1 = self.SetObject() .set_name(\"setobject1\") .set_parent(self)\n setnotcarrying1 = SetNotCarrying() .set_name(\"setnotcarrying1\") .set_parent(self)\n setfetching1 = SetFetching() .set_name(\"setfetching1\") .set_parent(self)\n backup = Forward(-15) .set_name(\"backup\") .set_parent(self)\n lookdown = SetHeadAngle(-12) .set_name(\"lookdown\") .set_parent(self)\n head_angle_wait = StateNode() .set_name(\"head_angle_wait\") .set_parent(self)\n check_visible = self.CheckCubeVisible() .set_name(\"check_visible\") .set_parent(self)\n lookdown2 = SetHeadAngle(-20) .set_name(\"lookdown2\") .set_parent(self)\n wrap_up = SetNotFetching() .set_name(\"wrap_up\") .set_parent(self)\n parentcompletes3 = ParentCompletes() .set_name(\"parentcompletes3\") .set_parent(self)\n \n completiontrans39 = CompletionTrans() .set_name(\"completiontrans39\")\n completiontrans39 .add_sources(setliftheight3) .add_destinations(check_carrying)\n \n failuretrans31 = FailureTrans() .set_name(\"failuretrans31\")\n failuretrans31 .add_sources(check_carrying) .add_destinations(backup,lookdown)\n \n successtrans11 = SuccessTrans() .set_name(\"successtrans11\")\n successtrans11 .add_sources(check_carrying) .add_destinations(setobject1)\n \n nulltrans22 = NullTrans() .set_name(\"nulltrans22\")\n nulltrans22 .add_sources(setobject1) .add_destinations(setnotcarrying1)\n \n nulltrans23 = NullTrans() .set_name(\"nulltrans23\")\n nulltrans23 .add_sources(setnotcarrying1) .add_destinations(setfetching1)\n \n nulltrans24 = NullTrans() .set_name(\"nulltrans24\")\n nulltrans24 .add_sources(setfetching1) .add_destinations(backup,lookdown)\n \n failuretrans32 = FailureTrans() .set_name(\"failuretrans32\")\n failuretrans32 .add_sources(lookdown) .add_destinations(head_angle_wait)\n \n completiontrans40 = CompletionTrans() .set_name(\"completiontrans40\")\n completiontrans40 .add_sources(backup,lookdown) .add_destinations(head_angle_wait)\n \n 
timertrans15 = TimerTrans(0.5) .set_name(\"timertrans15\")\n timertrans15 .add_sources(head_angle_wait) .add_destinations(check_visible)\n \n completiontrans41 = CompletionTrans() .set_name(\"completiontrans41\")\n completiontrans41 .add_sources(check_visible) .add_destinations(wrap_up)\n \n failuretrans33 = FailureTrans() .set_name(\"failuretrans33\")\n failuretrans33 .add_sources(check_visible) .add_destinations(lookdown2)\n \n failuretrans34 = FailureTrans() .set_name(\"failuretrans34\")\n failuretrans34 .add_sources(lookdown2) .add_destinations(wrap_up)\n \n timertrans16 = TimerTrans(0.5) .set_name(\"timertrans16\")\n timertrans16 .add_sources(lookdown2) .add_destinations(wrap_up)\n \n nulltrans25 = NullTrans() .set_name(\"nulltrans25\")\n nulltrans25 .add_sources(wrap_up) .add_destinations(parentcompletes3)\n \n return self\n\n\nclass PivotCube(StateNode):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n\n def setup(self):\n # put_arm: SetLiftHeight(0.68)\n # put_arm =C=> {drop, back}\n # put_arm =F=> ParentFails()\n # \n # drop: SetLiftHeight(0.28)\n # back: Forward(-60)\n # \n # drop =F=> ParentFails()\n # back =F=> ParentFails()\n # \n # {drop, back} =C=> reset\n # {drop, back} =F=> ParentFails()\n # \n # reset: SetLiftHeight(0)\n # reset =C=> Forward(-10) =C=> ParentCompletes()\n # reset =F=> ParentFails()\n \n # Code generated by genfsm on Sat Feb 25 01:50:19 2023:\n \n put_arm = SetLiftHeight(0.68) .set_name(\"put_arm\") .set_parent(self)\n parentfails5 = ParentFails() .set_name(\"parentfails5\") .set_parent(self)\n drop = SetLiftHeight(0.28) .set_name(\"drop\") .set_parent(self)\n back = Forward(-60) .set_name(\"back\") .set_parent(self)\n parentfails6 = ParentFails() .set_name(\"parentfails6\") .set_parent(self)\n parentfails7 = ParentFails() .set_name(\"parentfails7\") .set_parent(self)\n parentfails8 = ParentFails() .set_name(\"parentfails8\") .set_parent(self)\n reset = SetLiftHeight(0) .set_name(\"reset\") .set_parent(self)\n forward4 = Forward(-10) .set_name(\"forward4\") .set_parent(self)\n parentcompletes4 = ParentCompletes() .set_name(\"parentcompletes4\") .set_parent(self)\n parentfails9 = ParentFails() .set_name(\"parentfails9\") .set_parent(self)\n \n completiontrans42 = CompletionTrans() .set_name(\"completiontrans42\")\n completiontrans42 .add_sources(put_arm) .add_destinations(drop,back)\n \n failuretrans35 = FailureTrans() .set_name(\"failuretrans35\")\n failuretrans35 .add_sources(put_arm) .add_destinations(parentfails5)\n \n failuretrans36 = FailureTrans() .set_name(\"failuretrans36\")\n failuretrans36 .add_sources(drop) .add_destinations(parentfails6)\n \n failuretrans37 = FailureTrans() .set_name(\"failuretrans37\")\n failuretrans37 .add_sources(back) .add_destinations(parentfails7)\n \n completiontrans43 = CompletionTrans() .set_name(\"completiontrans43\")\n completiontrans43 .add_sources(drop,back) .add_destinations(reset)\n \n failuretrans38 = FailureTrans() .set_name(\"failuretrans38\")\n failuretrans38 .add_sources(drop,back) .add_destinations(parentfails8)\n \n completiontrans44 = CompletionTrans() .set_name(\"completiontrans44\")\n completiontrans44 .add_sources(reset) .add_destinations(forward4)\n \n completiontrans45 = CompletionTrans() .set_name(\"completiontrans45\")\n completiontrans45 .add_sources(forward4) .add_destinations(parentcompletes4)\n \n failuretrans39 = FailureTrans() .set_name(\"failuretrans39\")\n failuretrans39 .add_sources(reset) .add_destinations(parentfails9)\n \n return 
self\n\n\nclass RollingCube(StateNode):\n    def __init__(self, object=None, orientation=None, old_object=None):\n        super().__init__()\n        self.object = object\n        self.target_orientation = orientation\n        self.old_object = old_object\n        self.try_roll_number = 0\n\n\n    def start(self, event=None):\n        if isinstance(self.object, LightCube):\n            try:\n                self.wmobject = self.object.wm_obj\n            except AttributeError:\n                self.wmobject = None\n        elif isinstance(self.object, LightCubeObj):\n            self.wmobject = self.object\n            self.object = self.object.sdk_obj\n        else:\n            raise ValueError(self.object)\n        self.children['looker'].object = self.object\n        self.children['goto_cube'].object = self.object\n        self.children['goto_cube'].roll_cube = True\n        self.children['goto_cube'].roll_target = self.target_orientation\n        self.children['roll1'].object = self.object\n        self.children['roll2'].object = self.object\n        self.children['roll3'].object = self.object\n        super().start(event)\n        if (not self.wmobject) or self.wmobject.pose_confidence < 0:\n            print('RollingCube: cube has invalid pose!', self.object, self.object.pose)\n            self.post_event(PilotEvent(InvalidPose))\n            self.post_failure()\n\n\n    def try_again(self):\n        if self.try_roll_number > 2:\n            self.try_roll_number = 0\n            print('Reached the maximum number of roll attempts.')\n            return False\n        else:\n            self.try_roll_number += 1\n            print('Trying again, attempt', self.try_roll_number)\n            return True\n\n\n    class CheckOrientation(StateNode):\n        def start(self,event=None):\n            self.parent.orientation, _, _, _ = get_orientation_state(self.parent.object.pose.rotation.q0_q1_q2_q3)\n            super().start(event)\n            if self.parent.target_orientation == self.parent.orientation is not None:\n                print('rolled it successfully to', self.parent.target_orientation)\n                self.post_success()\n            elif not self.parent.target_orientation:\n                if not self.parent.old_object or self.parent.old_object.wm_obj.id != self.parent.object.wm_obj.id:\n                    self.parent.old_object = copy.copy(self.parent.object)\n                    self.post_completion()\n                elif same_orientation(self.parent.old_object, self.parent.object):\n                    print('failed: still the same orientation')\n                    self.post_failure()\n                else:\n                    self.parent.old_object = copy.copy(self.parent.object)\n                    print('rolled it successfully')\n                    self.post_success()\n            else:\n                if not self.parent.old_object:\n                    self.parent.old_object = copy.copy(self.parent.object)\n                    self.post_completion()\n                else:\n                    print('failed: orientation is not', self.parent.target_orientation)\n                    self.post_failure()\n\n    class CheckCubePoseValidOnce(StateNode):\n        def __init__(self, check_vis=False, reset=False):\n            self.check_vis = check_vis\n            self.reset = reset\n            super().__init__()\n\n        def start(self, event=None):\n            super().start(event)\n\n            if isinstance(self.parent.object, LightCube):\n                cube_id = self.parent.object.wm_obj.id\n            elif isinstance(self.parent.object, LightCubeObj):\n                cube_id = self.parent.object.id\n            else:\n                raise ValueError(self.parent.object)\n\n            if 'Cube' not in str(cube_id):\n                cube_id = 'Cube-' + str(cube_id)\n            wmobject = self.robot.world.world_map.objects[cube_id]\n\n            if self.check_vis and not self.parent.object.is_visible:\n                if self.reset:\n                    self.parent.wmobject.pose_confidence = -1\n                print('CheckCubePoseValidOnce: %s has invalid pose!' 
% cube_id)\n self.post_failure()\n elif self.parent.wmobject.pose_confidence < 0:\n print('pose_confidence', self.parent.wmobject.pose_confidence)\n self.post_failure()\n else:\n self.post_completion()\n\n class CheckCounts(StateNode):\n def start(self,event=None):\n SIDEWAYS_FRONT = 'front'\n SIDEWAYS_BACK = 'back'\n SIDEWAYS_SIDE = 'side'\n\n super().start(event)\n if not self.parent.target_orientation:\n print('no target_orientation')\n self.post_data(1)\n elif self.parent.orientation == ORIENTATION_UPRIGHT or self.parent.orientation == ORIENTATION_INVERTED:\n if self.parent.target_orientation == ORIENTATION_SIDEWAYS:\n self.post_data(1)\n elif self.parent.orientation == self.parent.target_orientation:\n self.post_data(3)\n else:\n self.post_data(2)\n else: # ORIENTATION_SIDEWAYS\n x, y, z = self.parent.object.pose.rotation.euler_angles\n robot_angle = self.robot.pose.rotation.angle_z.radians\n pattern = get_pattern_number(self.parent.object.pose.rotation.euler_angles)\n if pattern == 1:\n cube_rotate_angle = wrap_angle(x - pi/2)\n new_robot_angle = wrap_angle(robot_angle + cube_rotate_angle)\n elif pattern == 2:\n cube_rotate_angle = wrap_angle(y + pi)\n new_robot_angle = wrap_angle(robot_angle + cube_rotate_angle)\n elif pattern == 3:\n cube_rotate_angle = wrap_angle(- (x + pi/2))\n new_robot_angle = wrap_angle(robot_angle + cube_rotate_angle)\n elif pattern == 4:\n cube_rotate_angle = y\n new_robot_angle = wrap_angle(robot_angle + cube_rotate_angle)\n else:\n print('Unrecognized pattern.')\n cube_rotate_angle = 0\n new_robot_angle = 0\n\n possible_angles = [-pi, -pi/2, 0, pi/2, pi]\n facing_side = None\n new_robot_angle = min(possible_angles, key=lambda val:abs(val-new_robot_angle))\n if new_robot_angle == 0:\n facing_side = SIDEWAYS_FRONT\n elif abs(new_robot_angle) == pi:\n facing_side = SIDEWAYS_BACK\n else:\n facing_side = SIDEWAYS_SIDE\n\n # print('new_robot_angle: %.2f' % new_robot_angle)\n print('Robot is facing', facing_side)\n\n if facing_side == SIDEWAYS_SIDE: # left/right\n if self.parent.target_orientation == ORIENTATION_SIDEWAYS:\n self.post_data(1)\n else:\n self.post_data(0)\n elif facing_side == SIDEWAYS_FRONT: # front\n if self.parent.target_orientation == ORIENTATION_INVERTED:\n self.post_data(1)\n elif self.parent.target_orientation == ORIENTATION_SIDEWAYS:\n self.post_data(2)\n else:\n self.post_data(3)\n elif facing_side == SIDEWAYS_BACK: # back\n if self.parent.target_orientation == ORIENTATION_UPRIGHT:\n self.post_data(1)\n elif self.parent.target_orientation == ORIENTATION_SIDEWAYS:\n self.post_data(2)\n else:\n self.post_data(3)\n\n\n class TryAgain(StateNode):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n super().start(event)\n if self.parent.try_again():\n self.post_failure()\n else:\n self.post_success()\n\n\n class ForwardToCube(Forward):\n def __init__(self, offset):\n self.offset = offset\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n cube = self.parent.object\n dx = cube.pose.position.x - self.robot.pose.position.x\n dy = cube.pose.position.y - self.robot.pose.position.y\n dist = sqrt(dx*dx + dy*dy) - self.offset\n if (dist < 0):\n print('***** ForwardToCube %s negative distance: %.1f mm' % (self.name,dist))\n self.distance = Distance(dist)\n print('ForwardToCube %s: distance %.1f mm' % (self.name, self.distance.distance_mm))\n super().start(event)\n\n\n def setup(self):\n # # RollingCube machine\n # \n # start: SetFetching() =C=> {looker, check_cube_pose}\n # \n # looker: 
LookAtObject()\n # \n # check_cube_pose: self.CheckCubePoseValidOnce(check_vis=False)\n # check_cube_pose =C=> check_orientation\n # check_cube_pose =F=> fail\n # \n # check_orientation: self.CheckOrientation()\n # check_orientation =C=> goto_cube\n # check_orientation =S=> StateNode() =T(0.5)=> satisfied\n # check_orientation =F=> StateNode() =T(0.5)=> frustrated\n # \n # satisfied: AnimationTriggerNode(trigger=cozmo.anim.Triggers.RollBlockSuccess,\n # ignore_body_track=True,\n # ignore_head_track=True,\n # ignore_lift_track=True)\n # \n # satisfied =C=> ParentCompletes()\n # satisfied =F=> StopAllMotors() =T(1)=> ParentCompletes()\n # \n # frustrated: AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n # ignore_body_track=True,\n # ignore_head_track=True,\n # ignore_lift_track=True)\n # frustrated =C=> goto_cube\n # frustrated =F=> StopAllMotors() =T(1)=> goto_cube\n # \n # goto_cube: GoToCube()\n # goto_cube =F=> Print('goto_cube has failed.') =N=> try_again\n # goto_cube =C=> Print('goto_cube has succeeded.') =N=> decide_roll_counts\n # \n # decide_roll_counts: self.CheckCounts()\n # decide_roll_counts =D(0)=> Print('No way to achieve from this side') =C=> check_cube_pose_valid\n # decide_roll_counts =D(1)=> Print('Roll once') =C=> roll1\n # decide_roll_counts =D(2)=> Print('Roll twice') =C=> roll2\n # decide_roll_counts =D(3)=> Print('Roll thrice') =C=> roll3\n # \n # check_cube_pose_valid: self.CheckCubePoseValidOnce(check_vis=True)\n # check_cube_pose_valid =C=> check_orientation\n # check_cube_pose_valid =F=> setup_check_again\n # \n # setup_check_again: Forward(-30)\n # setup_check_again =C=> Print('backing up...') =T(1)=> check_cube_pose_valid2\n # setup_check_again =F=> try_again\n # \n # check_cube_pose_valid2:self.CheckCubePoseValidOnce(check_vis=True, reset=True)\n # check_cube_pose_valid2 =C=> check_orientation\n # check_cube_pose_valid2 =F=> try_again\n # \n # roll1: PivotCube()\n # roll1 =C=> Forward(-30) =C=> Print('Checking the new orientation...') =T(1)=> check_cube_pose_valid\n # roll1 =F=> try_again\n # \n # roll2: PivotCube()\n # roll2 =C=> SetLiftHeight(1) =C=> self.ForwardToCube(15) =C=> roll1\n # roll2 =F=> try_again\n # \n # roll3: PivotCube()\n # roll3 =C=> SetLiftHeight(1) =C=> self.ForwardToCube(15) =C=> roll2\n # roll3 =F=> try_again\n # \n # try_again: self.TryAgain()\n # try_again =S=> check_cube_pose_valid\n # try_again =F=> fail\n # \n # fail: SetNotFetching() =C=> ParentFails()\n \n # Code generated by genfsm on Sat Feb 25 01:50:19 2023:\n \n start = SetFetching() .set_name(\"start\") .set_parent(self)\n looker = LookAtObject() .set_name(\"looker\") .set_parent(self)\n check_cube_pose = self.CheckCubePoseValidOnce(check_vis=False) .set_name(\"check_cube_pose\") .set_parent(self)\n check_orientation = self.CheckOrientation() .set_name(\"check_orientation\") .set_parent(self)\n statenode7 = StateNode() .set_name(\"statenode7\") .set_parent(self)\n statenode8 = StateNode() .set_name(\"statenode8\") .set_parent(self)\n satisfied = AnimationTriggerNode(trigger=cozmo.anim.Triggers.RollBlockSuccess,\n ignore_body_track=True,\n ignore_head_track=True,\n ignore_lift_track=True) .set_name(\"satisfied\") .set_parent(self)\n parentcompletes5 = ParentCompletes() .set_name(\"parentcompletes5\") .set_parent(self)\n stopallmotors3 = StopAllMotors() .set_name(\"stopallmotors3\") .set_parent(self)\n parentcompletes6 = ParentCompletes() .set_name(\"parentcompletes6\") .set_parent(self)\n frustrated = 
AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n ignore_body_track=True,\n ignore_head_track=True,\n ignore_lift_track=True) .set_name(\"frustrated\") .set_parent(self)\n stopallmotors4 = StopAllMotors() .set_name(\"stopallmotors4\") .set_parent(self)\n goto_cube = GoToCube() .set_name(\"goto_cube\") .set_parent(self)\n print11 = Print('goto_cube has failed.') .set_name(\"print11\") .set_parent(self)\n print12 = Print('goto_cube has succeeded.') .set_name(\"print12\") .set_parent(self)\n decide_roll_counts = self.CheckCounts() .set_name(\"decide_roll_counts\") .set_parent(self)\n print13 = Print('No way to achieve from this side') .set_name(\"print13\") .set_parent(self)\n print14 = Print('Roll once') .set_name(\"print14\") .set_parent(self)\n print15 = Print('Roll twice') .set_name(\"print15\") .set_parent(self)\n print16 = Print('Roll thrice') .set_name(\"print16\") .set_parent(self)\n check_cube_pose_valid = self.CheckCubePoseValidOnce(check_vis=True) .set_name(\"check_cube_pose_valid\") .set_parent(self)\n setup_check_again = Forward(-30) .set_name(\"setup_check_again\") .set_parent(self)\n print17 = Print('backing up...') .set_name(\"print17\") .set_parent(self)\n check_cube_pose_valid2 = self.CheckCubePoseValidOnce(check_vis=True, reset=True) .set_name(\"check_cube_pose_valid2\") .set_parent(self)\n roll1 = PivotCube() .set_name(\"roll1\") .set_parent(self)\n forward5 = Forward(-30) .set_name(\"forward5\") .set_parent(self)\n print18 = Print('Checking the new orientation...') .set_name(\"print18\") .set_parent(self)\n roll2 = PivotCube() .set_name(\"roll2\") .set_parent(self)\n setliftheight4 = SetLiftHeight(1) .set_name(\"setliftheight4\") .set_parent(self)\n forwardtocube1 = self.ForwardToCube(15) .set_name(\"forwardtocube1\") .set_parent(self)\n roll3 = PivotCube() .set_name(\"roll3\") .set_parent(self)\n setliftheight5 = SetLiftHeight(1) .set_name(\"setliftheight5\") .set_parent(self)\n forwardtocube2 = self.ForwardToCube(15) .set_name(\"forwardtocube2\") .set_parent(self)\n try_again = self.TryAgain() .set_name(\"try_again\") .set_parent(self)\n fail = SetNotFetching() .set_name(\"fail\") .set_parent(self)\n parentfails10 = ParentFails() .set_name(\"parentfails10\") .set_parent(self)\n \n completiontrans46 = CompletionTrans() .set_name(\"completiontrans46\")\n completiontrans46 .add_sources(start) .add_destinations(looker,check_cube_pose)\n \n completiontrans47 = CompletionTrans() .set_name(\"completiontrans47\")\n completiontrans47 .add_sources(check_cube_pose) .add_destinations(check_orientation)\n \n failuretrans40 = FailureTrans() .set_name(\"failuretrans40\")\n failuretrans40 .add_sources(check_cube_pose) .add_destinations(fail)\n \n completiontrans48 = CompletionTrans() .set_name(\"completiontrans48\")\n completiontrans48 .add_sources(check_orientation) .add_destinations(goto_cube)\n \n successtrans12 = SuccessTrans() .set_name(\"successtrans12\")\n successtrans12 .add_sources(check_orientation) .add_destinations(statenode7)\n \n timertrans17 = TimerTrans(0.5) .set_name(\"timertrans17\")\n timertrans17 .add_sources(statenode7) .add_destinations(satisfied)\n \n failuretrans41 = FailureTrans() .set_name(\"failuretrans41\")\n failuretrans41 .add_sources(check_orientation) .add_destinations(statenode8)\n \n timertrans18 = TimerTrans(0.5) .set_name(\"timertrans18\")\n timertrans18 .add_sources(statenode8) .add_destinations(frustrated)\n \n completiontrans49 = CompletionTrans() .set_name(\"completiontrans49\")\n completiontrans49 .add_sources(satisfied) 
.add_destinations(parentcompletes5)\n \n failuretrans42 = FailureTrans() .set_name(\"failuretrans42\")\n failuretrans42 .add_sources(satisfied) .add_destinations(stopallmotors3)\n \n timertrans19 = TimerTrans(1) .set_name(\"timertrans19\")\n timertrans19 .add_sources(stopallmotors3) .add_destinations(parentcompletes6)\n \n completiontrans50 = CompletionTrans() .set_name(\"completiontrans50\")\n completiontrans50 .add_sources(frustrated) .add_destinations(goto_cube)\n \n failuretrans43 = FailureTrans() .set_name(\"failuretrans43\")\n failuretrans43 .add_sources(frustrated) .add_destinations(stopallmotors4)\n \n timertrans20 = TimerTrans(1) .set_name(\"timertrans20\")\n timertrans20 .add_sources(stopallmotors4) .add_destinations(goto_cube)\n \n failuretrans44 = FailureTrans() .set_name(\"failuretrans44\")\n failuretrans44 .add_sources(goto_cube) .add_destinations(print11)\n \n nulltrans26 = NullTrans() .set_name(\"nulltrans26\")\n nulltrans26 .add_sources(print11) .add_destinations(try_again)\n \n completiontrans51 = CompletionTrans() .set_name(\"completiontrans51\")\n completiontrans51 .add_sources(goto_cube) .add_destinations(print12)\n \n nulltrans27 = NullTrans() .set_name(\"nulltrans27\")\n nulltrans27 .add_sources(print12) .add_destinations(decide_roll_counts)\n \n datatrans2 = DataTrans(0) .set_name(\"datatrans2\")\n datatrans2 .add_sources(decide_roll_counts) .add_destinations(print13)\n \n completiontrans52 = CompletionTrans() .set_name(\"completiontrans52\")\n completiontrans52 .add_sources(print13) .add_destinations(check_cube_pose_valid)\n \n datatrans3 = DataTrans(1) .set_name(\"datatrans3\")\n datatrans3 .add_sources(decide_roll_counts) .add_destinations(print14)\n \n completiontrans53 = CompletionTrans() .set_name(\"completiontrans53\")\n completiontrans53 .add_sources(print14) .add_destinations(roll1)\n \n datatrans4 = DataTrans(2) .set_name(\"datatrans4\")\n datatrans4 .add_sources(decide_roll_counts) .add_destinations(print15)\n \n completiontrans54 = CompletionTrans() .set_name(\"completiontrans54\")\n completiontrans54 .add_sources(print15) .add_destinations(roll2)\n \n datatrans5 = DataTrans(3) .set_name(\"datatrans5\")\n datatrans5 .add_sources(decide_roll_counts) .add_destinations(print16)\n \n completiontrans55 = CompletionTrans() .set_name(\"completiontrans55\")\n completiontrans55 .add_sources(print16) .add_destinations(roll3)\n \n completiontrans56 = CompletionTrans() .set_name(\"completiontrans56\")\n completiontrans56 .add_sources(check_cube_pose_valid) .add_destinations(check_orientation)\n \n failuretrans45 = FailureTrans() .set_name(\"failuretrans45\")\n failuretrans45 .add_sources(check_cube_pose_valid) .add_destinations(setup_check_again)\n \n completiontrans57 = CompletionTrans() .set_name(\"completiontrans57\")\n completiontrans57 .add_sources(setup_check_again) .add_destinations(print17)\n \n timertrans21 = TimerTrans(1) .set_name(\"timertrans21\")\n timertrans21 .add_sources(print17) .add_destinations(check_cube_pose_valid2)\n \n failuretrans46 = FailureTrans() .set_name(\"failuretrans46\")\n failuretrans46 .add_sources(setup_check_again) .add_destinations(try_again)\n \n completiontrans58 = CompletionTrans() .set_name(\"completiontrans58\")\n completiontrans58 .add_sources(check_cube_pose_valid2) .add_destinations(check_orientation)\n \n failuretrans47 = FailureTrans() .set_name(\"failuretrans47\")\n failuretrans47 .add_sources(check_cube_pose_valid2) .add_destinations(try_again)\n \n completiontrans59 = CompletionTrans() 
.set_name(\"completiontrans59\")\n completiontrans59 .add_sources(roll1) .add_destinations(forward5)\n \n completiontrans60 = CompletionTrans() .set_name(\"completiontrans60\")\n completiontrans60 .add_sources(forward5) .add_destinations(print18)\n \n timertrans22 = TimerTrans(1) .set_name(\"timertrans22\")\n timertrans22 .add_sources(print18) .add_destinations(check_cube_pose_valid)\n \n failuretrans48 = FailureTrans() .set_name(\"failuretrans48\")\n failuretrans48 .add_sources(roll1) .add_destinations(try_again)\n \n completiontrans61 = CompletionTrans() .set_name(\"completiontrans61\")\n completiontrans61 .add_sources(roll2) .add_destinations(setliftheight4)\n \n completiontrans62 = CompletionTrans() .set_name(\"completiontrans62\")\n completiontrans62 .add_sources(setliftheight4) .add_destinations(forwardtocube1)\n \n completiontrans63 = CompletionTrans() .set_name(\"completiontrans63\")\n completiontrans63 .add_sources(forwardtocube1) .add_destinations(roll1)\n \n failuretrans49 = FailureTrans() .set_name(\"failuretrans49\")\n failuretrans49 .add_sources(roll2) .add_destinations(try_again)\n \n completiontrans64 = CompletionTrans() .set_name(\"completiontrans64\")\n completiontrans64 .add_sources(roll3) .add_destinations(setliftheight5)\n \n completiontrans65 = CompletionTrans() .set_name(\"completiontrans65\")\n completiontrans65 .add_sources(setliftheight5) .add_destinations(forwardtocube2)\n \n completiontrans66 = CompletionTrans() .set_name(\"completiontrans66\")\n completiontrans66 .add_sources(forwardtocube2) .add_destinations(roll2)\n \n failuretrans50 = FailureTrans() .set_name(\"failuretrans50\")\n failuretrans50 .add_sources(roll3) .add_destinations(try_again)\n \n successtrans13 = SuccessTrans() .set_name(\"successtrans13\")\n successtrans13 .add_sources(try_again) .add_destinations(check_cube_pose_valid)\n \n failuretrans51 = FailureTrans() .set_name(\"failuretrans51\")\n failuretrans51 .add_sources(try_again) .add_destinations(fail)\n \n completiontrans67 = CompletionTrans() .set_name(\"completiontrans67\")\n completiontrans67 .add_sources(fail) .add_destinations(parentfails10)\n \n return self\n\n\n\"\"\"\nclass PickUpCubeForeign(StateNode):\n\n # *** THIS IS OLD CODE AND NEEDS TO BE UPDATED ***\n\n def __init__(self, cube_id=None):\n self.object_id = cube_id\n super().__init__()\n\n def start(self, event=None):\n # self.object will be set up by the parent of this node\n self.object = self.robot.world.light_cubes[self.object_id]\n self.foreign_cube_id = 'LightCubeForeignObj-'+str(self.object_id)\n super().start(event)\n\n def pick_side(self, dist, use_world_map):\n # NOTE: This code is only correct for upright cubes\n cube = self.foreign_cube_id\n wobj = self.robot.world.world_map.objects[cube]\n x = wobj.x\n y = wobj.y\n ang = wobj.theta\n rx = self.robot.world.particle_filter.pose[0]\n ry = self.robot.world.particle_filter.pose[1]\n\n side1 = (x + cos(ang) * dist, y + sin(ang) * dist, ang + pi)\n side2 = (x - cos(ang) * dist, y - sin(ang) * dist, ang)\n side3 = (x + sin(ang) * dist, y - cos(ang) * dist, ang + pi/2)\n side4 = (x - sin(ang) * dist, y + cos(ang) * dist, ang - pi/2)\n sides = [side1, side2, side3, side4]\n sorted_sides = sorted(sides, key=lambda pt: (pt[0]-rx)**2 + (pt[1]-ry)**2)\n return sorted_sides[0]\n\n class GoToSide(WallPilotToPose):\n def __init__(self):\n super().__init__(None)\n\n def start(self, event=None):\n cube = self.parent.foreign_cube_id\n print('Selected cube',self.robot.world.world_map.objects[cube])\n (x, y, theta) = 
self.parent.pick_side(200, True)\n self.target_pose = Pose(x, y, self.robot.pose.position.z,\n angle_z=Angle(radians = wrap_angle(theta)))\n print('pickup.GoToSide: traveling to (%.1f, %.1f) @ %.1f deg.' %\n (self.target_pose.position.x, self.target_pose.position.y,\n self.target_pose.rotation.angle_z.degrees))\n super().start(event)\n\n class Pick(PickUpCube):\n def __init__(self):\n super().__init__(None)\n\n def start(self, event=None):\n self.object = self.parent.object\n super().start(event)\n\n #setup{ # PickUpCube machine\n goto_cube: self.GoToSide() =C=> one\n\n one: self.Pick() =C=> end\n end: Say('Done') =C=> ParentCompletes()\n }\n\"\"\"\n"
},
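The `CheckCounts` state in the `RollingCube` machine above decides how many quarter-rolls are needed by snapping the robot's predicted heading relative to a sideways cube onto the nearest quarter turn. Below is a minimal standalone sketch of that classification step; `wrap_angle` is assumed to normalize angles into (-pi, pi] and is reimplemented here, since the real helper comes from the cozmo_fsm geometry utilities.

```python
from math import pi

def wrap_angle(a):
    # Assumed behavior of the wrap_angle helper the FSM imports:
    # normalize an angle into the range (-pi, pi].
    while a > pi:
        a -= 2*pi
    while a <= -pi:
        a += 2*pi
    return a

def facing_side(robot_angle, cube_rotate_angle):
    # Snap the combined heading onto the nearest quarter turn, as
    # CheckCounts does, then classify which cube face the robot sees.
    new_robot_angle = wrap_angle(robot_angle + cube_rotate_angle)
    snapped = min([-pi, -pi/2, 0, pi/2, pi],
                  key=lambda v: abs(v - new_robot_angle))
    if snapped == 0:
        return 'front'
    elif abs(snapped) == pi:
        return 'back'
    else:
        return 'side'

print(facing_side(0.2, -0.1))  # -> 'front'
print(facing_side(3.0, 0.0))   # -> 'back'
```

Listing both -pi and pi as candidates mirrors the original code and avoids a discontinuity when the wrapped heading lands near the back face.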
{
"alpha_fraction": 0.540644645690918,
"alphanum_fraction": 0.5834177732467651,
"avg_line_length": 43.44144058227539,
"blob_id": "9d39bde5b998124ef1075c32b43ff7534aa2c37b",
"content_id": "6bd28bb91ac61276e168cf15c6dc6f3b8a9b6021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4933,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 111,
"path": "/cozmo_fsm/examples/CV_Hough.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n CV_Hough demonstrates OpenCV's HoughLines and probabilistic HoughLinesP\n primitives. The 'edges' window displays the results of a Canny edge operator\n that is the input to the Hough transform. The 'Hough' window shows the\n output of HoughLines with the given settings of the r and theta tolerances\n and minimum bin count (threshold). The 'HoughP' window shows the output of\n HoughLinesP using the r and theta values from the Hough window, plus the\n minLineLength and maxLineGap parameters and its own bin count threshold.\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom cozmo_fsm import *\n\nclass CV_Hough(StateMachineProgram):\n def __init__(self):\n super().__init__(aruco=False, particle_filter=False, cam_viewer=False,\n force_annotation=True, annotate_sdk=False)\n\n def start(self):\n cv2.namedWindow('gray')\n cv2.namedWindow('edges')\n cv2.namedWindow('Hough')\n cv2.namedWindow('HoughP')\n dummy = numpy.array([[0]], dtype='uint8')\n cv2.imshow('gray',dummy)\n cv2.imshow('edges',dummy)\n cv2.imshow('Hough',dummy)\n cv2.imshow('HoughP',dummy)\n\n self.h_lines = None\n self.p_lines = None\n\n cv2.createTrackbar('thresh1','edges',0,255,lambda self: None)\n cv2.createTrackbar('thresh2','edges',0,255,lambda self: None)\n cv2.setTrackbarPos('thresh1','edges',50)\n cv2.setTrackbarPos('thresh2','edges',150)\n\n cv2.createTrackbar('r_tol','Hough',1,10,lambda self: None)\n cv2.createTrackbar('deg_tol','Hough',1,18,lambda self: None)\n cv2.createTrackbar('h_thresh','Hough',1,250,lambda self: None)\n cv2.createTrackbar('h_main','Hough',0,1,lambda self: None)\n cv2.setTrackbarPos('r_tol','Hough',2)\n cv2.setTrackbarPos('deg_tol','Hough',2)\n cv2.setTrackbarPos('h_thresh','Hough',120)\n cv2.setTrackbarPos('h_main','Hough',0)\n\n cv2.createTrackbar('minLineLength','HoughP',1,80,lambda self: None)\n cv2.createTrackbar('maxLineGap','HoughP',1,50,lambda self: None)\n cv2.createTrackbar('p_thresh','HoughP',1,250,lambda self: None)\n cv2.setTrackbarPos('minLineLength','HoughP',40)\n cv2.setTrackbarPos('maxLineGap','HoughP',20)\n cv2.setTrackbarPos('p_thresh','HoughP',20)\n cv2.createTrackbar('p_main','HoughP',0,1,lambda self: None)\n cv2.setTrackbarPos('p_main','HoughP',0)\n super().start()\n\n def user_image(self,image,gray):\n self.gray = gray\n\n # Canny edge detector\n self.thresh1 = cv2.getTrackbarPos('thresh1','edges')\n self.thresh2 = cv2.getTrackbarPos('thresh2','edges')\n self.edges = cv2.Canny(gray, self.thresh1, self.thresh2, apertureSize=3)\n\n # regular Hough\n self.r_tol = max(0.1, cv2.getTrackbarPos('r_tol','Hough'))\n self.deg_tol = max(0.1, cv2.getTrackbarPos('deg_tol','Hough'))\n self.h_thresh = cv2.getTrackbarPos('h_thresh','Hough')\n self.h_lines = cv2.HoughLines(self.edges, self.r_tol,\n self.deg_tol/180.*np.pi,\n self.h_thresh)\n # probabilistic Hough\n self.p_thresh = cv2.getTrackbarPos('p_thresh','HoughP')\n self.minLineLength = cv2.getTrackbarPos('minLineLength','HoughP')\n self.maxLineGap = cv2.getTrackbarPos('maxLineGap','HoughP')\n self.p_lines = cv2.HoughLinesP(self.edges, self.r_tol, self.deg_tol/180.*np.pi,\n self.p_thresh, None,\n self.minLineLength, self.maxLineGap)\n\n def user_annotate(self,image):\n cv2.imshow('gray',self.gray)\n cv2.imshow('edges',self.edges)\n if self.h_lines is not None:\n hough_image = cv2.cvtColor(self.edges,cv2.COLOR_GRAY2BGR)\n h_main = cv2.getTrackbarPos('h_main','Hough')\n for line in self.h_lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000*(-b))\n y1 = 
int(y0 + 1000*a)\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*a)\n cv2.line(hough_image,(x1,y1),(x2,y2),(0,255,0),1)\n if h_main:\n cv2.line(image,(2*x1,2*y1),(2*x2,2*y2),(0,255,0),2)\n cv2.imshow('Hough',hough_image)\n if self.p_lines is not None:\n houghp_image = cv2.cvtColor(self.edges,cv2.COLOR_GRAY2BGR)\n p_main = cv2.getTrackbarPos('p_main','HoughP')\n for line in self.p_lines:\n x1,y1,x2,y2 = line[0]\n cv2.line(houghp_image,(x1,y1),(x2,y2),(255,0,0),1)\n if p_main:\n cv2.line(image,(2*x1,2*y1),(2*x2,2*y2),(255,0,0),2)\n cv2.imshow('HoughP',houghp_image)\n cv2.waitKey(1)\n return image\n"
},
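The CV_Hough example above wires every Canny and Hough parameter to a trackbar and runs on live camera frames. The same pipeline can be exercised offline on a synthetic image; the parameter values below are illustrative stand-ins close to the trackbar defaults, not part of the original example.

```python
import cv2
import numpy as np

# Synthetic 320x240 grayscale frame with a single bright line to detect.
img = np.zeros((240, 320), dtype=np.uint8)
cv2.line(img, (20, 200), (300, 40), 255, 2)

# Canny edge map feeds both Hough variants, as in user_image() above.
edges = cv2.Canny(img, 50, 150, apertureSize=3)

# Standard Hough: each hit is an infinite line given as (rho, theta).
h_lines = cv2.HoughLines(edges, 2, 2/180.*np.pi, 120)

# Probabilistic Hough: each hit is a finite segment (x1, y1, x2, y2).
p_lines = cv2.HoughLinesP(edges, 2, 2/180.*np.pi, 20,
                          None, 40, 20)

print(None if h_lines is None else h_lines[0])
print(None if p_lines is None else p_lines[0])
```

HoughLines returns (rho, theta) pairs describing infinite lines, while HoughLinesP returns finite segments, which is why the example's annotation code draws the two result sets differently.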
{
"alpha_fraction": 0.5297501087188721,
"alphanum_fraction": 0.5589383840560913,
"avg_line_length": 35.71709060668945,
"blob_id": "cb72651745b74c99b60e46afd58030168714c6ec",
"content_id": "5dd90cfb58dac3f11a835112dee9fee10ae40cf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37378,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 1018,
"path": "/cozmo_fsm/worldmap_viewer.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nOpenGL world viewer for cozmo_fsm world map.\n\"\"\"\n\nfrom math import sin, cos, atan2, pi, radians\nimport time\nimport array\nimport numpy as np\nimport platform as pf\n\ntry:\n from OpenGL.GLUT import *\n from OpenGL.GL import *\n from OpenGL.GLU import *\nexcept:\n pass\n\nfrom . import opengl\nfrom . import geometry\nfrom . import worldmap\n\nimport cozmo\nfrom cozmo.nav_memory_map import NodeContentTypes\n\nWINDOW = None\nEXCEPTION_COUNTER = 0\nDISPLAY_ENABLED = True\n\nhelp_text = \"\"\"\nWorld viewer keyboard commands:\n a Translate gazepoint left\n d Translate gazepoint right\n w Translate gazepoint forward\n s Translate gazepoint backward\n < Zoom in\n > Zoom out\n page-up Translate gazepoint up\n page-down Translate gazepoint down\n\n left-arrow Orbit camera left\n right-arrow Orbit camera right\n up-arrow Orbit camera upward\n down-arrow Orbit camera downward\n\n m Toggle memory map\n x Toggle axes\n z Reset to initial view\n v Toggle display of viewing parameters\n # Disable/enable automatic redisplay\n h Print help\n\"\"\"\n\nhelp_text_mac = \"\"\"\nWorld viewer keyboard commands:\n option + a Translate gazepoint left\n option + d Translate gazepoint right\n option + w Translate gazepoint forward\n option + s Translate gazepoint backward\n option + < Zoom in\n option + > Zoom out\n fn + up-arrow Translate gazepoint up\n fn + down-arrow Translate gazepoint down\n\n left-arrow Orbit camera left\n right-arrow Orbit camera right\n up-arrow Orbit camera upward\n down-arrow Orbit camera downward\n\n option + m Toggle memory map\n option + x Toggle axes\n option + z Reset to initial view\n option + v Toggle display of viewing parameters\n # Disable/enable automatic redisplay\n option + h Print help\n\"\"\"\n\ncube_vertices = array.array('f', [ \\\n -0.5, -0.5, +0.5, \\\n -0.5, +0.5, +0.5, \\\n +0.5, +0.5, +0.5, \\\n +0.5, -0.5, +0.5, \\\n -0.5, -0.5, -0.5, \\\n -0.5, +0.5, -0.5, \\\n +0.5, +0.5, -0.5, \\\n +0.5, -0.5, -0.5 \\\n ])\n\ncamera_vertices = array.array('f', [ \\\n -0.5, 0, 0, \\\n -0.5, 0, 0, \\\n +0.5, +0.5, +0.5, \\\n +0.5, -0.5, +0.5, \\\n -0.5, 0, 0, \\\n -0.5, 0, 0, \\\n +0.5, +0.5, -0.5, \\\n +0.5, -0.5, -0.5 \\\n ])\n\ncube_colors_0 = array.array('f', [ \\\n 0.6, 0.6, 0.0, \\\n 0.6, 0.6, 0.0, \\\n 0.0, 0.0, 0.7, \\\n 0.0, 0.0, 0.7, \\\n 0.7, 0.0, 0.0, \\\n 0.7, 0.0, 0.0, \\\n 0.0, 0.7, 0.0, \\\n 0.0, 0.7, 0.0, \\\n ])\n\ncube_colors_1 = array.array('f', [x/0.7 for x in cube_colors_0])\n\ncube_colors_2 = array.array('f', \\\n [0.8, 0.8, 0.0, \\\n 0.8, 0.8, 0.0, \\\n 0.0, 0.8, 0.8, \\\n 0.0, 0.8, 0.8, \\\n 0.8, 0.0, 0.8, \\\n 0.8, 0.0, 0.8, \\\n 0.9, 0.9, 0.9, \\\n 0.9, 0.9, 0.9 ])\n\ncolor_black = (0., 0., 0.)\ncolor_white = (1., 1., 1.)\ncolor_red = (1., 0., 0.)\ncolor_green = (0., 1., 0.)\ncolor_light_green = (0., 0.5, 0.)\ncolor_blue = (0., 0., 1.0)\ncolor_cyan = (0., 1.0, 1.0)\ncolor_yellow = (0.8, 0.8, 0.)\ncolor_orange = (1., 0.5, .063)\ncolor_gray = (0.5, 0.5, 0.5)\ncolor_light_gray = (0.65, 0.65, 0.65)\n\ncube_cIndices = array.array('B', \\\n [0, 3, 2, 1, \\\n 2, 3, 7, 6, \\\n 0, 4, 7, 3, \\\n 1, 2, 6, 5, \\\n 4, 5, 6, 7, \\\n 0, 1, 5, 4 ])\n\nlight_cube_size_mm = 44.3\n\nrobot_body_size_mm = ( 70, 56, 30)\nrobot_body_offset_mm = (-30, 0, 15)\nrobot_head_size_mm = ( 36, 39.4, 36)\nrobot_head_offset_mm = ( 20, 0, 36)\nlift_size_mm = ( 10, 50, 30)\nlift_arm_spacing_mm = 52\nlift_arm_len_mm = 66\nlift_arm_diam_mm = 10\n\n\ncharger_bed_size_mm = (104, 98, 10 )\ncharger_back_size_mm = ( 5, 90, 35 )\n\nwscale = 0.02 # millimeters to graphics 
window coordinates\n\naxis_length = 100\naxis_width = 1\nprint_camera = False\n\ninitial_fixation_point = [100, -25, 0]\ninitial_camera_rotation = [0, 40, 270]\ninitial_camera_distance = 500\n\nfixation_point = initial_fixation_point.copy()\ncamera_rotation = initial_camera_rotation.copy()\ncamera_distance = initial_camera_distance\ncamera_loc = (0., 0., 0.) # will be recomputed by display()\n\nclass WorldMapViewer():\n def __init__(self, robot, width=512, height=512,\n windowName = \"Cozmo's World\",\n bgcolor = (0,0,0)):\n self.robot = robot\n self.width = width\n self.height = height\n self.aspect = self.width/self.height\n self.windowName = windowName\n self.bgcolor = bgcolor\n self.translation = [0., 0.] # Translation in mm\n self.scale = 1\n self.show_axes = True\n self.show_memory_map = False\n\n def make_cube(self, size=(1,1,1), highlight=False, color=None, alpha=1.0, body=True, edges=True):\n \"\"\"Make a cube centered on the origin\"\"\"\n glEnableClientState(GL_VERTEX_ARRAY)\n if color is None:\n glEnableClientState(GL_COLOR_ARRAY)\n if highlight:\n glColorPointer(3, GL_FLOAT, 0, cube_colors_1.tobytes())\n else:\n glColorPointer(3, GL_FLOAT, 0, cube_colors_0.tobytes())\n else:\n if not highlight:\n s = 0.5 # scale down the brightness\n color = (color[0]*s, color[1]*s, color[2]*s)\n glColor4f(*color,alpha)\n verts = cube_vertices * 1; # copy the array\n for i in range(0,24,3):\n verts[i ] *= size[0]\n verts[i+1] *= size[1]\n verts[i+2] *= size[2]\n if body:\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n glVertexPointer(3, GL_FLOAT, 0, verts.tobytes())\n glDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE, cube_cIndices.tobytes())\n if edges:\n # begin wireframe\n for i in range(0,24): verts[i] *= 1.02\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glVertexPointer(3, GL_FLOAT, 0, verts.tobytes())\n glDisableClientState(GL_COLOR_ARRAY)\n if body:\n if highlight:\n glColor4f(*color_white,1)\n else:\n glColor4f(*color_black,1)\n else:\n if highlight:\n glColor4f(*color,1)\n else:\n s = 0.7 # scale down the brightness if necessary\n glColor4f(color[0]*s, color[1]*s, color[2]*s, 1)\n glDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE, cube_cIndices.tobytes())\n # end wireframe\n glDisableClientState(GL_COLOR_ARRAY)\n glDisableClientState(GL_VERTEX_ARRAY)\n\n def make_light_cube(self,cube_obj):\n global gl_lists\n lcube = cube_obj.sdk_obj\n cube_number = lcube.cube_id\n pos = (cube_obj.x, cube_obj.y, cube_obj.z)\n color = (None, color_red, color_green, color_blue)[cube_number]\n valid_pose = (lcube.pose.is_valid and cube_obj.pose_confidence >= 0) or \\\n self.robot.carrying is cube_obj\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(*pos)\n s = light_cube_size_mm\n glTranslatef(0,0,4) # keep cube from projecting through the floor\n if valid_pose:\n t = geometry.quat2rot(*lcube.pose.rotation.q0_q1_q2_q3)\n else:\n t = geometry.aboutZ(cube_obj.theta)\n t = t.transpose() # Transpose the matrix for sending to OpenGL\n rotmat = array.array('f',t.flatten()).tobytes()\n glMultMatrixf(rotmat)\n if valid_pose:\n # make solid cube and highlight if visible\n self.make_cube((s,s,s), highlight=lcube.is_visible, color=color)\n glRotatef(-90, 0., 0., 1.)\n glTranslatef(-s/4, -s/4, s/2+0.5)\n glScalef(0.25, 0.2, 0.25)\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(ascii(cube_number)))\n else:\n # make wireframe cube if coords no longer comparable\n pass # self.make_cube((s,s,s), body=False, highlight=True, color=color)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def 
make_custom_cube(self,custom_obj,obst):\n        global gl_lists\n        c = glGenLists(1)\n        glNewList(c, GL_COMPILE)\n        pos = (obst.x, obst.y, obst.z)\n        size = obst.size\n        orient = obst.theta\n        glPushMatrix()\n        glTranslatef(pos[0], pos[1], max(pos[2],5))\n        # Transpose the pose rotation matrix for sending to OpenGL\n        if isinstance(custom_obj, cozmo.objects.CustomObject):\n            t = geometry.quat2rot(*custom_obj.pose.rotation.q0_q1_q2_q3).transpose()\n        else:\n            t = geometry.identity()\n        rotmat = array.array('f',t.flatten()).tobytes()\n        glMultMatrixf(rotmat)\n        comparable = True # obj.pose.origin_id == 0 or obj.pose.is_comparable(self.robot.pose)\n        obj_color = color_orange\n        highlight = custom_obj.is_visible\n        if comparable:\n            self.make_cube(size, highlight=highlight, color=obj_color)\n        else:\n            self.make_cube(size, body=False, highlight=False, color=obj_color)\n        glPopMatrix()\n        glEndList()\n        gl_lists.append(c)\n\n    def make_cylinder(self,radius=10,height=25, highlight=True, color=None):\n        if color is None:\n            color = (1,1,1)\n        if len(color) == 3:\n            color = (*color, 1)\n        if not highlight:\n            s = 0.5  # scale down the brightness\n            color = (color[0]*s, color[1]*s, color[2]*s, color[3])\n        glColor4f(*color)\n        quadric = gluNewQuadric()\n        gluQuadricOrientation(quadric, GLU_OUTSIDE)\n        gluCylinder(quadric, radius, radius, height, 30, 20)\n        glTranslatef(0, 0, height)\n        gluDisk(quadric, 0, radius, 30, 1)\n\n        # Draw the outline circles\n        if highlight:\n            color = (1, 1, 1, 1)\n        else:\n            color = (0, 0, 0, 1)\n\n        r = radius + 0.1\n        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n        glColor4f(*color)\n        glBegin(GL_LINE_LOOP)\n        num_slices = 36\n        for i in range(num_slices):\n            theta = i * (360/num_slices) * (pi/180)\n            glVertex3f(r * cos(theta), r*sin(theta), 0.)\n        glEnd()\n        glTranslatef(0, 0, -height)\n        glBegin(GL_LINE_LOOP)\n        for i in range(num_slices):\n            theta = i * (360/num_slices) * (pi/180)\n            glVertex3f(r * cos(theta), r*sin(theta), 0.)\n        glEnd()\n\n\n    def make_chip(self,chip):\n        global gl_lists\n        c = glGenLists(1)\n        glNewList(c, GL_COMPILE)\n        glPushMatrix()\n        glTranslatef(chip.x, chip.y, chip.z)\n        self.make_cylinder(chip.radius, chip.thickness,\n                           color=(0,0.8,0), highlight=True)\n        glPopMatrix()\n        glEndList()\n        gl_lists.append(c)\n\n    def make_face(self,face):\n        global gl_lists\n        c = glGenLists(1)\n        glNewList(c, GL_COMPILE)\n        glPushMatrix()\n        glTranslatef(face.x, face.y, face.z)\n        if face.is_visible:\n            color = (0.0, 1.0, 0.0, 0.9)\n        else:\n            color = (0.0, 0.5, 0.0, 0.7)\n        glColor4f(*color)\n        quadric = gluNewQuadric()\n        gluQuadricOrientation(quadric, GLU_OUTSIDE)\n        glScalef(1.0, 1.0, 2.0)\n        gluSphere(quadric, 100, 20, 10)\n        glPopMatrix()\n        glEndList()\n        gl_lists.append(c)\n\n\n    def make_wall(self,wall_obj):\n        global gl_lists\n        wall_spec = worldmap.wall_marker_dict[wall_obj.id]\n        half_length = wall_obj.length / 2\n        half_height = wall_obj.height / 2\n        door_height = wall_obj.door_height\n        wall_thickness = 4.0\n        widths = []\n        last_x = -half_length\n        edges = [ [0, -half_length, door_height/2, 1.] 
]\n for (center,width) in wall_spec.doorways:\n left_edge = center - width/2 - half_length\n edges.append([0., left_edge, door_height/2, 1.])\n widths.append(left_edge - last_x)\n right_edge = center + width/2 - half_length\n edges.append([0., right_edge, door_height/2, 1.])\n last_x = right_edge\n edges.append([0., half_length, door_height/2, 1.])\n widths.append(half_length-last_x)\n edges = np.array(edges).T\n edges = geometry.aboutZ(wall_obj.theta).dot(edges)\n edges = geometry.translate(wall_obj.x,wall_obj.y).dot(edges)\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n if wall_obj.is_foreign:\n color = color_white\n else:\n color = color_yellow\n for i in range(0,len(widths)):\n center = edges[:, 2*i : 2*i+2].mean(1).reshape(4,1)\n dimensions=(wall_thickness, widths[i], wall_obj.door_height)\n glPushMatrix()\n glTranslatef(*center.flatten()[0:3])\n glRotatef(wall_obj.theta*180/pi, 0, 0, 1)\n self.make_cube(size=dimensions, color=color, highlight=True)\n glPopMatrix()\n # Make the transom\n glPushMatrix()\n transom_height = wall_obj.height - wall_obj.door_height\n z = wall_obj.door_height + transom_height/2\n glTranslatef(wall_obj.x, wall_obj.y, z)\n glRotatef(wall_obj.theta*180/pi, 0, 0, 1)\n self.make_cube(size=(wall_thickness, wall_obj.length, transom_height),\n edges=False, color=color, highlight=True)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_doorway(self,doorway):\n global gl_lists\n wall = doorway.wall\n spec = wall.doorways[doorway.index]\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(doorway.x, doorway.y, wall.door_height/2)\n glRotatef(doorway.theta*180/pi, 0, 0, 1)\n self.make_cube(size=(1, spec[1]-10, wall.door_height-10), edges=False,\n color=color_cyan, alpha=0.2, highlight=True)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_floor(self):\n global gl_lists\n floor_size = (2000, 2000, 1)\n blip = floor_size[2]\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(0., 0., -blip)\n self.make_cube(floor_size, highlight=None, color=color_gray)\n glTranslatef(0., 0., 2.*blip)\n glColor4f(*color_light_gray,1)\n for x in range(-floor_size[0]//2, floor_size[0]//2+1, 100):\n glBegin(GL_LINES)\n glVertex3f(x, floor_size[1]//2, 0)\n glVertex3f(x, -floor_size[1]//2, 0)\n glEnd()\n for y in range(-floor_size[1]//2, floor_size[1]//2+1, 100):\n glBegin(GL_LINES)\n glVertex3f( floor_size[0]/2, y, 0)\n glVertex3f(-floor_size[0]/2, y, 0)\n glEnd()\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_charger(self):\n charger = self.robot.world.charger\n if (not charger) or (not charger.pose) or not charger.pose.is_valid: return None\n comparable = charger.pose.is_comparable(self.robot.pose)\n highlight = charger.is_visible or (self.robot.is_on_charger and comparable)\n global gl_lists\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n p = charger.pose.position.x_y_z\n glTranslatef(*p)\n glRotatef(charger.pose.rotation.angle_z.degrees, 0, 0, 1)\n glTranslatef(charger_bed_size_mm[0]/2,\n 0,\n charger_bed_size_mm[2]/2)\n glRotatef(180, 0, 0, 1) # charger \"front\" is opposite robot \"front\"\n if comparable:\n self.make_cube(charger_bed_size_mm, highlight=highlight)\n else:\n self.make_cube(charger_bed_size_mm, body=False, \\\n highlight=False, color=color_white)\n glTranslatef(\n (charger_back_size_mm[0]-charger_bed_size_mm[0])/2,\n 0,\n charger_back_size_mm[2]/2)\n if comparable:\n self.make_cube(charger_back_size_mm, highlight=highlight)\n else:\n 
self.make_cube(charger_back_size_mm, body=False, \\\n highlight=True, color=color_white)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_custom_marker(self,marker):\n self.make_aruco_marker(marker)\n\n def make_aruco_marker(self,marker):\n global gl_lists\n marker_number = marker.marker_number\n s = light_cube_size_mm\n pos = (marker.x, marker.y, marker.z)\n color = (color_red, color_green, color_blue)[marker_number%3]\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(*pos)\n glRotatef(marker.theta*180/pi+180, 0., 0., 1.)\n highlight = marker.is_visible\n marker_thickness = 5 # must be thicker than wall\n self.make_cube((marker_thickness,s,s), color=color, highlight=highlight)\n glRotatef(-90, 0., 0., 1.)\n glRotatef(90, 1., 0., 0.)\n length = len(ascii(marker_number)) + 0.5\n glTranslatef(-s/4*length, -s/4, marker_thickness)\n glScalef(0.25, 0.2, 0.25)\n glutStrokeString(GLUT_STROKE_MONO_ROMAN, c_char_p(bytes(ascii(marker_number),'utf8')))\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_foreign_cube(self,cube_obj):\n global gl_lists\n cube_number = cube_obj.id\n pos = (cube_obj.x, cube_obj.y, cube_obj.z)\n color = color_white\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(*pos)\n # Transpose the matrix for sending to OpenCV\n s = light_cube_size_mm\n self.make_cube((s,s,s), color=color)\n glRotatef(-90, 0., 0., 1.)\n glTranslatef(-s/4, -s/4, s/2+0.5)\n glScalef(0.25, 0.2, 0.25)\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(ascii(cube_number)))\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_eye(self,size=(1,1,1), highlight=False, color=None, body=True, edges=True):\n glEnableClientState(GL_VERTEX_ARRAY)\n if color is None:\n glEnableClientState(GL_COLOR_ARRAY)\n if highlight:\n glColorPointer(3, GL_FLOAT, 0, cube_colors_1.tobytes())\n else:\n glColorPointer(3, GL_FLOAT, 0, cube_colors_0.tobytes())\n else:\n if not highlight:\n s = 0.5 # scale down the brightness if necessary\n color = (color[0]*s, color[1]*s, color[2]*s)\n glColor4f(*color,1)\n verts = camera_vertices* 1; # copy the array\n for i in range(0,24,3):\n verts[i ] *= size[0]\n verts[i+1] *= size[1]\n verts[i+2] *= size[2]\n if body:\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n glVertexPointer(3, GL_FLOAT, 0, verts.tobytes())\n glDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE, cube_cIndices.tobytes())\n if edges:\n # begin wireframe\n for i in range(0,24): verts[i] *= 1.02\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n glVertexPointer(3, GL_FLOAT, 0, verts.tobytes())\n glDisableClientState(GL_COLOR_ARRAY)\n if body:\n if highlight:\n glColor4f(*color_white,1)\n else:\n glColor4f(*color_black,1)\n else:\n if highlight:\n glColor4f(*color,1)\n else:\n s = 0.7 # scale down the brightness if necessary\n glColor4f(color[0]*s, color[1]*s, color[2]*s, 1)\n glDrawElements(GL_QUADS, 24, GL_UNSIGNED_BYTE, cube_cIndices.tobytes())\n # end wireframe\n glDisableClientState(GL_COLOR_ARRAY)\n glDisableClientState(GL_VERTEX_ARRAY)\n\n def make_camera(self,cameraobj):\n global gl_lists, cap, aruco_dict, parameters, F\n camera_number = cameraobj.id\n pos = (cameraobj.x, cameraobj.y, cameraobj.z)\n color = (color_orange, color_red, color_green, color_blue)[camera_number%4]\n valid_pose = (cameraobj.x, cameraobj.y, cameraobj.z)\n angle = cameraobj.theta\n phi = cameraobj.phi\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslatef(*pos)\n # Transpose the matrix for sending to OpenCV\n\n t = 
geometry.quat2rot(cos(phi/2),0,0,sin(phi/2)).transpose()\n rotmat = array.array('f',t.flatten()).tobytes()\n glMultMatrixf(rotmat)\n\n t = geometry.quat2rot( cos(-angle/2 + pi/4) ,0 ,sin(-angle/2 + pi/4) ,0 ).transpose()\n rotmat = array.array('f',t.flatten()).tobytes()\n glMultMatrixf(rotmat)\n\n s = light_cube_size_mm\n self.make_eye((s,s,s), color=color)\n\n glRotatef(-90, 0., 0., 1.)\n glTranslatef(-s/4, -s/4, s/2+0.5)\n glScalef(0.25, 0.2, 0.25)\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(ascii(camera_number%4)))\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_foreign_robot(self,obj):\n global gl_lists\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n\n # Draw the body\n p = (obj.x, obj.y, obj.z)\n color = (color_orange, color_red, color_green, color_blue)[obj.camera_id%4]\n glTranslatef(*p)\n glTranslatef(*robot_body_offset_mm)\n glRotatef(obj.theta*180/pi, 0, 0, 1)\n self.make_cube(robot_body_size_mm, color=color_white)\n\n # Draw the head\n glPushMatrix()\n glTranslatef(*robot_head_offset_mm)\n glRotatef(-self.robot.head_angle.degrees, 0, 1, 0)\n self.make_cube(robot_head_size_mm, color=color_white)\n glTranslatef(*( 0, 0, 36))\n glScalef(0.25, 0.2, 0.25)\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(ascii(obj.cozmo_id%9)))\n glPopMatrix()\n\n # Draw the lift\n glTranslatef(-robot_body_offset_mm[0], -robot_body_offset_mm[1], -robot_body_offset_mm[2])\n glPushMatrix()\n self.robot.kine.get_pose()\n lift_tran = self.robot.kine.joint_to_base('lift_attach')\n lift_pt = geometry.point(0, 0, 0)\n lift_point = self.tran_to_tuple(lift_tran.dot(lift_pt))\n glTranslatef(*lift_point)\n self.make_cube(lift_size_mm, color=color)\n glPopMatrix()\n\n # Draw the lift arms\n glPushMatrix()\n lift_pt = geometry.point(0, 0, lift_arm_spacing_mm / 2)\n lift_point = self.tran_to_tuple(lift_tran.dot(lift_pt))\n\n shoulder_tran = self.robot.kine.joint_to_base('shoulder')\n shoulder_pt = geometry.point(0, 0, lift_arm_spacing_mm / 2)\n shoulder_point = self.tran_to_tuple(shoulder_tran.dot(shoulder_pt));\n\n arm_point = ((shoulder_point[0] + lift_point[0]) / 2,\n (shoulder_point[1] + lift_point[1]) / 2,\n (shoulder_point[2] + lift_point[2]) / 2)\n\n arm_angle = atan2(lift_point[2] - shoulder_point[2],\n lift_point[0] - shoulder_point[0])\n\n glTranslatef(*arm_point)\n glRotatef(-(180 * arm_angle / pi), 0, 1, 0)\n self.make_cube((lift_arm_len_mm, lift_arm_diam_mm, lift_arm_diam_mm), color=color_white)\n glTranslatef(0, lift_arm_spacing_mm, 0)\n self.make_cube((lift_arm_len_mm, lift_arm_diam_mm, lift_arm_diam_mm), color=color_white)\n glPopMatrix()\n\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n\n @staticmethod\n def tran_to_tuple(tran):\n return (tran[0][0], tran[1][0], tran[2][0])\n\n def make_cozmo_robot(self):\n global gl_lists\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n\n # Draw the body\n cur_pose = self.robot.world.particle_filter.pose\n p = (cur_pose[0], cur_pose[1], self.robot.pose.position.z)\n glTranslatef(*p)\n glTranslatef(*robot_body_offset_mm)\n glRotatef(cur_pose[2]*180/pi, 0, 0, 1)\n self.make_cube(robot_body_size_mm, highlight=self.robot.is_on_charger)\n\n # Draw the head\n glPushMatrix()\n glTranslatef(*robot_head_offset_mm)\n glRotatef(-self.robot.head_angle.degrees, 0, 1, 0)\n self.make_cube(robot_head_size_mm, highlight=self.robot.is_on_charger)\n glPopMatrix()\n\n # Draw the lift\n glTranslatef(-robot_body_offset_mm[0], -robot_body_offset_mm[1], -robot_body_offset_mm[2])\n glPushMatrix()\n 
self.robot.kine.get_pose()\n lift_tran = self.robot.kine.joint_to_base('lift_attach')\n lift_pt = geometry.point(0, 0, 0)\n lift_point = self.tran_to_tuple(lift_tran.dot(lift_pt))\n glTranslatef(*lift_point)\n self.make_cube(lift_size_mm, highlight=self.robot.is_on_charger)\n glPopMatrix()\n\n # Draw the lift arms\n glPushMatrix()\n lift_pt = geometry.point(0, 0, lift_arm_spacing_mm / 2)\n lift_point = self.tran_to_tuple(lift_tran.dot(lift_pt))\n\n shoulder_tran = self.robot.kine.joint_to_base('shoulder')\n shoulder_pt = geometry.point(0, 0, lift_arm_spacing_mm / 2)\n shoulder_point = self.tran_to_tuple(shoulder_tran.dot(shoulder_pt));\n\n arm_point = ((shoulder_point[0] + lift_point[0]) / 2,\n (shoulder_point[1] + lift_point[1]) / 2,\n (shoulder_point[2] + lift_point[2]) / 2)\n\n arm_angle = atan2(lift_point[2] - shoulder_point[2],\n lift_point[0] - shoulder_point[0])\n\n glTranslatef(*arm_point)\n glRotatef(-(180 * arm_angle / pi), 0, 1, 0)\n self.make_cube((lift_arm_len_mm, lift_arm_diam_mm, lift_arm_diam_mm),\n highlight=self.robot.is_on_charger)\n glTranslatef(0, lift_arm_spacing_mm, 0)\n self.make_cube((lift_arm_len_mm, lift_arm_diam_mm, lift_arm_diam_mm),\n highlight=self.robot.is_on_charger)\n glPopMatrix()\n\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_axes(self):\n global gl_lists\n if not self.show_axes: return None\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n len = axis_length\n w = axis_width\n glTranslatef(len/2., 0., 0.)\n self.make_cube((len,w,w), highlight=True, color=color_red, edges=False)\n glPopMatrix()\n glPushMatrix()\n glTranslatef(0., len/2., 0.)\n self.make_cube((w,len,w), highlight=True, color=color_green, edges=False)\n glPopMatrix()\n glPushMatrix()\n glTranslatef(0., 0., len/2.)\n self.make_cube((w,w,len), highlight=True, color=color_blue, edges=False)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_gazepoint(self):\n global gl_lists\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n glPushMatrix()\n glTranslate(fixation_point[0], fixation_point[1], fixation_point[2])\n s = 3.\n self.make_cube((s,s,s), highlight=True, color=(1.0, 0.9, 0.1), edges=False)\n glPopMatrix()\n glEndList()\n gl_lists.append(c)\n\n def make_objects(self):\n if self.robot.use_shared_map:\n items = tuple(self.robot.world.world_map.shared_objects.items())\n else:\n items = tuple(self.robot.world.world_map.objects.items())\n for (key,obj) in items:\n if isinstance(obj, worldmap.LightCubeObj):\n self.make_light_cube(obj)\n elif isinstance(obj, worldmap.CustomCubeObj):\n self.make_custom_cube(key,obj)\n elif isinstance(obj, worldmap.WallObj):\n self.make_wall(obj)\n elif isinstance(obj, worldmap.DoorwayObj):\n pass # doorways must come last, due to transparency\n elif isinstance(obj, worldmap.ChipObj):\n self.make_chip(obj)\n elif isinstance(obj, worldmap.FaceObj):\n self.make_face(obj)\n elif isinstance(obj, worldmap.CameraObj):\n self.make_camera(obj)\n elif isinstance(obj, worldmap.RobotForeignObj):\n self.make_foreign_robot(obj)\n elif isinstance(obj, worldmap.LightCubeForeignObj):\n self.make_foreign_cube(obj)\n elif isinstance(obj, worldmap.CustomMarkerObj):\n self.make_custom_marker(obj)\n elif isinstance(obj, worldmap.ArucoMarkerObj):\n self.make_aruco_marker(obj)\n # Make the doorways last, so transparency works correctly\n for (key,obj) in items:\n if isinstance(obj, worldmap.DoorwayObj):\n self.make_doorway(obj)\n\n def make_memory(self):\n global gl_lists\n quadtree = self.robot.world.nav_memory_map\n if quadtree and 
self.show_memory_map:\n c = glGenLists(1)\n glNewList(c, GL_COMPILE)\n self.memory_tree_crawl(quadtree.root_node, 0)\n glEndList()\n gl_lists.append(c)\n\n def memory_tree_crawl(self, node, depth):\n if node.content == NodeContentTypes.ClearOfObstacle:\n obj_color = color_green\n elif node.content == NodeContentTypes.ClearOfCliff:\n obj_color = color_light_green\n elif node.content == NodeContentTypes.ObstacleCube:\n obj_color = color_orange\n elif node.content == NodeContentTypes.ObstacleCharger:\n obj_color = color_blue\n elif node.content == NodeContentTypes.VisionBorder:\n obj_color = color_cyan\n elif node.content == NodeContentTypes.Cliff:\n obj_color = color_red\n else:\n obj_color = color_light_gray\n\n glPushMatrix()\n p = (node.center.x, node.center.y, depth)\n glTranslatef(*p)\n obj_size = (node.size, node.size, 1)\n self.make_cube(obj_size, highlight=False, color=obj_color)\n glPopMatrix()\n if node.children is not None:\n for child in node.children:\n self.memory_tree_crawl(child,depth+1)\n\n def make_shapes(self):\n global gl_lists\n gl_lists = []\n self.make_axes()\n self.make_gazepoint()\n self.make_charger()\n self.make_cozmo_robot()\n self.make_memory()\n self.make_floor()\n self.make_objects() # walls, light cubes, custom cubes, and chips\n\n def del_shapes(self):\n global gl_lists\n for id in gl_lists:\n glDeleteLists(id,1)\n\n # ================ Window Setup ================\n\n def window_creator(self):\n global WINDOW\n WINDOW = opengl.create_window(bytes(self.windowName,'utf-8'), (self.width,self.height))\n glutDisplayFunc(self.display)\n glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyPressed)\n glutSpecialFunc(self.specialKeyPressed)\n glViewport(0,0,self.width,self.height)\n glClearColor(*self.bgcolor, 0)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n # Enable transparency for doorways: see\n # https://www.opengl.org/archives/resources/faq/technical/transparency.htm\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n\n def start(self): # Displays in background\n self.robot.world.request_nav_memory_map(1)\n if not WINDOW:\n opengl.init()\n opengl.CREATION_QUEUE.append(self.window_creator)\n if pf.system() == 'Darwin':\n print(\"Type 'option' + 'h' in the world map window for help.\")\n else:\n print(\"Type 'h' in the world map window for help.\")\n\n def display(self):\n global DISPLAY_ENABLED, EXCEPTION_COUNTER\n if not DISPLAY_ENABLED: return\n global gl_lists\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n field_of_view = 50 # degrees\n near_clip = 5\n far_clip = 600 # 20.0\n gluPerspective(field_of_view, self.aspect, near_clip, far_clip)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n wscale = 0.1\n rotmat = array.array('f',[\n wscale, 0, 0, 0,\n 0, wscale, 0, 0,\n 0, 0, wscale, 0,\n 0, 0, 0, 1]).tobytes()\n glMultMatrixf(rotmat)\n # Model transformation switches to robot coordinates: z is up, x forward, y left.\n # View transformation moves the camera, keeping it pointed at the fixation point.\n # Keyboard commands: translations move the fixation point, rotations orbit the camera.\n pitch = camera_rotation[1]\n yaw = camera_rotation[2]\n global camera_loc\n camera_loc = [\n camera_distance * cos(radians(yaw)) + fixation_point[0],\n camera_distance * sin(radians(yaw)) + fixation_point[1],\n camera_distance * sin(radians(pitch)) + fixation_point[2]\n ]\n gluLookAt(*camera_loc, *fixation_point, 0.0, 0.0, 1.0)\n try:\n self.make_shapes()\n for id in gl_lists:\n 
glCallList(id)\n            glutSwapBuffers()\n            self.del_shapes()\n        except Exception as e:\n            print('Worldmap viewer exception:',e)\n            EXCEPTION_COUNTER += 1\n            if EXCEPTION_COUNTER >= 2:\n                print('\\n\\nworldmap_viewer: Too many errors. Stopping redisplay.')\n                DISPLAY_ENABLED = False\n            else:\n                raise\n\n\n    def keyPressed(self, key, x, y):\n        global DISPLAY_ENABLED, EXCEPTION_COUNTER\n        if ord(key) == 27:\n            print(\"Use 'exit' to quit.\")\n            #return\n        global fixation_point, camera_rotation, camera_distance, print_camera\n        heading = atan2(camera_loc[1]-fixation_point[1], camera_loc[0]-fixation_point[0])*180/pi\n        translate_step = 5\n        if key == b'a':\n            fixation_point[0] -= translate_step * cos(radians(heading+90))\n            fixation_point[1] -= translate_step * sin(radians(heading+90))\n        elif key == b'd':\n            fixation_point[0] += translate_step * cos(radians(heading+90))\n            fixation_point[1] += translate_step * sin(radians(heading+90))\n        elif key == b'w':\n            fixation_point[0] -= translate_step * cos(radians(heading))\n            fixation_point[1] -= translate_step * sin(radians(heading))\n        elif key == b's':\n            fixation_point[0] += translate_step * cos(radians(heading))\n            fixation_point[1] += translate_step * sin(radians(heading))\n        elif key == b'>':\n            camera_distance += translate_step\n        elif key == b'<':\n            camera_distance -= translate_step\n        elif key == b'j':\n            camera_rotation[2] -= 2.5\n        elif key == b'l':\n            camera_rotation[2] += 2.5\n        elif key == b'k':\n            camera_rotation[1] -= 2.5\n        elif key == b'i':\n            camera_rotation[1] += 2.5\n        elif key == b'x':\n            self.show_axes = not self.show_axes\n        elif key == b'm':\n            self.show_memory_map = not self.show_memory_map\n        elif key == b'h':\n            if pf.system() == 'Darwin':\n                print(help_text_mac)\n            else:\n                print(help_text)\n        elif key == b'v':\n            print_camera = not print_camera\n            if not print_camera:\n                print(\"Halted viewing parameters display. Press 'v' again to resume.\")\n        elif key == b'z':\n            fixation_point = initial_fixation_point.copy()\n            camera_rotation = initial_camera_rotation.copy()\n            camera_distance = initial_camera_distance\n        elif key == b'#':\n            DISPLAY_ENABLED = not DISPLAY_ENABLED\n            if DISPLAY_ENABLED:\n                EXCEPTION_COUNTER = 0\n            print('Worldmap viewer redisplay %sabled.' %\n                  ('en' if DISPLAY_ENABLED else 'dis'))\n        if print_camera:\n            pitch = camera_rotation[1]\n            yaw = camera_rotation[2]\n            print('pitch=%5.1f yaw=%5.1f dist=%f' % (pitch,yaw,camera_distance),\n                  ' gazepoint[%5.1f %5.1f %5.1f]' %\n                  (fixation_point[0], fixation_point[1], fixation_point[2]),\n                  ' camera[%5.1f %5.1f %5.1f]' % (camera_loc[0], camera_loc[1], camera_loc[2]))\n        self.display()\n\n    def specialKeyPressed(self, key, x, y):\n        global fixation_point, camera_rotation, camera_distance\n        heading = -camera_rotation[1]\n        if key == GLUT_KEY_LEFT:\n            camera_rotation[2] = (camera_rotation[2] - 2.5) % 360\n        elif key == GLUT_KEY_RIGHT:\n            camera_rotation[2] = (camera_rotation[2] + 2.5) % 360\n        elif key == GLUT_KEY_UP:\n            camera_rotation[1] = (camera_rotation[1] + 90 + 2.5) % 180 - 90\n        elif key == GLUT_KEY_DOWN:\n            camera_rotation[1] = (camera_rotation[1] + 90 - 2.5) % 180 - 90\n        elif key == GLUT_KEY_PAGE_UP:\n            fixation_point[2] += 1\n        elif key == GLUT_KEY_PAGE_DOWN:\n            fixation_point[2] -= 1\n        self.display()\n\n    def reshape(self, width, height):\n        self.width = width\n        self.height = height\n        self.aspect = self.width/self.height\n        glViewport(0, 0, width, height)\n
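\n# Usage sketch (hypothetical; assumes a connected robot object and that this\n# file's viewer class is instantiated as in cozmo-tools, here called\n# WorldMapViewer):\n#   viewer = WorldMapViewer(robot)\n#   viewer.start()   # opens the OpenGL world map window in the background\n# Press 'h' in the window to print the keyboard help text.\n"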
},
{
"alpha_fraction": 0.49547919631004333,
"alphanum_fraction": 0.5406871438026428,
"avg_line_length": 38.5,
"blob_id": "c9d363385814df45cb73426f4844f6a3c6f242e4",
"content_id": "9ee3fa6a070f45ff3fd66396d4018cf88ccfe526",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 14,
"path": "/cozmo_fsm/examples/PF_Cube.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo_fsm import *\nfrom cozmo.util import degrees, Pose\n\nclass PF_Cube(StateMachineProgram):\n def __init__(self):\n landmarks = {\n cube1 : Pose( 55, 160, 0, angle_z=degrees(90)),\n cube2 : Pose(160, 55, 0, angle_z=degrees( 0)),\n cube3 : Pose(160, -55, 0, angle_z=degrees( 0))\n }\n pf = ParticleFilter(robot,\n landmarks = landmarks,\n sensor_model = CubeSensorModel(robot))\n super().__init__(particle_filter=pf, particle_viewer=True)\n"
},
{
"alpha_fraction": 0.6311659216880798,
"alphanum_fraction": 0.6474215388298035,
"avg_line_length": 27.317461013793945,
"blob_id": "6e89c71d42ae60a0d96038869bd2e736d9d37b01",
"content_id": "7aa5d62f4e14a3400630dba41dab917db687d209",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1784,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 63,
"path": "/cozmo_fsm/sim_robot.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCreate a dummy robot and world so we can use cozmo-tools\nclasses without having to connect to a real robot.\n\"\"\"\n\nimport asyncio\n\ntry:\n import cv2\n ARUCO_DICT_4x4_100 = cv2.aruco.DICT_4X4_100\nexcept:\n ARUCO_DICT_4x4_100 = None\n\nimport cozmo\nfrom cozmo.util import Distance, Angle, Pose\n\nfrom .cozmo_kin import CozmoKinematics\nfrom .evbase import EventRouter\nfrom .aruco import Aruco\nfrom .particle import SLAMParticleFilter\nfrom .rrt import RRT, RRTNode\nfrom .worldmap import WorldMap\n\nclass SimWorld():\n def __init__(self):\n self.path_viewer = None\n self.particle_viewer = None\n self.worldmap_viewer = None\n\nclass SimServer():\n def __init__(self):\n self.started = False\n\nclass SimRobot():\n def __init__(self, run_in_cloud=False):\n robot = self\n\n robot.loop = asyncio.get_event_loop()\n\n if not run_in_cloud:\n robot.erouter = EventRouter()\n robot.erouter.robot = robot\n robot.erouter.start()\n\n robot.head_angle = Angle(radians=0)\n robot.shoulder_angle = Angle(radians=0)\n robot.lift_height = Distance(distance_mm=0)\n robot.pose = Pose(0,0,0,angle_z=Angle(degrees=0))\n robot.camera = None\n robot.carrying = None\n\n robot.world = SimWorld()\n robot.world.aruco = Aruco(robot, ARUCO_DICT_4x4_100)\n robot.world.light_cubes = dict()\n robot.world._faces = dict()\n robot.world.charger = None\n robot.world.server = SimServer()\n robot.world.path_viewer = None\n\n robot.world.particle_filter = SLAMParticleFilter(robot)\n robot.kine = CozmoKinematics(robot) # depends on particle filter\n robot.world.rrt = RRT(robot) # depends on kine\n robot.world.world_map = WorldMap(robot)\n"
},
{
"alpha_fraction": 0.6094240546226501,
"alphanum_fraction": 0.6209424138069153,
"avg_line_length": 35.730770111083984,
"blob_id": "d1d0e62ed48e7a74367218b25fc19aa0833041a7",
"content_id": "1f38953e61ee2a1bb18b03a199cd05afb04b3379",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1910,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 52,
"path": "/cozmo_fsm/examples/TapSpeak.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The TapSpeak demo shows Cozmo responding to cube tap events. A\n TapTrans transition is used to set up a handler for taps. The\n example also illustrates how the TapTrans transition does wildcard\n matching if not given an argument. By passing a cube as an argument\n to the TapTrans constructor can use it to look for taps on a\n specific cube.\n\n Behavior: Cozmo starts out by saying 'Tap a cube'. Then, every time\n a cube is tapped, Cozmo says the cube name and goes back to\n listening for more tap events.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nfrom cozmo_fsm import *\n\nclass SayCube(Say):\n \"\"\"Say the name of a cube.\"\"\"\n def start(self, event=None, \\\n cube_names = ['paperclip', 'anglepoise lamp', 'deli slicer']):\n cube_number = next(k for k,v in self.robot.world.light_cubes.items() \\\n if v == event.source)\n self.text = cube_names[cube_number-1]\n super().start(event)\n\nclass TapSpeak(StateMachineProgram):\n def setup(self):\n \"\"\"\n intro: Say('Tap a cube.') =C=> wait\n \n wait: StateNode() =Tap()=> speak\n \n speak: SayCube() =C=> wait\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:16:53 2020:\n \n intro = Say('Tap a cube.') .set_name(\"intro\") .set_parent(self)\n wait = StateNode() .set_name(\"wait\") .set_parent(self)\n speak = SayCube() .set_name(\"speak\") .set_parent(self)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(intro) .add_destinations(wait)\n \n taptrans1 = TapTrans() .set_name(\"taptrans1\")\n taptrans1 .add_sources(wait) .add_destinations(speak)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(speak) .add_destinations(wait)\n \n return self\n"
},
{
"alpha_fraction": 0.5428358912467957,
"alphanum_fraction": 0.5533609390258789,
"avg_line_length": 44.628753662109375,
"blob_id": "4dde540ad6c995bd97785f3302578e560e3ae5eb",
"content_id": "166535cdd9e02079881bbc4eb92a6c00e680ccc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 57767,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 1266,
"path": "/cozmo_fsm/particle.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nParticle filter localization.\n\"\"\"\n\nimport math, array, random\nfrom math import pi, sqrt, sin, cos, atan2, exp\nimport numpy as np\n\ntry: import cv2\nexcept: pass\n\nimport cozmo\nfrom cozmo.util import Pose\n\nfrom .geometry import wrap_angle, wrap_selected_angles, tprint, rotation_matrix_to_euler_angles\nfrom .aruco import ArucoMarker\nfrom .cozmo_kin import center_of_rotation_offset\nfrom .worldmap import WorldObject, WallObj, wall_marker_dict, ArucoMarkerObj\nfrom .perched import Cam\n\nclass Particle():\n def __init__(self, index=-1):\n self.x = 0\n self.y = 0\n self.theta = 0\n self.log_weight = 0\n self.weight = 1\n self.index = index\n\n def __repr__(self):\n return '<Particle %d: (%.2f, %.2f) %.1f deg. log_wt=%f>' % \\\n (self.index, self.x, self.y, self.theta*80/pi, self.log_weight)\n\n#================ Particle Initializers ================\n\nclass ParticleInitializer():\n def __init__(self):\n self.pf = None # must be filled in after creation\n\nclass RandomWithinRadius(ParticleInitializer):\n \"\"\" Normally distribute particles within a radius, with random heading. \"\"\"\n def __init__(self,radius=200):\n super().__init__()\n self.radius = radius\n\n def initialize(self, robot):\n for p in self.pf.particles:\n qangle = random.random()*2*pi\n r = random.gauss(0, self.radius/2) + self.radius/1.5\n p.x = r * cos(qangle)\n p.y = r * sin(qangle)\n p.theta = random.random()*2*pi\n p.log_weight = 0.0\n p.weight = 1.0\n self.pf.pose = (0, 0, 0)\n self.pf.motion_model.old_pose = robot.pose\n\nclass RobotPosition(ParticleInitializer):\n \"\"\" Initialize all particles to the robot's current position or a constant;\n the motion model will jitter them. \"\"\"\n def __init__(self, x=None, y=None, theta=None):\n super().__init__()\n self.x = x\n self.y = y\n self.theta = theta\n\n def initialize(self, robot):\n if self.x is None:\n x = robot.pose.position.x\n y = robot.pose.position.y\n theta = robot.pose.rotation.angle_z.radians\n else:\n x = self.x\n y = self.y\n theta = self.theta\n for p in self.pf.particles:\n p.x = x\n p.y = y\n p.theta = theta\n p.log_weight = 0.0\n p.weight = 1.0\n self.pf.pose = (x, y, theta)\n self.pf.motion_model.old_pose = robot.pose\n\n\n#================ Motion Model ================\n\nclass MotionModel():\n def __init__(self, robot):\n self.robot = robot\n\nclass DefaultMotionModel(MotionModel):\n def __init__(self, robot, sigma_trans=0.1, sigma_rot=0.01):\n super().__init__(robot)\n self.sigma_trans = sigma_trans\n self.sigma_rot = sigma_rot\n self.old_pose = robot.pose\n\n def move(self, particles):\n old_pose = self.old_pose\n new_pose = self.robot.pose\n self.old_pose = new_pose\n if not new_pose.is_comparable(old_pose):\n return # can't path integrate if the robot switched reference frames\n old_xyz = old_pose.position.x_y_z\n new_xyz = new_pose.position.x_y_z\n old_hdg = old_pose.rotation.angle_z.radians\n new_hdg = new_pose.rotation.angle_z.radians\n turn_angle = wrap_angle(new_hdg - old_hdg)\n cor = center_of_rotation_offset\n old_rx = old_xyz[0] + cor * cos(old_hdg)\n old_ry = old_xyz[1] + cor * sin(old_hdg)\n new_rx = new_xyz[0] + cor * cos(new_hdg)\n new_ry = new_xyz[1] + cor * sin(new_hdg)\n dist = sqrt((new_rx-old_rx)**2 + (new_ry-old_ry)**2)\n # Did we drive forward, or was it backward?\n fwd_xy = (old_xyz[0] + dist * cos(old_hdg+turn_angle/2),\n old_xyz[1] + dist * sin(old_hdg+turn_angle/2))\n rev_xy = (old_xyz[0] - dist * cos(old_hdg+turn_angle/2),\n old_xyz[1] - dist * sin(old_hdg+turn_angle/2))\n fwd_dx = 
fwd_xy[0] - new_xyz[0]\n fwd_dy = fwd_xy[1] - new_xyz[1]\n rev_dx = rev_xy[0] - new_xyz[0]\n rev_dy = rev_xy[1] - new_xyz[1]\n if (fwd_dx*fwd_dx + fwd_dy*fwd_dy) > (rev_dx*rev_dx + rev_dy*rev_dy):\n dist = - dist # we drove backward\n rot_var = 0 if abs(turn_angle) < 0.001 else self.sigma_rot\n for p in particles:\n pdist = dist * (1 + random.gauss(0, self.sigma_trans))\n pturn = random.gauss(turn_angle, rot_var)\n # Correct for the center of rotation being behind the base frame\n # (xc,yc) is the center of rotation\n xc = p.x + cor * cos(p.theta)\n yc = p.y + cor * sin(p.theta)\n # Make half the turn, translate, then complete the turn\n p.theta = p.theta + pturn/2\n p.x = xc + cos(p.theta) * pdist\n p.y = yc + sin(p.theta) * pdist\n p.theta = wrap_angle(p.theta + pturn/2)\n # Move from center of rotation back to (rotated) base frame\n p.x = p.x - cor * cos(p.theta)\n p.y = p.y - cor * sin(p.theta)\n\n#================ Sensor Model ================\n\nclass SensorModel():\n def __init__(self, robot, landmarks=None):\n self.robot = robot\n if landmarks is None:\n landmarks = dict()\n self.set_landmarks(landmarks)\n self.last_evaluate_pose = robot.pose\n\n def set_landmarks(self,landmarks):\n self.landmarks = landmarks\n\n def compute_robot_motion(self):\n # How much did we move since last evaluation?\n if self.robot.pose.is_comparable(self.last_evaluate_pose):\n dx = self.robot.pose.position.x - self.last_evaluate_pose.position.x\n dy = self.robot.pose.position.y - self.last_evaluate_pose.position.y\n dist = sqrt(dx*dx + dy*dy)\n turn_angle = wrap_angle(self.robot.pose.rotation.angle_z.radians -\n self.last_evaluate_pose.rotation.angle_z.radians)\n else:\n dist = 0\n turn_angle = 0\n print('** Robot origin_id changed from', self.last_evaluate_pose.origin_id,\n 'to', self.robot.pose.origin_id)\n self.last_evaluate_pose = self.robot.pose\n return (dist,turn_angle)\n\nclass ArucoDistanceSensorModel(SensorModel):\n \"\"\"Sensor model using only landmark distances.\"\"\"\n def __init__(self, robot, landmarks=None, distance_variance=100):\n if landmarks is None:\n landmarks = dict()\n super().__init__(robot,landmarks)\n self.distance_variance = distance_variance\n\n def evaluate(self,particles,force=False):\n # Returns true if particles were evaluated.\n # Called with force=True from particle_viewer to force evaluation.\n\n # Only evaluate if the robot moved enough for evaluation to be worthwhile.\n (dist,turn_angle) = self.compute_robot_motion()\n if (not force) and (dist < 5) and abs(turn_angle) < math.radians(5):\n return False\n self.last_evaluate_pose = self.robot.pose\n # Cache seen_marker_objects because vision is in another thread.\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects\n # Process each seen marker:\n for (id, marker) in seen_marker_objects.items():\n if marker.id_string in self.landmarks:\n sensor_dist = marker.camera_distance\n landmark_spec = self.landmarks[marker.id_string]\n lm_x = landmark_spec.position.x\n lm_y = landmark_spec.position.y\n for p in particles:\n dx = lm_x - p.x\n dy = lm_y - p.y\n predicted_dist = sqrt(dx*dx + dy*dy)\n error = sensor_dist - predicted_dist\n p.log_weight -= (error*error)/self.distance_variance\n return True\n\nclass ArucoBearingSensorModel(SensorModel):\n \"\"\"Sensor model using only landmark bearings.\"\"\"\n def __init__(self, robot, landmarks=None, bearing_variance=0.1):\n if landmarks is None:\n landmarks = dict()\n super().__init__(robot,landmarks)\n self.bearing_variance = bearing_variance\n\n def 
evaluate(self,particles,force=False):\n # Returns true if particles were evaluated.\n # Called with force=True from particle_viewer to force evaluation.\n\n # Only evaluate if the robot moved enough for evaluation to be worthwhile.\n (dist,turn_angle) = self.compute_robot_motion()\n if not force and dist < 5 and abs(turn_angle) < math.radians(5):\n return False\n self.last_evaluate_pose = self.robot.pose\n # Cache seen_marker_objects because vision is in another thread.\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects\n # Process each seen marker:\n for id in seen_marker_objects:\n marker_id = 'Aruco-' + str(id)\n if marker_id in self.landmarks:\n sensor_coords = seen_marker_objects[id].camera_coords\n sensor_bearing = atan2(sensor_coords[0], sensor_coords[2])\n landmark_spec = self.landmarks[marker_id]\n lm_x = landmark_spec.position.x\n lm_y = landmark_spec.position.y\n for p in particles:\n dx = lm_x - p.x\n dy = lm_y - p.y\n predicted_bearing = wrap_angle(atan2(dy,dx) - p.theta)\n error = wrap_angle(sensor_bearing - predicted_bearing)\n p.log_weight -= (error * error) / self.bearing_variance\n return True\n\nclass ArucoCombinedSensorModel(SensorModel):\n \"\"\"Sensor model using combined distance and bearing information.\"\"\"\n def __init__(self, robot, landmarks=None, distance_variance=200):\n if landmarks is None:\n landmarks = dict()\n super().__init__(robot,landmarks)\n self.distance_variance = distance_variance\n\n def evaluate(self,particles,force=False):\n # Returns true if particles were evaluated.\n # Called with force=True from particle_viewer to force evaluation.\n\n # Don't evaluate if robot is still moving; ArUco info will be bad.\n if self.robot.is_moving:\n return False\n\n # Only evaluate if the robot moved enough for evaluation to be worthwhile.\n (dist,turn_angle) = self.compute_robot_motion()\n if not force and dist < 5 and abs(turn_angle) < math.radians(5):\n return False\n self.last_evaluate_pose = self.robot.pose\n # Cache seen_marker_objects because vision is in another thread.\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects\n # Process each seen marker:\n for id in seen_marker_objects:\n marker_id = 'Aruco-' + str(id)\n if marker_id in self.landmarks:\n sensor_dist = seen_marker_objects[id].camera_distance\n sensor_coords = seen_marker_objects[id].camera_coords\n sensor_bearing = atan2(sensor_coords[0], sensor_coords[2])\n landmark_spec = self.landmarks[marker_id]\n lm_x = landmark_spec.position.x\n lm_y = landmark_spec.position.y\n for p in particles:\n # Use sensed bearing and distance to get particle's\n # estimate of landmark position on the world map.\n predicted_pos_x = p.x + sensor_dist * cos(p.theta + sensor_bearing)\n predicted_pos_y = p.y + sensor_dist * sin(p.theta + sensor_bearing)\n dx = lm_x - predicted_pos_x\n dy = lm_y - predicted_pos_y\n error_sq = dx*dx + dy*dy\n p.log_weight -= error_sq / self.distance_variance\n return True\n\nclass CubeOrientSensorModel(SensorModel):\n \"\"\"Sensor model using only orientation information.\"\"\"\n def __init__(self, robot, landmarks=None, distance_variance=200):\n if landmarks is None:\n landmarks = dict()\n super().__init__(robot,landmarks)\n self.distance_variance = distance_variance\n\n def evaluate(self,particles,force=False):\n # Returns true if particles were evaluated.\n # Called with force=True from particle_viewer to force evaluation.\n\n # Only evaluate if the robot moved enough for evaluation to be worthwhile.\n (dist,turn_angle) = 
self.compute_robot_motion()\n        if not force and dist < 5 and abs(turn_angle) < math.radians(5):\n            return False\n        self.last_evaluate_pose = self.robot.pose\n        seenCubes = [cube for cube in self.robot.world.light_cubes.values()\n                     if cube.is_visible]\n        # Process each seen cube if it's a landmark:\n        for cube in seenCubes:\n            if cube in self.landmarks:\n                sensor_dx = cube.pose.position.x - self.robot.pose.position.x\n                sensor_dy = cube.pose.position.y - self.robot.pose.position.y\n                sensor_dist = sqrt(sensor_dx*sensor_dx + sensor_dy*sensor_dy)\n                angle = atan2(sensor_dy,sensor_dx)\n                sensor_bearing = \\\n                    wrap_angle(angle - self.robot.pose.rotation.angle_z.radians)\n                #sensor_orient = wrap_angle(robot.pose.rotation.angle_z.radians -\n                #                           cube.pose.rotation.angle_z.radians +\n                #                           sensor_bearing)\n                # simplifies to...\n                sensor_orient = wrap_angle(angle - cube.pose.rotation.angle_z.radians)\n\n                landmark_spec = self.landmarks[cube]\n                lm_x = landmark_spec.position.x\n                lm_y = landmark_spec.position.y\n                lm_orient = landmark_spec.rotation.angle_z.radians\n\n                for p in particles:\n                    # ... Orientation error:\n                    #predicted_bearing = wrap_angle(atan2(lm_y-p.y, lm_x-p.x) - p.theta)\n                    #predicted_orient = wrap_angle(p.theta - lm_orient + predicted_bearing)\n                    # simplifies to...\n                    predicted_orient = wrap_angle(atan2(lm_y-p.y, lm_x-p.x) - lm_orient)\n                    error_sq = ((predicted_orient - sensor_orient)*sensor_dist)**2\n                    p.log_weight -= error_sq / self.distance_variance\n        return True\n\nclass CubeSensorModel(SensorModel):\n    \"\"\"Sensor model using combined distance, bearing, and orientation information.\"\"\"\n    def __init__(self, robot, landmarks=None, distance_variance=200):\n        if landmarks is None:\n            landmarks = dict()\n        super().__init__(robot,landmarks)\n        self.distance_variance = distance_variance\n\n    def evaluate(self,particles,force=False):\n        # Returns true if particles were evaluated.\n        # Called with force=True from particle_viewer to force evaluation.\n\n        # Only evaluate if the robot moved enough for evaluation to be worthwhile.\n        (dist,turn_angle) = self.compute_robot_motion()\n        if not force and dist < 5 and abs(turn_angle) < math.radians(5):\n            return False\n        self.last_evaluate_pose = self.robot.pose\n        seenCubes = [cube for cube in self.robot.world.light_cubes.values() if cube.is_visible]\n        # Process each seen cube if it's a landmark:\n        for cube in seenCubes:\n            cube_id = 'Cube-' + str(cube.cube_id)\n            if cube_id in self.landmarks:\n                sensor_dx = cube.pose.position.x - self.robot.pose.position.x\n                sensor_dy = cube.pose.position.y - self.robot.pose.position.y\n                sensor_dist = sqrt(sensor_dx*sensor_dx + sensor_dy*sensor_dy)\n                angle = atan2(sensor_dy,sensor_dx)\n                sensor_bearing = wrap_angle(angle - self.robot.pose.rotation.angle_z.radians)\n                #sensor_orient = wrap_angle(robot.pose.rotation.angle_z.radians -\n                #                           cube.pose.rotation.angle_z.radians +\n                #                           sensor_bearing)\n                # simplifies to...\n                sensor_orient = wrap_angle(angle - cube.pose.rotation.angle_z.radians)\n\n                landmark_spec = self.landmarks[cube_id]\n                lm_x = landmark_spec.position.x\n                lm_y = landmark_spec.position.y\n                lm_orient = landmark_spec.rotation.angle_z.radians\n\n                for p in particles:\n                    # ... Bearing and distance error:\n                    # Use sensed bearing and distance to get particle's\n                    # prediction of landmark position on the world map.\n                    predicted_pos_x = p.x + sensor_dist * cos(p.theta + sensor_bearing)\n                    predicted_pos_y = p.y + sensor_dist * sin(p.theta + sensor_bearing)\n                    dx = lm_x - predicted_pos_x\n                    dy = lm_y - predicted_pos_y\n                    error1_sq = dx*dx + dy*dy\n                    # ... 
Orientation error:\n #predicted_bearing = wrap_angle(atan2(lm_y-p.y, lm_x-p.x) - p.theta)\n #predicted_orient = wrap_angle(p.theta - lm_orient + predicted_bearing)\n # simplifies to...\n predicted_orient = wrap_angle(atan2(lm_y-p.y, lm_x-p.x) - lm_orient)\n error2_sq = (sensor_dist*wrap_angle(predicted_orient - sensor_orient))**2\n\n error_sq = error1_sq + error2_sq\n p.log_weight -= error_sq / self.distance_variance\n return True\n\n\n#================ Particle Filter ================\n\nclass ParticleFilter():\n # Particle filter state:\n LOCALIZED = 'localized' # Normal\n LOCALIZING = 'localizing' # Trying to use LMs to localize\n LOST = 'lost' # De-localized and no LMs in view\n\n def __init__(self, robot, num_particles=500,\n initializer = RandomWithinRadius(),\n motion_model = \"default\",\n sensor_model = \"default\",\n particle_factory = Particle,\n landmarks = None):\n if landmarks is None:\n landmarks = dict() # make a fresh dict each time\n self.robot = robot\n self.num_particles = num_particles\n self.initializer = initializer\n self.initializer.pf = self\n\n if motion_model == \"default\":\n motion_model = DefaultMotionModel(robot)\n self.motion_model = motion_model\n self.motion_model.pf = self\n\n if sensor_model == \"default\":\n sensor_model = ArucoCombinedSensorModel(robot)\n if sensor_model:\n sensor_model.set_landmarks(landmarks)\n self.sensor_model = sensor_model\n self.sensor_model.pf = self\n\n self.particle_factory = particle_factory\n self.particles = [particle_factory(i) for i in range(num_particles)]\n self.best_particle = self.particles[0]\n self.min_log_weight = -300 # prevent floating point underflow in exp()\n self.initializer.initialize(robot)\n self.exp_weights = np.empty(self.num_particles)\n self.cdf = np.empty(self.num_particles)\n self.variance = (np.array([[0,0],[0,0]]), 0.)\n self.new_indices = [0] * num_particles # np.empty(self.num_particles, dtype=np.int)\n self.new_x = [0.0] * num_particles # np.empty(self.num_particles)\n self.new_y = [0.0] * num_particles # np.empty(self.num_particles)\n self.new_theta = [0.0] * num_particles # np.empty(self.num_particles)\n self.pose = (0., 0., 0.)\n self.dist_jitter = 15 # mm\n self.angle_jitter = 10 / 180 * pi\n self.state = self.LOST\n\n def move(self):\n self.motion_model.move(self.particles)\n if self.sensor_model.evaluate(self.particles): # true if log_weights changed\n var = self.update_weights()\n if var > 0:\n self.resample()\n self.state = self.LOCALIZED\n self.variance_estimate()\n if self.robot.carrying:\n self.robot.world.world_map.update_carried_object(self.robot.carrying)\n\n def delocalize(self):\n self.state = self.LOST\n self.initializer.initialize(self.robot)\n\n def pose_estimate(self):\n cx = 0.0; cy = 0.0\n hsin = 0.0; hcos = 0.0\n weight_sum = 0.0\n best_particle = self.particles[0]\n for p in self.particles:\n p.weight = exp(p.log_weight)\n if p.weight > best_particle.weight:\n best_particle = p\n cx += p.weight * p.x\n cy += p.weight * p.y\n hsin += sin(p.theta) * p.weight\n hcos += cos(p.theta) * p.weight\n weight_sum += p.weight\n if weight_sum == 0:\n weight_sum = 1\n cx /= weight_sum\n cy /= weight_sum\n self.pose = (cx, cy, atan2(hsin,hcos))\n self.best_particle = best_particle\n return self.pose\n\n def variance_estimate(self):\n weight = var_xx = var_xy = var_yy = r_sin = r_cos = 0.0\n (mu_x, mu_y, mu_theta) = self.pose_estimate()\n for p in self.particles:\n dx = (p.x - mu_x)\n dy = (p.y - mu_y)\n var_xx += dx * dx * p.weight\n var_xy += dx * dy * p.weight\n var_yy += dy * dy * 
p.weight\n r_sin += sin(p.theta) * p.weight\n r_cos += cos(p.theta) * p.weight\n weight += p.weight\n xy_var = np.array([[var_xx, var_xy],\n [var_xy, var_yy]]) / weight\n Rsq = r_sin**2 + r_cos**2\n Rav = sqrt(Rsq) / weight\n theta_var = max(0, 1 - Rav)\n self.variance = (xy_var, theta_var)\n return self.variance\n\n def update_weights(self):\n # Clip the log_weight values and calculate the new weights.\n max_weight = max(p.log_weight for p in self.particles)\n if max_weight >= self.min_log_weight:\n wt_inc = 0.0\n else:\n wt_inc = - self.min_log_weight / 2.0\n print('wt_inc',wt_inc,'applied for max_weight',max_weight)\n exp_weights = self.exp_weights\n particles = self.particles\n for i in range(self.num_particles):\n p = particles[i]\n p.log_weight += wt_inc\n exp_weights[i] = p.weight = exp(p.log_weight)\n variance = np.var(exp_weights)\n return variance\n\n def resample(self):\n # Compute and normalize the cdf; make local pointers for faster access.\n #print('resampling...')\n exp_weights = self.exp_weights\n cdf = self.cdf\n cumsum = 0\n for i in range(self.num_particles):\n cumsum += exp_weights[i]\n cdf[i] = cumsum\n np.divide(cdf,cumsum,cdf)\n\n # Resampling loop: choose particles to spawn\n uincr = 1.0 / self.num_particles\n u = random.random() * uincr\n index = 0\n new_indices = self.new_indices\n for j in range(self.num_particles):\n while u > cdf[index]:\n index += 1\n new_indices[j] = index\n u += uincr\n\n self.install_new_particles()\n\n def install_new_particles(self):\n particles = self.particles\n new_indices = self.new_indices\n new_x = self.new_x\n new_y = self.new_y\n new_theta = self.new_theta\n for i in range(self.num_particles):\n p = particles[new_indices[i]]\n new_x[i] = p.x\n new_y[i] = p.y\n new_theta[i] = p.theta\n for i in range(self.num_particles):\n p = particles[i]\n p.x = new_x[i]\n p.y = new_y[i]\n p.theta = new_theta[i]\n p.log_weight = 0.0\n p.weight = 1.0\n\n def set_pose(self,x,y,theta):\n for p in self.particles:\n p.x = x\n p.y = y\n p.theta = theta\n p.log_weight = 0.0\n p.weight = 1.0\n self.variance_estimate()\n\n def look_for_new_landmarks(self): pass # SLAM only\n\n def clear_landmarks(self):\n print('clear_landmarks: Landmarks are fixed in this particle filter.')\n\n #================ \"show\" commands that can be used by simple_cli\n\n def show_landmarks(self):\n landmarks = self.sensor_model.landmarks\n print('The particle filter has %d landmark%s:' %\n (len(landmarks), '' if (len(landmarks) == 1) else 's'))\n self.show_landmarks_workhorse(landmarks)\n\n def show_landmarks_workhorse(self,landmarks):\n \"Also called by show_particle\"\n sorted_keys = self.sort_wmobject_ids(landmarks)\n for key in sorted_keys:\n value = landmarks[key]\n if isinstance(value, Pose):\n x = value.position.x\n y = value.position.y\n theta = value.rotation.angle_z.degrees\n sigma_x = 0\n sigma_y = 0\n sigma_theta = 0\n else:\n x = value[0][0,0]\n y = value[0][1,0]\n theta = value[1] * 180/pi\n sigma_x = sqrt(value[2][0,0])\n sigma_y = sqrt(value[2][1,1])\n sigma_theta = sqrt(value[2][2,2])*180/pi\n if key.startswith('Aruco-'):\n print(' Aruco marker %s' % key[6:], end='')\n elif key.startswith('Wall-'):\n print(' Wall %s' % key[5:], end='')\n elif key.startswith('Cube-'):\n print(' Cube %s' % key[5:], end='')\n else:\n print(' %r' % key, end='')\n print(' at (%6.1f, %6.1f) @ %4.1f deg +/- (%4.1f,%4.1f) +/- %3.1f deg' %\n (x, y, theta, sigma_x, sigma_y, sigma_theta))\n print()\n\n def sort_wmobject_ids(self,ids):\n preference = 
['Charger','Cube','Aruco','Wall','Doorway','CustomCube','CustomMarker','Room','Face']\n\n def key(id):\n index = 0\n for prefix in preference:\n if id.startswith(prefix):\n break\n else:\n index += 1\n return ('%02d' % index) + id\n\n result = sorted(ids, key=key)\n return result\n\n def show_particle(self,args=[]):\n if len(args) == 0:\n particle = self.best_particle\n particle_number = '(best=%d)' % particle.index\n elif len(args) > 1:\n print('Usage: show particle [number]')\n return\n else:\n try:\n particle_number = int(args[0])\n particle = self.particles[particle_number]\n except ValueError:\n print('Usage: show particle [number]')\n return\n except IndexError:\n print('Particle number must be between 0 and',\n len(self.particles)-1)\n return\n print ('Particle %s: x=%6.1f y=%6.1f theta=%6.1f deg log wt=%f [%.25f]' %\n (particle_number, particle.x, particle.y, particle.theta*180/pi,\n particle.log_weight, particle.weight))\n if isinstance(particle,SLAMParticle) and len(particle.landmarks) > 0:\n print('Landmarks:')\n self.show_landmarks_workhorse(particle.landmarks)\n else:\n print()\n\n#================ Particle SLAM ================\n\nclass SLAMParticle(Particle):\n def __init__(self, index=-1):\n super().__init__(index)\n self.landmarks = dict()\n\n def __repr__(self):\n return '<SLAMParticle %d: (%.2f, %.2f) %.1f deg. log_wt=%f, %d-lm>' % \\\n (self.index, self.x, self.y, self.theta*180/pi, self.log_weight, len(self.landmarks))\n\n sigma_r = 50\n sigma_alpha = 15 * (pi/180)\n sigma_phi = 15 * (pi/180)\n sigma_theta = 15 * (pi/180)\n sigma_z = 50\n landmark_sensor_variance_Qt = np.array([[sigma_r**2, 0 , 0],\n [0 , sigma_alpha**2, 0],\n [0 , 0 , sigma_phi**2]])\n # variance of camera location (cylindrical coordinates)\n # phi is the angle around the Z axis of the robot\n # theta is the angle around the X axis of the camera (pitch)\n camera_sensor_variance_Qt = np.array([[sigma_r**2 , 0 , 0 ,0 , 0],\n [0 , sigma_alpha**2, 0 ,0 , 0],\n [0 , 0 , sigma_z**2 ,0 , 0],\n [0 , 0 , 0 ,sigma_phi**2, 0],\n [0 , 0 , 0 ,0 , sigma_theta**2]])\n\n @staticmethod\n def sensor_jacobian_H(dx, dy, dist):\n \"\"\"Jacobian of sensor values (r, alpha) wrt particle state x,y\n where (dx,dy) is vector from particle to lm, and\n r = sqrt(dx**2 + dy**2), alpha = atan2(dy,dx), phi = phi\"\"\"\n q = dist**2\n sqr_q = dist\n return np.array([[dx/sqr_q, dy/sqr_q, 0],\n [-dy/q , dx/q , 0],\n [0 , 0 , 1]])\n\n @staticmethod\n def sensor_jacobian_H_cam(dx, dy, dist):\n \"\"\"Jacobian of sensor values (r, alpha) wrt particle state x,y\n where (dx,dy) is vector from particle to lm, and\n r = sqrt(dx**2 + dy**2), alpha = atan2(dy,dx), z = z, phi = phi, theta = theta\"\"\"\n q = dist**2\n sqr_q = dist\n return np.array([[dx/sqr_q, dy/sqr_q, 0, 0, 0],\n [-dy/q , dx/q , 0, 0, 0],\n [0 , 0 , 1, 0, 0],\n [0 , 0 , 0, 1, 0],\n [0 , 0 , 0, 0, 1],])\n\n def add_regular_landmark(self, lm_id, sensor_dist, sensor_bearing, sensor_orient):\n direction = self.theta + sensor_bearing\n dx = sensor_dist * cos(direction)\n dy = sensor_dist * sin(direction)\n lm_x = self.x + dx\n lm_y = self.y + dy\n\n if lm_id.startswith('Aruco-') or lm_id.startswith('Wall-'):\n lm_orient = wrap_angle(sensor_orient + self.theta)\n elif lm_id.startswith('Cube-'):\n lm_orient = sensor_orient\n else:\n print('Unrecognized landmark type:',lm_id)\n lm_orient = sensor_orient\n\n lm_mu = np.array([[lm_x], [lm_y]])\n H = self.sensor_jacobian_H(dx, dy, sensor_dist)\n Hinv = np.linalg.inv(H)\n Q = self.landmark_sensor_variance_Qt\n lm_sigma = 
Hinv.dot(Q.dot(Hinv.T))\n self.landmarks[lm_id] = (lm_mu, lm_orient, lm_sigma)\n\n def update_regular_landmark(self, id, sensor_dist, sensor_bearing, sensor_orient,\n dx, dy, I=np.eye(3)):\n # (dx,dy) is vector from particle to SENSOR position of lm\n (old_mu, old_orient, old_sigma) = self.landmarks[id]\n H = self.sensor_jacobian_H(dx, dy, sensor_dist)\n Ql = H.dot(old_sigma.dot(H.T)) + self.landmark_sensor_variance_Qt\n Ql_inv = np.linalg.inv(Ql)\n K = old_sigma.dot((H.T).dot(Ql_inv))\n z = np.array([[sensor_dist], [sensor_bearing], [sensor_orient]])\n # (ex,ey) is vector from particle to MAP position of lm\n ex = old_mu[0,0] - self.x\n ey = old_mu[1,0] - self.y\n h = np.array([ [sqrt(ex**2+ey**2)],\n [wrap_angle(atan2(ey,ex) - self.theta)],\n [wrap_angle(old_orient - self.theta)] ])\n delta_sensor = wrap_selected_angles(z-h, [1,2])\n if False: \"\"\"#abs(delta_sensor[1,0]) > 0.1 or abs(delta_sensor[0,0]) > 50:\n # Huge delta means the landmark must have moved, so reset our estimate.\n if isinstance(id,str): # *** DEBUG\n print('update_regular_landmark: index=%d id=%s dist=%5.1f brg=%5.1f orient=%5.1f' %\n (self.index, id, sensor_dist, sensor_bearing*180/pi, sensor_orient*180/pi), end='')\n print(' delta sensor: %.1f %.1f %.1f' %\n (delta_sensor[0,0], delta_sensor[1,0]*180/pi, delta_sensor[2,0]*180/pi))\n new_mu = np.array([[self.x + sensor_dist*cos(sensor_bearing+self.theta)],\n [self.y + sensor_dist*sin(sensor_bearing+self.theta)],\n [sensor_orient]])\n Hinv = np.linalg.inv(H)\n Q = self.landmark_sensor_variance_Qt\n new_sigma = Hinv.dot(Q.dot(Hinv.T))\"\"\"\n else:\n # Error not too large: refine current estimate using EKF\n new_mu = np.append(old_mu,[old_orient]).reshape([3,1]) + K.dot(delta_sensor)\n new_sigma = (I - K.dot(H)).dot(old_sigma)\n # landmark tuple is ( [x,y], orient, covariance_matrix )\n if self.index == -1: # NOOP: should be == 0\n print('id=',id,' old_mu=',[old_mu[0,0],old_mu[1,0]],'@',old_orient*180/pi,\n ' new_mu=',[new_mu[0][0],new_mu[1][0]],'@',new_mu[2][0]*180/pi)\n print(' ','dx,dy=',[dx,dy],' ex,ey=',[ex,ey],\n ' sensor_dist=',sensor_dist,\n ' sensor_bearing=',sensor_bearing*180/pi,\n ' sensor_orient=',sensor_orient*180/pi,\n ' delta_sensor=',delta_sensor)\n self.landmarks[id] = (new_mu[0:2], wrap_angle(new_mu[2][0]), new_sigma)\n if not isinstance(self.landmarks[id][1],(float, np.float64)):\n print('ORIENT FAIL', self.landmarks[id])\n print('new_mu=',new_mu)\n print(' ','dx,dy=',[dx,dy],' ex,ey=',[ex,ey],\n ' sensor_dist=',sensor_dist,\n ' sensor_bearing=',sensor_bearing*180/pi,\n ' sensor_orient=',sensor_orient*180/pi,\n ' delta_sensor=',delta_sensor)\n print('old_mu=',old_mu,'\\nold_orient=',old_orient,'\\nold_sigma=',old_sigma)\n print('z=',z, 'h=',h)\n input()\n\n def add_cam_landmark(self, lm_id, sensor_dist, sensor_bearing, sensor_height, sensor_phi, sensor_theta):\n direction = self.theta + sensor_bearing\n dx = sensor_dist * cos(direction)\n dy = sensor_dist * sin(direction)\n lm_x = self.x + dx\n lm_y = self.y + dy\n\n lm_height = (sensor_height, wrap_angle(sensor_phi+self.theta), sensor_theta)\n\n lm_mu = np.array([[lm_x], [lm_y]])\n H = self.sensor_jacobian_H_cam(dx, dy, sensor_dist)\n Hinv = np.linalg.inv(H)\n Q = self.camera_sensor_variance_Qt\n lm_sigma = Hinv.dot(Q.dot(Hinv.T))\n # [ [x,y], [z,orient,pitch], covarience_matrix]\n self.landmarks[lm_id] = (lm_mu, lm_height, lm_sigma)\n\n def update_cam_landmark(self, id, sensor_dist, sensor_bearing, sensor_height, sensor_phi, sensor_theta,\n dx, dy, I=np.eye(5)):\n # (dx,dy) is vector from particle 
to SENSOR position of lm\n (old_mu, old_height, old_sigma) = self.landmarks[id]\n H = self.sensor_jacobian_H_cam(dx, dy, sensor_dist)\n Ql = H.dot(old_sigma.dot(H.T)) + self.camera_sensor_variance_Qt\n Ql_inv = np.linalg.inv(Ql)\n K = old_sigma.dot((H.T).dot(Ql_inv))\n z = np.array([[sensor_dist],\n [sensor_bearing],\n [sensor_height],\n [wrap_angle(sensor_phi+self.theta)],\n [sensor_theta]])\n # (ex,ey) is vector from particle to MAP position of lm\n ex = old_mu[0,0] - self.x\n ey = old_mu[1,0] - self.y\n h = np.array([[sqrt(ex**2+ey**2)],\n [wrap_angle(atan2(ey,ex) - self.theta)],\n [old_height[0]],\n [old_height[1]],\n [old_height[2]]])\n new_mu = np.append(old_mu,[old_height]).reshape([5,1]) + K.dot(wrap_selected_angles(z - h,[1,3,4]))\n new_sigma = (I - K.dot(H)).dot(old_sigma)\n # [ [x,y], [z,orient,pitch], covariance_matrix]\n self.landmarks[id] = (new_mu[0:2], new_mu[2:5], new_sigma)\n\n\nclass SLAMSensorModel(SensorModel):\n @staticmethod\n def is_cube(x):\n return isinstance(x, cozmo.objects.LightCube) and x.pose.is_valid\n\n @staticmethod\n def is_solo_aruco_landmark(x):\n #return False # **** DEBUG HACK\n \"True for independent Aruco landmarks not associated with any wall.\"\n return isinstance(x, ArucoMarker) and x.id_string not in wall_marker_dict\n\n def __init__(self, robot, landmark_test=None, landmarks=None,\n distance_variance=200):\n if landmarks is None:\n landmarks = dict()\n if landmark_test is None:\n landmark_test = self.is_cube\n self.landmark_test = landmark_test\n self.distance_variance = distance_variance\n self.candidate_arucos = dict()\n self.use_perched_cameras = False\n super().__init__(robot,landmarks)\n\n def infer_wall_from_corners_lists(self, id, markers):\n # Called by generate_walls_from_markers below.\n # All these markers have the same wall_spec, so just grab the first one.\n wall_spec = wall_marker_dict.get(markers[0][0], None)\n world_points = []\n image_points = []\n for (id, corners) in markers:\n (s, (cx, cy)) = wall_spec.marker_specs[id]\n\n if cy < 100:\n marker_size = self.robot.world.aruco.marker_size\n else:\n # Compensate for lintel marker foreshortening.\n # TODO: This could be smarter; make it distance-dependent.\n marker_size = 0.85 * self.robot.world.aruco.marker_size\n\n world_points.append((cx-s*marker_size/2, cy+marker_size/2, s))\n world_points.append((cx+s*marker_size/2, cy+marker_size/2, s))\n world_points.append((cx+s*marker_size/2, cy-marker_size/2, s))\n world_points.append((cx-s*marker_size/2, cy-marker_size/2, s))\n\n image_points.append(corners[0])\n image_points.append(corners[1])\n image_points.append(corners[2])\n image_points.append(corners[3])\n\n # Find rotation and translation vector from camera frame using SolvePnP\n (success, rvecs, tvecs) = cv2.solvePnP(np.array(world_points),\n np.array(image_points),\n self.robot.world.aruco.camera_matrix,\n self.robot.world.aruco.distortion_array)\n rotationm, jcob = cv2.Rodrigues(rvecs)\n # Change to marker frame.\n # Arucos seen head-on have orientation 0, so work with that for now.\n # Later we will flip the orientation to pi for the worldmap.\n transformed = np.matrix(rotationm).T*(-np.matrix(tvecs))\n angles_xyz = rotation_matrix_to_euler_angles(rotationm)\n # euler angle flip when back of wall is seen\n if angles_xyz[2] > pi/2:\n wall_orient = wrap_angle(-(angles_xyz[1]-pi))\n elif angles_xyz[2] >= -pi/2 and angles_xyz[2] <= pi/2:\n wall_orient = wrap_angle((angles_xyz[1]))\n else:\n wall_orient = wrap_angle(-(angles_xyz[1]+pi))\n\n wall_x = 
-transformed[2]*cos(wall_orient) + (transformed[0]-wall_spec.length/2)*sin(wall_orient)\n wall_y = (transformed[0]-wall_spec.length/2)*cos(wall_orient) - -transformed[2]*sin(wall_orient)\n # Flip wall orientation to match ArUcos for worldmap\n wm_wall_orient = wrap_angle(pi - wall_orient)\n wall = WallObj(id=wall_spec.spec_id, x=wall_x, y=wall_y, theta=wm_wall_orient,\n length=wall_spec.length)\n return wall\n\n def generate_walls_from_markers(self, seen_marker_objects, good_markers):\n if self.robot.is_moving:\n return []\n walls = []\n wall_markers = dict() # key is wall id\n for num in good_markers:\n marker = seen_marker_objects[num]\n wall_spec = wall_marker_dict.get(marker.id_string,None)\n if wall_spec is None: continue # marker not part of a known wall\n wall_id = wall_spec.spec_id\n markers = wall_markers.get(wall_id, list())\n markers.append((marker.id_string, marker.bbox[0]))\n wall_markers[wall_id] = markers\n # Now infer the walls from the markers\n for (wall_id,markers) in wall_markers.items():\n # Must see at least two markers to create a wall, but once it's\n # in the world map we only require one marker to recognize it.\n # Necessary to avoid spurious wall creation.\n # NOTE: switched to only requiring 1 marker for wall creation.\n if len(markers) >= 1 or wall_id in self.robot.world.world_map.objects:\n walls.append(self.infer_wall_from_corners_lists(wall_id,markers))\n return walls\n\n def evaluate(self, particles, force=False, just_looking=False):\n # Returns true if particles were evaluated.\n # Call with force=True from particle_viewer to skip distance traveled check.\n # Call with just_looking=True to just look for new landmarks; no evaluation.\n evaluated = False\n\n # Don't evaluate if robot is still moving; ArUco info will be bad.\n if self.robot.is_moving:\n return False\n\n # Compute robot motion even if forced, to check for robot origin_id change\n (dist,turn_angle) = self.compute_robot_motion()\n\n # If we're lost but have landmarks in view, see if we can\n # recover by using the landmarks to generate a new particle set.\n if self.pf.state == ParticleFilter.LOST:\n if self.pf.sensor_model.landmarks:\n found_lms = self.pf.make_particles_from_landmarks()\n if not found_lms:\n return False\n else:\n self.pf.state = ParticleFilter.LOCALIZING\n force = True\n just_looking = False\n else: # no landmarks, so we can't be lost\n self.pf.state = ParticleFilter.LOCALIZED\n\n # Unless forced, don't evaluate unless the robot moved enough\n # for evaluation to be worthwhile.\n #print('force=',force,' dist=',dist, ' state=',self.pf.state)\n if (not force) and (dist < 5) and abs(turn_angle) < 2*pi/180:\n return False\n if not just_looking:\n self.last_evaluate_pose = self.robot.pose\n\n # Evaluate any cube landmarks (but we don't normally use cubes as landmarks)\n for cube in self.robot.world.light_cubes.values():\n if self.landmark_test(cube):\n id = 'Cube-'+str(cube.cube_id)\n evaluated = self.process_landmark(id, cube, just_looking, []) \\\n or evaluated\n\n # Evaluate ArUco landmarks\n try:\n # Cache seen marker objects because vision is in another thread.\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects.copy()\n except:\n seen_marker_objects = dict()\n for marker in seen_marker_objects.values():\n if self.landmark_test(marker):\n evaluated = self.process_landmark(marker.id_string, marker,\n just_looking, seen_marker_objects) \\\n or evaluated\n\n # Evaluate walls. 
First find the set of \"good\" markers.\n # Good markers have been seen consistently enough to be deemed reliable.\n good_markers = []\n for marker in seen_marker_objects.values():\n if marker.id_string in self.robot.world.world_map.objects or \\\n self.candidate_arucos.get(marker.id_string,-1) > 10:\n good_markers.append(marker.id)\n walls = self.generate_walls_from_markers(seen_marker_objects, good_markers)\n for wall in walls:\n evaluated = self.process_landmark(wall.id, wall, just_looking, seen_marker_objects) \\\n or evaluated\n #print('for', wall, ' evaluated now', evaluated, ' just_looking=', just_looking, seen_marker_objects)\n\n # Evaluate perched cameras as landmarks\n if self.use_perched_cameras:\n # Add cameras that can see the robot as landmarks.\n perched = list(self.robot.world.perched.camera_pool.get(self.robot.aruco_id,{}).values())\n for cam in perched:\n id = 'Cam-XXX'\n evaluated = self.process_landmark(id, cam, just_looking, seen_marker_objects) \\\n or evaluated\n\n #print('nwalls=', len(walls), ' evaluated=',evaluated)\n if evaluated:\n wmax = - np.inf\n for p in particles:\n wmax = max(wmax, p.log_weight)\n if wmax > -5.0 and self.pf.state != ParticleFilter.LOCALIZED:\n print('::: LOCALIZED :::')\n self.pf.state = ParticleFilter.LOCALIZED\n elif self.pf.state != ParticleFilter.LOCALIZED:\n print('not localized because wmax =', wmax)\n min_log_weight = self.robot.world.particle_filter.min_log_weight\n if wmax < min_log_weight:\n wt_inc = min_log_weight - wmax\n # print('wmax=',wmax,'wt_inc=',wt_inc)\n for p in particles:\n p.log_weight += wt_inc\n self.robot.world.particle_filter.variance_estimate()\n\n # Update counts for candidate arucos and delete any losers.\n cached_keys = tuple(self.candidate_arucos.keys())\n for id in cached_keys:\n self.candidate_arucos[id] -= 1\n if self.candidate_arucos[id] <= 0:\n #print('*** DELETING CANDIDATE ARUCO', id)\n del self.candidate_arucos[id]\n\n return evaluated\n\n def process_landmark(self, id, data, just_looking, seen_marker_objects):\n particles = self.robot.world.particle_filter.particles\n if id.startswith('Aruco-'):\n marker_number = int(id[6:])\n # print('spurious data=',data)\n marker = seen_marker_objects[marker_number]\n sensor_dist = marker.camera_distance\n sensor_bearing = atan2(marker.camera_coords[0],\n marker.camera_coords[2])\n # Rotation about Y axis of marker. 
Fix sign.\n            sensor_orient = wrap_angle(pi - marker.euler_rotation[1] * (pi/180))\n        elif id.startswith('Wall-'):\n            # Turning to polar coordinates\n            sensor_dist = sqrt(data.x**2 + data.y**2)\n            sensor_bearing = atan2(data.y, data.x)\n            sensor_orient = wrap_angle(data.theta)\n        elif id.startswith('Cube-'):\n            # sdk values are in SDK's coordinate system, not ours\n            sdk_dx = data.pose.position.x - self.robot.pose.position.x\n            sdk_dy = data.pose.position.y - self.robot.pose.position.y\n            sensor_dist = sqrt(sdk_dx**2 + sdk_dy**2)\n            sdk_bearing = atan2(sdk_dy, sdk_dx)\n            # sensor_bearing is lm bearing relative to robot centerline\n            sensor_bearing = \\\n                wrap_angle(sdk_bearing-self.robot.pose.rotation.angle_z.radians)\n            # sensor_orient is lm bearing relative to cube's North\n            sensor_orient = \\\n                wrap_angle(sdk_bearing - data.pose.rotation.angle_z.radians)\n        elif id.startswith('Cam'):\n            # Converting to cylindrical coordinates\n            sensor_dist = sqrt(data.x**2 + data.y**2)\n            sensor_bearing = atan2(data.y,data.x)\n            sensor_height = data.z\n            sensor_phi = data.phi\n            sensor_theta = data.theta\n            if sensor_height < 0:\n                print(\"FLIP!!!\")\n            # Using str instead of capture object as new object is added by perched_cam every time\n        else:\n            print(\"Don't know how to process landmark; id =\",id)\n\n        if id not in self.landmarks:\n            if id.startswith('Aruco-'):\n                seen_count = self.candidate_arucos.get(id,0)\n                if seen_count < 10:\n                    # add 2 because we're going to subtract 1 later\n                    self.candidate_arucos[id] = seen_count + 2\n                    return False\n            print(' *** PF ADDING LANDMARK %s at: distance=%6.1f bearing=%5.1f deg. orient=%5.1f deg.' %\n                  (id, sensor_dist, sensor_bearing*180/pi, sensor_orient*180/pi))\n            for p in particles:\n                if not id.startswith('Video'):\n                    p.add_regular_landmark(id, sensor_dist, sensor_bearing, sensor_orient)\n                else:\n                    # special function for cameras as landmark list has more variables\n                    p.add_cam_landmark(id, sensor_dist, sensor_bearing, sensor_height, sensor_phi, sensor_theta)\n            # Add new landmark to sensor model's landmark list so worldmap can reference it\n            #self.landmarks[id] = self.robot.world.particle_filter.particles[0].landmarks[id]\n            self.landmarks[id] = self.pf.best_particle.landmarks[id]\n            # Delete new aruco from tentative candidate list; it's established now.\n            if id.startswith('Aruco-'):\n                del self.candidate_arucos[id]\n            return False\n\n        # If we reach here, we're seeing a familiar landmark, so evaluate\n        if just_looking: # *** DEBUG ***\n            # We can't afford to update all the particles on each\n            # camera frame so we'll just update particle 0 and use\n            # that to update the sensor model.\n            #pp = [particles[0]]\n            return False\n            pp = [self.pf.best_particle]\n            evaluated = False\n        else:\n            # We've moved a bit, so we should update every particle.\n            pp = particles\n            evaluated = True\n\n        if id in self.robot.world.world_map.objects:\n            obj = self.robot.world.world_map.objects[id]\n            should_update_landmark = (not obj.is_fixed) and \\\n                                     (self.pf.state == ParticleFilter.LOCALIZED)\n        else:\n            should_update_landmark = True\n\n        landmark_is_camera = id.startswith('Video')\n\n        for p in pp:\n            # Use sensed bearing and distance to get particle's\n            # prediction of landmark position in the world. 
Compare\n # to its stored map position.\n sensor_direction = p.theta + sensor_bearing\n dx = sensor_dist * cos(sensor_direction)\n dy = sensor_dist * sin(sensor_direction)\n predicted_lm_x = p.x + dx\n predicted_lm_y = p.y + dy\n (lm_mu, lm_orient, lm_sigma) = p.landmarks[id]\n map_lm_x = lm_mu[0,0]\n map_lm_y = lm_mu[1,0]\n error_x = map_lm_x - predicted_lm_x\n error_y = map_lm_y - predicted_lm_y\n error1_sq = error_x**2 + error_y**2\n error2_sq = 0 # *** (sensor_dist * wrap_angle(sensor_orient - lm_orient))**2\n p.log_weight -= (error1_sq + error2_sq) / self.distance_variance\n # Update landmark in this particle's map\n if should_update_landmark:\n if not landmark_is_camera:\n p.update_regular_landmark(id, sensor_dist, sensor_bearing,\n sensor_orient, dx, dy)\n else:\n # special function for cameras as landmark list has more variables\n p.update_cam_landmark(id, sensor_dist, sensor_bearing,\n sensor_height, sensor_phi, sensor_theta, dx, dy)\n return evaluated\n\nclass SLAMParticleFilter(ParticleFilter):\n def __init__(self, robot, landmark_test=SLAMSensorModel.is_solo_aruco_landmark, **kwargs):\n if 'sensor_model' not in kwargs or kwargs['sensor_model'] == 'default':\n kwargs['sensor_model'] = SLAMSensorModel(robot, landmark_test=landmark_test)\n if 'particle_factory' not in kwargs:\n kwargs['particle_factory'] = SLAMParticle\n if 'initializer' not in kwargs:\n kwargs['initializer'] = RobotPosition(0,0,0)\n super().__init__(robot, **kwargs)\n self.initializer.pf = self\n self.new_landmarks = [None] * self.num_particles\n\n def clear_landmarks(self):\n for p in self.particles:\n p.landmarks.clear()\n self.sensor_model.landmarks.clear()\n\n def add_fixed_landmark(self,landmark):\n mu = np.array([[landmark.x], [landmark.y]])\n theta = landmark.theta\n sigma = np.zeros([3,3])\n mu_theta_sigma = (mu, theta, sigma)\n for p in self.particles:\n p.landmarks[landmark.id] = mu_theta_sigma\n self.sensor_model.landmarks[landmark.id] = mu_theta_sigma\n\n def update_weights(self):\n var = super().update_weights()\n best_particle = self.particles[self.exp_weights.argmax()]\n #print(' weight update: BEST ==> ',best_particle)\n self.sensor_model.landmarks = best_particle.landmarks\n return var\n\n def install_new_particles(self):\n particles = self.particles # make local for faster access\n new_landmarks = self.new_landmarks\n new_indices = self.new_indices\n for i in range(self.num_particles):\n new_landmarks[i] = particles[new_indices[i]].landmarks.copy()\n super().install_new_particles()\n for i in range(self.num_particles):\n particles[i].landmarks = new_landmarks[i]\n\n def make_particles_from_landmarks(self):\n try:\n # Cache seen marker objects because vision is in another thread.\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects.copy()\n except:\n seen_marker_objects = dict()\n lm_specs = self.get_cube_landmark_specs() + \\\n self.get_aruco_landmark_specs(seen_marker_objects) + \\\n self.get_wall_landmark_specs(seen_marker_objects)\n if not lm_specs: return False\n num_specs = len(lm_specs)\n particles = self.particles\n phi_jitter = np.random.normal(0.0, self.angle_jitter, size=self.num_particles)\n x_jitter = np.random.uniform(-self.dist_jitter, self.dist_jitter, size=self.num_particles)\n y_jitter = np.random.uniform(-self.dist_jitter, self.dist_jitter, size=self.num_particles)\n theta_jitter = np.random.uniform(-self.angle_jitter/2, self.angle_jitter/2, size=self.num_particles)\n for i in range(self.num_particles):\n (obj, sensor_dist, sensor_bearing, sensor_orient, lm_pose) 
= lm_specs[i % num_specs]\n            # phi is our bearing relative to the landmark, independent of our orientation\n            phi = wrap_angle(lm_pose[1] - sensor_orient + sensor_bearing + phi_jitter[i])\n            if i == -1: # change to i==1 to re-enable\n                print('phi=', phi*180/pi, ' lm_pose[1]=', lm_pose[1]*180/pi,\n                      ' sensor_bearing=',sensor_bearing*180/pi,\n                      ' sensor_orient=', sensor_orient*180/pi)\n            p = particles[i]\n            p.x = lm_pose[0][0,0] - sensor_dist * cos(phi) + x_jitter[i]\n            p.y = lm_pose[0][1,0] - sensor_dist * sin(phi) + y_jitter[i]\n            p.theta = wrap_angle(phi - sensor_bearing + phi_jitter[i] + theta_jitter[i])\n            p.landmarks = self.sensor_model.landmarks.copy()\n            if False: #i<3:\n                print('NEW PARTICLE %d: ' % i, p.x, p.y, p.theta*180/pi)\n                print(' lm_pose[1]=',lm_pose[1]*180/pi, ' sensor_orient=',sensor_orient*180/pi,\n                      ' phi=',phi*180/pi)\n                print('lm_pose = ', lm_pose)\n        return True\n\n    def get_cube_landmark_specs(self):\n        lm_specs = []\n        # TODO: iterate over cubes as we do aruco markers below\n        return lm_specs\n\n    def get_aruco_landmark_specs(self, seen_marker_objects):\n        lm_specs = []\n        for marker in seen_marker_objects.values():\n            id = 'Aruco-'+str(marker.id)\n            lm_pose = self.sensor_model.landmarks.get(id,None)\n            if lm_pose is None: continue # not a familiar landmark\n            sensor_dist = marker.camera_distance\n            sensor_bearing = atan2(marker.camera_coords[0],\n                                   marker.camera_coords[2])\n            sensor_orient = wrap_angle(pi - marker.euler_rotation[1] * (pi/180))\n            lm_specs.append((marker, sensor_dist, sensor_bearing, sensor_orient, lm_pose))\n        return lm_specs\n\n    def get_wall_landmark_specs(self, seen_marker_objects):\n        lm_specs = []\n        good_markers = []\n        for marker in seen_marker_objects.values():\n            if marker.id_string in self.robot.world.world_map.objects or \\\n               self.sensor_model.candidate_arucos.get(marker.id_string,-1) > 10:\n                good_markers.append(marker.id)\n        walls = self.sensor_model.generate_walls_from_markers(seen_marker_objects, good_markers)\n        for wall in walls:\n            lm_pose = self.sensor_model.landmarks.get(wall.id,None)\n            if lm_pose is None: continue # not a familiar landmark\n            sensor_dist = sqrt(wall.x**2 + wall.y**2)\n            sensor_bearing = atan2(wall.y, wall.x)\n            sensor_orient = wall.theta\n            lm_specs.append((wall, sensor_dist, sensor_bearing, sensor_orient, lm_pose))\n        return lm_specs\n\n    def look_for_new_landmarks(self):\n        \"\"\"Calls evaluate() to find landmarks and add them to the maps.\n        Also updates existing landmarks.\"\"\"\n        self.sensor_model.evaluate(self.particles, force=True, just_looking=True)\n        self.sensor_model.landmarks = self.best_particle.landmarks\n\n
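#================ Resampling demo ================\n\nif __name__ == '__main__':\n    # Minimal, self-contained sketch (not part of the cozmo-tools API) of the\n    # systematic low-variance resampling scheme used in ParticleFilter.resample()\n    # above, run on a toy particle set. All names below are illustrative.\n    demo = [Particle(i) for i in range(5)]\n    for p in demo:\n        p.x = random.uniform(-100, 100)\n        p.log_weight = -abs(p.x) / 50 # favor particles near x = 0\n    weights = np.exp([p.log_weight for p in demo])\n    cdf = np.cumsum(weights) / np.sum(weights)\n    # Step a single random offset through the cdf at regular intervals;\n    # heavier particles span more of the cdf and so are spawned more often.\n    uincr = 1.0 / len(demo)\n    u = random.random() * uincr\n    index = 0\n    chosen = []\n    for _ in demo:\n        while u > cdf[index]:\n            index += 1\n        chosen.append(demo[index].index)\n        u += uincr\n    print('Resampled particle indices:', chosen)\n"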
},
{
"alpha_fraction": 0.6177777647972107,
"alphanum_fraction": 0.6395555734634399,
"avg_line_length": 39.90909194946289,
"blob_id": "082e7efc1bebebf630207542df6ce3fc53eb61d7",
"content_id": "fb6dea93ffc8399f2535c84e85070763b04f78b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2250,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 55,
"path": "/cozmo_fsm/examples/Nested.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The Nested demo shows the use of nested state machines. We define a\n new node class DingDong that has a three-node state machine inside\n it. We then define the main class, Nested, whose state machine\n contains two instances of DingDong. DingDong uses a ParentCompletes\n node to cause DinDong to post a completion event, which allows\n Nested's first DingDong instance 'dd1' to move on to the next state,\n which is 'bridge'. (The 'dd2' instance of DingDong also posts a\n completion event, but nothing is listening for it.)\n\n Behavior: Cozmo says 'ding', then 'dong', then says 'once again'\n (that's the bridge), then 'ding', and then 'dong'.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass DingDong(StateNode):\n def setup(self):\n \"\"\"\n ding: Say('ding') =C=> dong: Say('dong') =C=> ParentCompletes()\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:14:24 2020:\n \n ding = Say('ding') .set_name(\"ding\") .set_parent(self)\n dong = Say('dong') .set_name(\"dong\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(ding) .add_destinations(dong)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(dong) .add_destinations(parentcompletes1)\n \n return self\n\nclass Nested(StateMachineProgram):\n def setup(self):\n \"\"\"\n dd1: DingDong() =C=> bridge: Say('once again') =C=> dd2: DingDong()\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:14:24 2020:\n \n dd1 = DingDong() .set_name(\"dd1\") .set_parent(self)\n bridge = Say('once again') .set_name(\"bridge\") .set_parent(self)\n dd2 = DingDong() .set_name(\"dd2\") .set_parent(self)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(dd1) .add_destinations(bridge)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(bridge) .add_destinations(dd2)\n \n return self\n"
},
{
"alpha_fraction": 0.5061424970626831,
"alphanum_fraction": 0.5659295916557312,
"avg_line_length": 40.38983154296875,
"blob_id": "afe5f52ff19056c3d30e83dd619d924f7237638d",
"content_id": "bd1aca41eef7e4f1251be443e82a5caeda5b2338",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2442,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 59,
"path": "/cozmo_fsm/examples/CV_Contour.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy as np\nfrom cozmo_fsm import *\n\nclass CV_Contour(StateMachineProgram):\n def __init__(self):\n self.colors = [(0,0,255), (0,255,0), (255,0,0),\n (255,255,0), (255,0,255), (0,255,255),\n (0,0,128), (0,128,0), (128,0,0),\n (128,128,0), (0,128,128), (128,0,128),\n (255,255,255)]\n super().__init__(aruco=False, particle_filter=False, cam_viewer=False,\n force_annotation=True, annotate_sdk=False)\n\n def start(self):\n super().start()\n dummy = numpy.array([[0]*320], dtype='uint8')\n cv2.namedWindow('contour')\n cv2.imshow('contour',dummy)\n\n cv2.createTrackbar('thresh1','contour',0,255,lambda self: None)\n cv2.setTrackbarPos('thresh1','contour',100)\n\n cv2.createTrackbar('minArea','contour',1,1000,lambda self: None)\n cv2.setTrackbarPos('minArea','contour',50)\n\n def user_image(self,image,gray):\n thresh1 = cv2.getTrackbarPos('thresh1','contour')\n ret, thresholded = cv2.threshold(gray, thresh1, 255, 0)\n #cv2.imshow('contour',thresholded)\n if cv2.__version__[0] >= '4':\n contours, hierarchy = \\\n cv2.findContours(thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else: # in OpenCV 3.x there was an additional return value\n dummy, contours, hierarchy = \\\n cv2.findContours(thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n areas = [(i, cv2.contourArea(contours[i])) for i in range(len(contours))]\n areas.sort(key=lambda x: x[1])\n areas.reverse()\n self.areas = areas\n self.contours = contours\n self.hierarchy = hierarchy\n\n def user_annotate(self,annotated_image):\n minArea = cv2.getTrackbarPos('minArea','contour')\n scale = self.annotated_scale_factor\n for area_entry in self.areas:\n if area_entry[1] < minArea:\n break\n temp = index = area_entry[0]\n depth = -1\n while temp != -1 and depth < len(self.colors)-1:\n depth += 1\n temp = self.hierarchy[0,temp,3]\n contour = scale * self.contours[index]\n cv2.drawContours(annotated_image, [contour], 0, self.colors[depth], 2)\n cv2.imshow('contour',annotated_image)\n cv2.waitKey(1)\n return annotated_image\n"
},
{
"alpha_fraction": 0.5996376872062683,
"alphanum_fraction": 0.6295289993286133,
"avg_line_length": 37.068965911865234,
"blob_id": "d87e3f456d36eede79db785c81eea6382239d692",
"content_id": "93cac877e1e11ac207dfc8597b4d72c8ef4f7575",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1104,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 29,
"path": "/cozmo_fsm/examples/Look5.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Example of starting a behavior and indicating that it should not be\n automatically stopped when the behavior node is exited. We later\n use StopBehavior() to stop the behavior.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass Look5(StateMachineProgram):\n def setup(self):\n \"\"\"\n LookAroundInPlace(stop_on_exit=False)\n =T(5)=> Say(\"I'm almost done\")\n =T(5)=> StopBehavior()\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:13:56 2020:\n \n lookaroundinplace1 = LookAroundInPlace(stop_on_exit=False) .set_name(\"lookaroundinplace1\") .set_parent(self)\n say1 = Say(\"I'm almost done\") .set_name(\"say1\") .set_parent(self)\n stopbehavior1 = StopBehavior() .set_name(\"stopbehavior1\") .set_parent(self)\n \n timertrans1 = TimerTrans(5) .set_name(\"timertrans1\")\n timertrans1 .add_sources(lookaroundinplace1) .add_destinations(say1)\n \n timertrans2 = TimerTrans(5) .set_name(\"timertrans2\")\n timertrans2 .add_sources(say1) .add_destinations(stopbehavior1)\n \n return self\n"
},
{
"alpha_fraction": 0.5901704430580139,
"alphanum_fraction": 0.6183115243911743,
"avg_line_length": 44.87272644042969,
"blob_id": "79bd88a109ab3e696c8e2ececaf85f4678b47149",
"content_id": "944fcbbda768181660bf0982cd5a2f6746de0a73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2523,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 55,
"path": "/cozmo_fsm/examples/Texting.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo_fsm import *\n\nclass Texting(StateMachineProgram):\n def setup(self):\n \"\"\"\n startnode: StateNode()\n startnode =TM('1')=> do_null\n startnode =TM('2')=> do_time\n startnode =TM('3')=> do_comp\n \n do_null: Say(\"Full steam ahead\") =N=> Forward(20) =C=> startnode\n \n do_time: Say(\"Full steam ahead\") =T(2)=> Forward(20) =C=> startnode\n \n do_comp: Say(\"Full steam ahead\") =C=> Forward(20) =C=> startnode\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:17:21 2020:\n \n startnode = StateNode() .set_name(\"startnode\") .set_parent(self)\n do_null = Say(\"Full steam ahead\") .set_name(\"do_null\") .set_parent(self)\n forward1 = Forward(20) .set_name(\"forward1\") .set_parent(self)\n do_time = Say(\"Full steam ahead\") .set_name(\"do_time\") .set_parent(self)\n forward2 = Forward(20) .set_name(\"forward2\") .set_parent(self)\n do_comp = Say(\"Full steam ahead\") .set_name(\"do_comp\") .set_parent(self)\n forward3 = Forward(20) .set_name(\"forward3\") .set_parent(self)\n \n textmsgtrans1 = TextMsgTrans('1') .set_name(\"textmsgtrans1\")\n textmsgtrans1 .add_sources(startnode) .add_destinations(do_null)\n \n textmsgtrans2 = TextMsgTrans('2') .set_name(\"textmsgtrans2\")\n textmsgtrans2 .add_sources(startnode) .add_destinations(do_time)\n \n textmsgtrans3 = TextMsgTrans('3') .set_name(\"textmsgtrans3\")\n textmsgtrans3 .add_sources(startnode) .add_destinations(do_comp)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(do_null) .add_destinations(forward1)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(forward1) .add_destinations(startnode)\n \n timertrans1 = TimerTrans(2) .set_name(\"timertrans1\")\n timertrans1 .add_sources(do_time) .add_destinations(forward2)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(forward2) .add_destinations(startnode)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(do_comp) .add_destinations(forward3)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(forward3) .add_destinations(startnode)\n \n return self\n"
},
{
"alpha_fraction": 0.4631600081920624,
"alphanum_fraction": 0.47038745880126953,
"avg_line_length": 37.612403869628906,
"blob_id": "2c903789f6f8dacb53e94c4692459a7df82fee48",
"content_id": "2b78bcb252bd8598ecf1de8eb23bdbd9741279ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4981,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 129,
"path": "/cozmo_fsm/speech.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "try:\n import speech_recognition as sr\nexcept: pass \n\nimport time\nfrom threading import Thread\n\nfrom .evbase import Event\nfrom .events import SpeechEvent\n\n#================ Thesaurus ================\n\nclass Thesaurus():\n def __init__(self):\n self.words = dict()\n self.add_homophones('cozmo', \\\n [\"cozimo\",\"cosimo\",\"cosmo\", \\\n \"kozmo\",\"cosmos\",\"cozmos\"])\n self.add_homophones('right', ['write','wright'])\n self.add_homophones('1',['one','won'])\n self.add_homophones('cube1',['q1','coupon','cuban'])\n self.phrase_tree = dict()\n self.add_phrases('cube1',['cube 1'])\n self.add_phrases('cube2',['cube 2'])\n self.add_phrases('cube2',['cube to'])\n self.add_phrases('cube3',['cube 3'])\n self.add_phrases('paperclip',['paper clip'])\n self.add_phrases('deli-slicer',['deli slicer'])\n \n\n def add_homophones(self,word,homophones):\n if not isinstance(homophones,list):\n homophones = [homophones]\n for h in homophones:\n self.words[h] = word\n\n def lookup_word(self,word):\n return self.words.get(word,word)\n\n def add_phrases(self,word,phrases):\n if not isinstance(phrases,list):\n phrases = [phrases]\n for phrase in phrases:\n wdict = self.phrase_tree\n for pword in phrase.split(' '):\n wdict[pword] = wdict.get(pword,dict())\n wdict = wdict[pword]\n wdict[''] = word\n\n def substitute_phrases(self,words):\n result = []\n while words != []:\n word = words[0]\n del words[0]\n wdict = self.phrase_tree.get(word,None) \n if wdict is None:\n result.append(word)\n continue\n prefix = [word]\n while words != []:\n wdict2 = wdict.get(words[0],None)\n if wdict2 is None: break\n prefix.append(words[0])\n del words[0]\n wdict = wdict2\n subst = wdict.get('',None)\n if subst is not None:\n result.append(subst)\n else:\n result = result + prefix\n return result\n\n#================ SpeechListener ================\n\nclass SpeechListener():\n def __init__(self,robot, thesaurus=Thesaurus(), debug=False):\n self.robot = robot\n self.thesaurus = thesaurus\n self.debug = debug\n\n def speech_listener(self):\n warned_no_mic = False\n print('Launched speech listener.')\n self.rec = sr.Recognizer()\n while True:\n try:\n with sr.Microphone() as source:\n if warned_no_mic:\n print('Got a microphone!')\n warned_no_mic = False\n while True:\n if self.debug: print('--> Listening...')\n try:\n audio = self.rec.listen(source, timeout=8, phrase_time_limit=8)\n audio_len = len(audio.frame_data)\n except:\n continue\n if self.debug:\n print('--> Got audio data: length = {:,d} bytes.'. \\\n format(audio_len))\n if audio_len > 1000000: #500000:\n print('**** Audio segment too long. 
Try again.')\n continue\n try:\n utterance = self.rec.recognize_google(audio).lower()\n print(\"Raw utterance: '%s'\" % utterance)\n words = [self.thesaurus.lookup_word(w) for w in utterance.split(\" \")]\n words = self.thesaurus.substitute_phrases(words)\n string = \" \".join(words)\n print(\"Heard: '%s'\" % string)\n evt = SpeechEvent(string,words)\n self.robot.erouter.post(evt)\n except sr.RequestError as e:\n print(\"Could not request results from google speech recognition service; {0}\".format(e)) \n except sr.UnknownValueError:\n if self.debug:\n print('--> Recognizer found no words.')\n except Exception as e:\n print('Speech recognition got exception:', repr(e))\n except OSError as e:\n if not warned_no_mic:\n print(\"Couldn't get a microphone:\",e)\n warned_no_mic = True\n time.sleep(10)\n\n def start(self):\n self.thread = Thread(target=self.speech_listener)\n self.thread.daemon = True #ending fg program will kill bg program\n self.thread.start()\n"
},
{
"alpha_fraction": 0.5690370798110962,
"alphanum_fraction": 0.5781286954879761,
"avg_line_length": 36.84218215942383,
"blob_id": "0ffc07541390e81beb0fae3888af737324c6f966",
"content_id": "72202dfb494285b6d3fccbbb9b6a7ae6eb252161",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50596,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 1337,
"path": "/cozmo_fsm/nodes.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import time\nimport asyncio\nimport inspect\nimport types\nimport random\nimport numpy as np\n\ntry: import cv2\nexcept: pass\n\nfrom math import pi, sqrt, atan2, inf, nan\nfrom multiprocessing import Process, Queue\n\nimport cozmo\nfrom cozmo.util import distance_mm, speed_mmps, degrees, Distance, Angle, Pose\n\nfrom . import evbase\nfrom .base import *\nfrom .events import *\nfrom .cozmo_kin import wheelbase\nfrom .geometry import wrap_angle\nfrom .worldmap import WorldObject, FaceObj, CustomMarkerObj\n\n#________________ Ordinary Nodes ________________\n\nclass ParentCompletes(StateNode):\n def start(self,event=None):\n super().start(event)\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n '%s is causing %s to complete' % (self, self.parent))\n if self.parent:\n self.parent.post_completion()\n\nclass ParentSucceeds(StateNode):\n def start(self,event=None):\n super().start(event)\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n '%s is causing %s to succeed' % (self, self.parent))\n if self.parent:\n self.parent.post_success()\n\nclass ParentFails(StateNode):\n def start(self,event=None):\n super().start(event)\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n '%s is causing %s to fail' % (self, self.parent))\n if self.parent:\n self.parent.post_failure()\n\nclass Iterate(StateNode):\n \"\"\"Iterates over an iterable, posting DataEvents. Completes when done.\"\"\"\n def __init__(self,iterable=None):\n super().__init__()\n self.iterable = iterable\n\n class NextEvent(Event): pass\n\n def start(self,event=None):\n if self.running: return\n super().start(event)\n if isinstance(event, DataEvent):\n self.iterable = event.data\n if isinstance(self.iterable, int):\n self.iterable = range(self.iterable)\n if self.iterable is None:\n raise ValueError('~s has nothing to iterate on.' 
% repr(self))\n if not isinstance(event, self.NextEvent):\n self.iterator = self.iterable.__iter__()\n try:\n value = next(self.iterator)\n except StopIteration:\n self.post_completion()\n return\n self.post_data(value)\n\nclass MoveLift(StateNode):\n \"Move lift at specified speed.\"\n def __init__(self,speed):\n super().__init__()\n self.speed = speed\n\n def start(self,event=None):\n if self.running: return\n super().start(event)\n # Temporary hack supplied by Mark Wesley at Anki\n msg = cozmo._clad._clad_to_engine_iface.EnableLiftPower(True)\n self.robot.conn.send_msg(msg)\n self.robot.move_lift(self.speed)\n\n def stop(self):\n if not self.running: return\n self.robot.move_lift(0)\n super().stop()\n\nclass RelaxLift(StateNode):\n def start(self,event=None):\n if self.running: return\n super().start(event)\n # Temporary hack supplied by Mark Wesley at Anki\n msg = cozmo._clad._clad_to_engine_iface.EnableLiftPower(False)\n self.robot.conn.send_msg(msg)\n\nclass SetLights(StateNode):\n def __init__(self, object, light):\n super().__init__()\n self.object = object\n self.light = light\n\n def start(self,event=None):\n super().start(event)\n if self.object is not self.robot:\n self.object.set_lights(self.light)\n else:\n if self.light.on_color.int_color & 0x00FFFF00 == 0: # no green or blue component\n self.robot.set_all_backpack_lights(self.light)\n else:\n self.robot.set_backpack_lights_off()\n self.robot.set_center_backpack_lights(self.light)\n self.post_completion()\n\nclass DriveContinuous(StateNode):\n def __init__(self,path=[]):\n self.path = path\n self.polling_interval = 0.05\n super().__init__()\n\n def start(self,event=None):\n if isinstance(event, DataEvent) and isinstance(event.data,(list,tuple)):\n self.path = event.data\n if len(self.path) == 0:\n raise ValueError('Node %s has a null path' % repr(self))\n self.path_index = 0\n self.cur = self.path[self.path_index]\n self.prev = None\n self.last_dist = -1\n self.target_q = None\n self.reached_dist = False\n self.mode = None\n self.pause_counter = 0\n super().start(event)\n\n def stop(self):\n self.robot.stop_all_motors()\n super().stop()\n\n def poll(self):\n if not self.running: return\n # Quit if the robot is picked up.\n if self.robot.really_picked_up():\n print('** Robot was picked up.')\n self.robot.stop_all_motors()\n self.poll_handle.cancel()\n self.path_index = None\n #print('<><><>', self, 'punting')\n self.post_failure()\n return\n # See where we are\n x = self.robot.world.particle_filter.pose[0]\n y = self.robot.world.particle_filter.pose[1]\n q = self.robot.world.particle_filter.pose[2]\n dist = sqrt((self.cur.x-x)**2 + (self.cur.y-y)**2)\n delta_q = wrap_angle(q - self.target_q) if self.target_q is not None else inf\n\n # If we're pausing, print our position and exit\n if self.pause_counter > 0:\n #print('%x p%1d. 
x: %5.1f y: %5.1f q:%6.1f dist: %5.1f' %\n # (self.__hash__() & 0xffffffffffffffff, self.pause_counter, x, y, q*180/pi, dist))\n self.pause_counter -= 1\n return\n\n # See if we've passed the closest approach to the waypoint,\n # i.e., distance to the waypoint is consistently INCREASING\n # or waypoint is behind us.\n if not self.reached_dist:\n self.reached_dist = \\\n abs(delta_q) > 135*pi/180 or \\\n (dist - self.last_dist) > 0.1 and \\\n ( (self.mode == 'x' and np.sign(x-self.cur.x) == np.sign(self.cur.x-self.prev.x)) or\n (self.mode == 'y' and np.sign(y-self.cur.y) == np.sign(self.cur.y-self.prev.y)) )\n self.last_dist = dist\n\n # Is it time to switch to the next waypoint?\n reached_waypoint = (self.path_index == 0) or \\\n (self.reached_dist and \\\n (self.path_index < len(self.path)-1 or \\\n abs(delta_q) < 5*pi/180))\n\n # Advance to next waypoint if indicated\n if reached_waypoint:\n self.path_index += 1\n print('DriveContinuous: current position is (%.1f, %.1f) @ %.1f deg.' %\n (x, y, q*180/pi))\n print(' path index advanced to %d' % self.path_index, end='')\n if self.path_index == len(self.path):\n print('\\nDriveContinous: path complete. Stopping.')\n self.robot.stop_all_motors()\n self.post_completion()\n return\n elif self.path_index > len(self.path):\n # uncaught completion event\n print('\\nDriveContinuous: uncaught completion! Stopping.')\n self.stop()\n return\n self.prev = self.cur\n self.cur = self.path[self.path_index]\n self.last_dist = inf\n self.reached_dist = False\n self.target_q = atan2(self.cur.y-self.prev.y, self.cur.x-self.prev.x)\n print(': [%.1f, %.1f] tgtQ is %.1f deg.' % (self.cur.x, self.cur.y, self.target_q*180/pi))\n\n # Is the target behind us?\n delta_dist = sqrt((self.cur.x-x)**2 + (self.cur.y-y)**2)\n if delta_q < inf and abs(delta_q) > 135*pi/180:\n #self.target_q = wrap_angle(self.target_q + pi)\n print('New waypoint is behind us --> delta_q = %.1f deg., new target_q = %.1f deg., dist = %.1f' %\n (delta_q*180/pi, self.target_q*180/pi, delta_dist))\n self.drive_direction = +1 # was -1\n else:\n self.drive_direction = +1\n\n # Heading determines whether we're solving y=f(x) or x=f(y)\n if abs(self.target_q) < pi/4 or abs(abs(self.target_q)-pi) < pi/4:\n self.mode = 'x' # y = m*x + b\n xdiff = self.cur.x - self.prev.x\n xdiv = xdiff if xdiff != 0 else 0.01\n self.m = (self.cur.y-self.prev.y) / xdiv\n self.b = self.cur.y - self.m * self.cur.x\n #print(' y =', self.m, ' * x +', self.b)\n else:\n self.mode = 'y' # x = m*y + b\n ydiff = self.cur.y - self.prev.y\n ydiv = ydiff if ydiff != 0 else 0.01\n self.m = (self.cur.x-self.prev.x) / ydiv\n self.b = self.cur.x - self.m * self.cur.y\n #print(' x =', self.m, ' * y +', self.b)\n\n # Do we need to turn in place before setting off toward new waypoint?\n if abs(wrap_angle(q-self.target_q)) > 30*pi/180:\n self.saved_mode = self.mode\n self.mode = 'q'\n print('DriveContinuous: turning to %.1f deg. before driving to waypoint.' 
%\n (self.target_q*180/pi))\n\n # If we were moving, come to a full stop before trying to change direction\n if self.path_index > 1:\n self.robot.stop_all_motors()\n self.pause_counter = 5\n return\n\n # Haven't reached waypoint yet\n elif self.reached_dist:\n # But we have traveled far enough, and this is the last waypoint, so\n # come to a stop and then fix heading\n if self.mode != 'q':\n if abs(wrap_angle(q-self.target_q)) > 5:\n self.robot.stop_all_motors()\n self.robot.pause_counter = 5\n self.mode = 'q'\n print('DriveContinuous: final waypoint reached; adjusting heading to %.1f deg.' %\n (self.target_q*180/pi))\n elif self.mode == 'q' and abs(wrap_angle(q-self.target_q)) < 10*pi/180:\n # If within 10 degrees, cut motors and let inertia carry us the rest of the way.\n print('DriveContinuous: turn to heading complete: heading is %.1f deg.' %\n (q*180/pi))\n self.robot.stop_all_motors()\n self.mode = self.saved_mode\n self.pause_counter = 5\n return\n\n # Calculate error and correction based on present x/y/q position\n q_error = wrap_angle(q - self.target_q)\n intercept_distance = 100 # was 25\n if self.mode == 'x': # y = m*x + b\n target_y = self.m * x + self.b\n d_error = (y - target_y) * np.sign(pi/2 - abs(self.target_q)) # *** CHECK THIS\n correcting_q = - 0.8*q_error - 0.8*atan2(d_error,intercept_distance)\n elif self.mode == 'y': # x = m*y + b\n target_x = self.m * y + self.b\n d_error = (x - target_x) * np.sign(pi/2 - abs(self.target_q-pi/2)) # *** CHECK THIS\n correcting_q = - 0.8*q_error - 0.8*atan2(-d_error,intercept_distance)\n elif self.mode == 'q':\n d_error = nan\n correcting_q = - nan # 0.8*q_error\n else:\n print(\"Bad mode value '%s'\" % repr(self.mode))\n return\n\n # Calculate wheel speeds based on correction value\n if self.mode == 'q' or abs(q_error)*180/pi >= 15:\n # For large heading error, turn in place\n flag = \"<>\"\n speed = 0\n qscale = 150 # was 50\n correcting_q = nan # - 1.0 * np.sign(q_error) * min(abs(q_error), 25*pi/180)\n speedinc = qscale * 15*pi/180 * -np.sign(q_error)\n elif abs(q_error)*180/pi > 5 and abs(d_error) < 100:\n # For moderate heading error where distance error isn't huge,\n # slow down and turn more slowly\n flag = \"* \"\n speed = 50 # was 20\n qscale = 150\n speedinc = qscale * correcting_q\n else:\n # We're doing pretty well; go fast and make minor corrections\n flag = \" \"\n speed = 100\n qscale = 150 # was 150\n speedinc = qscale * correcting_q\n lspeed = self.drive_direction * (speed - self.drive_direction*speedinc)\n rspeed = self.drive_direction * (speed + self.drive_direction*speedinc)\n\n if self.mode == 'x': display_target = target_y\n elif self.mode == 'y': display_target = target_x\n elif self.mode == 'q': display_target = self.target_q*180/pi\n elif self.mode == 'p': display_target = inf\n else: display_target = nan\n \"\"\"\n print('%x %s x: %5.1f y: %5.1f q:%6.1f tgt:%6.1f derr: %5.1f qerr:%6.1f corq: %5.1f inc: %5.1f dist: %5.1f speeds: %4.1f/%4.1f' %\n (self.__hash__() & 0xffffffffffffffff,\n self.mode+flag, x, y, q*180/pi, display_target, d_error, q_error*180/pi,\n correcting_q*180/pi, speedinc, dist, lspeed, rspeed))\n \"\"\"\n self.robot.drive_wheel_motors(lspeed, rspeed, 500, 500)\n\nclass LookAtObject(StateNode):\n \"Continuously adjust head angle to fixate object.\"\n def __init__(self):\n super().__init__()\n self.object = None\n self.handle = None\n\n def start(self,event=None):\n self.set_polling_interval(0.1)\n self.handle = None\n super().start()\n\n def stop(self):\n if self.handle:\n 
self.handle.cancel()\n super().stop()\n\n def poll(self):\n if not self.running: return\n if isinstance(self.object, FaceObj) or isinstance(self.object, CustomMarkerObj):\n image_box = self.object.sdk_obj.last_observed_image_box\n camera_center = self.robot.camera.config.center.y\n delta = image_box.top_left_y + image_box.height/2 - camera_center\n adjust_level = 0.1\n if self.robot.left_wheel_speed.speed_mmps != 0 and self.robot.right_wheel_speed.speed_mmps != 0:\n adjust_level = 0.2\n if delta > 15:\n angle = self.robot.head_angle.radians - adjust_level\n elif delta < -15:\n angle = self.robot.head_angle.radians + adjust_level\n else:\n angle = self.robot.head_angle.radians\n angle = cozmo.robot.MAX_HEAD_ANGLE.radians if angle > cozmo.robot.MAX_HEAD_ANGLE.radians else angle\n angle = cozmo.robot.MIN_HEAD_ANGLE.radians if angle < cozmo.robot.MIN_HEAD_ANGLE.radians else angle\n else:\n if isinstance(self.object, WorldObject):\n rpose = self.robot.world.particle_filter.pose\n dx = self.object.x - rpose[0]\n dy = self.object.y - rpose[1]\n else:\n opos = self.object.pose.position\n rpos = self.robot.pose.position\n dx = opos.x - rpos.x\n dy = opos.y - rpos.y\n dist = sqrt(dx**2 + dy**2)\n if dist < 60:\n angle = -0.4\n elif dist < 80:\n angle = -0.3\n elif dist < 100:\n angle = -0.2\n elif dist < 140:\n angle = -0.1\n elif dist < 180:\n angle = 0\n else:\n angle = 0.1\n if abs(self.robot.head_angle.radians - angle) > 0.03:\n self.handle = self.robot.loop.call_soon(self.move_head, angle)\n\n def move_head(self,angle):\n try:\n self.robot.set_head_angle(cozmo.util.radians(angle), in_parallel=True, num_retries=2)\n except cozmo.exceptions.RobotBusy:\n print(\"LookAtObject: robot busy; can't move head to\",angle)\n pass\n\n\nclass SetPose(StateNode):\n def __init__(self, pose=Pose(0,0,0,angle_z=degrees(0))):\n super().__init__()\n self.pose = pose\n\n def start(self, event=None):\n super().start(event)\n if isinstance(event, DataEvent) and isinstance(event.data, Pose):\n pose = event.data\n else:\n pose = self.pose\n self.robot.world.particle_filter.set_pose(self.pose.position.x,\n self.pose.position.y,\n self.pose.rotation.angle_z.radians)\n\n\nclass Print(StateNode):\n \"Argument can be a string, or a function to be evaluated at print time.\"\n def __init__(self,spec=None):\n super().__init__()\n self.spec = spec\n\n def start(self,event=None):\n super().start(event)\n if isinstance(self.spec, types.FunctionType):\n text = self.spec()\n else:\n text = self.spec\n if text is None and isinstance(event, DataEvent):\n text = repr(event.data)\n print(text)\n self.post_completion()\n\n\nclass AbortAllActions(StateNode):\n def start(self,event=None):\n super().start(event)\n self.robot.abort_all_actions()\n self.post_completion()\n\n\nclass AbortHeadAction(StateNode):\n def start(self,event=None):\n super().start(event)\n actionType = cozmo._clad._clad_to_engine_cozmo.RobotActionType.UNKNOWN\n msg = cozmo._clad._clad_to_engine_iface.CancelAction(actionType=actionType)\n self.robot.conn.send_msg(msg)\n self.post_completion()\n\n\nclass StopAllMotors(StateNode):\n def start(self,event=None):\n super().start(event)\n self.robot.stop_all_motors()\n self.post_completion()\n\n\n#________________ Color Images ________________\n\nclass ColorImageBase(StateNode):\n\n def is_color(self,image):\n raw = image.raw_image\n for i in range(0, raw.height, 15):\n pixel = raw.getpixel((i,i))\n if pixel[0] != pixel[1]:\n return True\n return False\n\n\nclass ColorImageEnabled(ColorImageBase):\n \"\"\"Turn color images 
on or off and post completion when setting has taken effect.\"\"\"\n def __init__(self,enabled=True):\n self.enabled = enabled\n super().__init__()\n\n def start(self,event=None):\n super().start(event)\n if self.robot.camera.color_image_enabled == self.enabled:\n self.post_completion()\n else:\n self.robot.camera.color_image_enabled = self.enabled\n self.robot.world.add_event_handler(cozmo.world.EvtNewCameraImage, self.new_image)\n\n def new_image(self,event,**kwargs):\n is_color = self.is_color(event.image)\n if is_color:\n self.robot.world.latest_color_image = event.image\n if is_color == self.enabled:\n self.robot.world.remove_event_handler(cozmo.world.EvtNewCameraImage, self.new_image)\n self.post_completion()\n\n\nclass GetColorImage(ColorImageBase):\n \"\"\"Post one color image as a data event; leave color mode unchanged.\"\"\"\n\n def start(self,event=None):\n super().start(event)\n self.save_enabled = self.robot.camera.color_image_enabled\n if not self.save_enabled:\n self.robot.camera.color_image_enabled = True\n self.robot.world.add_event_handler(cozmo.world.EvtNewCameraImage, self.new_image)\n\n def new_image(self,event,**kwargs):\n if self.is_color(event.image):\n self.robot.world.latest_color_image = event.image\n self.robot.camera.color_image_enabled = self.save_enabled\n try:\n self.robot.world.remove_event_handler(cozmo.world.EvtNewCameraImage, self.new_image)\n except: pass\n self.post_data(event.image)\n\nclass SaveImage(StateNode):\n \"Save an image to a file.\"\n\n def __init__(self, filename=\"image\", filetype=\"jpg\", counter=0, verbose=True):\n super().__init__()\n self.filename = filename\n self.filetype = filetype\n self.counter = counter\n self.verbose = verbose\n\n def start(self,event=None):\n super().start(event)\n fname = self.filename\n if isinstance(self.counter, int):\n fname = fname + str(self.counter)\n self.counter = self.counter + 1\n fname = fname + \".\" + self.filetype\n image = np.array(self.robot.world.latest_image.raw_image)\n cv2.imwrite(fname, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n if self.verbose:\n print('Wrote',fname)\n\n\n#________________ Coroutine Nodes ________________\n\nclass CoroutineNode(StateNode):\n def __init__(self):\n super().__init__()\n self.handle = None\n self.abort_launch = False\n\n def start(self, event=None):\n super().start(event)\n if self.abort_launch:\n self.handle = None\n return\n cor = self.coroutine_launcher()\n if inspect.iscoroutine(cor):\n self.handle = self.robot.loop.create_task(cor)\n elif cor is False:\n self.handle = None\n else:\n print('cor=',cor,'type=',type(cor))\n raise ValueError(\"Result of %s launch_couroutine() is %s, not a coroutine.\" %\n (self,cor))\n\n def coroutine_launcher(self):\n raise Exception('%s lacks a coroutine_launcher() method' % self)\n\n def post_when_complete(self):\n \"Call this from within start() if the coroutine will signal completion.\"\n self.robot.loop.create_task(self.wait_for_completion())\n\n async def wait_for_completion(self):\n await self.handle\n if TRACE.trace_level >= TRACE.await_satisfied:\n print('TRACE%d:' % TRACE.await_satisfied, self,\n 'await satisfied:', self.handle)\n self.post_completion()\n\n def stop(self):\n if not self.running: return\n if self.handle: self.handle.cancel()\n super().stop()\n\n\nclass DriveWheels(CoroutineNode):\n def __init__(self,l_wheel_speed,r_wheel_speed,**kwargs):\n super().__init__()\n self.l_wheel_speed = l_wheel_speed\n self.r_wheel_speed = r_wheel_speed\n self.kwargs = kwargs\n\n def start(self,event=None):\n if 
(isinstance(event,DataEvent) and isinstance(event.data,(list,tuple)) and\n len(event.data) == 2):\n (lspeed,rspeed) = event.data\n if isinstance(lspeed,(int,float)) and isinstance(rspeed,(int,float)):\n self.l_wheel_speed = lspeed\n self.r_wheel_speed = rspeed\n self.abort_launch = False\n if self.robot.really_picked_up():\n self.abort_launch = True\n super().start(event)\n self.post_failure()\n return\n super().start(event)\n\n def coroutine_launcher(self):\n return self.robot.drive_wheels(self.l_wheel_speed,self.r_wheel_speed,**self.kwargs)\n\n def stop_wheels(self):\n try:\n driver = self.robot.drive_wheels(0,0)\n # driver is either a co-routine or None\n if driver: driver.send(None) # will raise StopIteration\n except StopIteration: pass\n\n def stop(self):\n if not self.running: return\n self.stop_wheels()\n super().stop()\n\n\nclass DriveForward(DriveWheels):\n def __init__(self, distance=50, speed=50, **kwargs):\n if isinstance(distance, cozmo.util.Distance):\n distance = distance.distance_mm\n if isinstance(speed, cozmo.util.Speed):\n speed = speed.speed_mmps\n if distance < 0:\n distance = -distance\n speed = -speed\n self.distance = distance\n self.speed = speed\n self.kwargs = kwargs\n super().__init__(speed,speed,**self.kwargs)\n self.polling_interval = 0.1\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Distance):\n self.distance = event.data.distance_mm\n self.start_position = self.robot.pose.position\n super().start(event)\n\n def poll(self):\n if not self.running: return\n \"\"\"See how far we've traveled\"\"\"\n p0 = self.start_position\n p1 = self.robot.pose.position\n diff = (p1.x - p0.x, p1.y - p0.y)\n dist = sqrt(diff[0]*diff[0] + diff[1]*diff[1])\n if dist >= self.distance:\n self.poll_handle.cancel()\n self.stop_wheels()\n self.post_completion()\n\nclass SmallTurn(CoroutineNode):\n \"\"\"Estimates how many polling cycles to run the wheels; doesn't use odometry.\"\"\"\n def __init__(self, angle=5):\n self.angle = angle\n self.counter = 0\n self.polling_interval = 0.025\n super().__init__()\n\n def start(self,event=None):\n # constants were determined empirically for speed 50\n self.counter = round((abs(self.angle) + 5) / 1.25) if self.angle else 0\n self.abort_launch = False\n if self.robot.really_picked_up():\n self.abort_launch = True\n super().start(event)\n self.post_failure()\n return\n super().start(event)\n\n def coroutine_launcher(self):\n if self.angle:\n speed = 50 if self.angle < 0 else -50\n return self.robot.drive_wheels(speed,-speed,500,500)\n else:\n self.robot.stop_all_motors()\n return False\n\n def poll(self):\n if not self.running: return\n self.counter -= 1\n if self.counter <= 0:\n self.poll_handle.cancel()\n self.robot.stop_all_motors()\n self.post_completion()\n\nclass DriveTurn(DriveWheels):\n def __init__(self, angle=90, speed=50, **kwargs):\n if isinstance(angle, cozmo.util.Angle):\n angle = angle.degrees\n if isinstance(speed, cozmo.util.Speed):\n speed = speed.speed_mmps\n if speed <= 0:\n raise ValueError('speed parameter must be positive')\n self.angle = angle\n self.speed = speed\n self.kwargs = kwargs\n self.polling_interval = 0.05\n super().__init__(0,0,**self.kwargs)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Angle):\n self.angle = event.data.degrees\n if self.angle > 0:\n self.l_wheel_speed = -self.speed\n self.r_wheel_speed = self.speed\n else:\n self.l_wheel_speed = 
self.speed\n self.r_wheel_speed = -self.speed\n self.last_heading = self.robot.pose.rotation.angle_z.degrees\n self.traveled = 0\n super().start(event)\n\n def poll(self):\n if not self.running: return\n \"\"\"See how far we've traveled\"\"\"\n p0 = self.last_heading\n p1 = self.robot.pose.rotation.angle_z.degrees\n self.last_heading = p1\n # Assume we're polling quickly enough that diff will be small;\n # typically only about 1 degree. So diff will be large only\n # if the heading has passed through 360 degrees since the last\n # call to poll(). Use 90 degrees as an arbitrary large threshold.\n diff = p1 - p0\n if diff < -90.0:\n diff += 360.0\n elif diff > 90.0:\n diff -= 360.0\n self.traveled += diff\n if abs(self.traveled) > abs(self.angle):\n self.poll_handle.cancel()\n self.stop_wheels()\n self.post_completion()\n\n\nclass DriveArc(DriveWheels):\n \"\"\"Negative radius means right turn; negative angle means drive\n backwards. This node can be passed a DataEvent with a dict\n containing any of the arguments accepted by __init__: radius,\n angle, distance, speed, and angspeed. Values must already be in\n the appropriate units (degrees, mm, deg/sec, or mm/sec).\"\"\"\n def __init__(self, radius=0, angle=None, distance=None,\n speed=None, angspeed=None, **kwargs):\n if isinstance(radius, cozmo.util.Distance):\n radius = radius.distance_mm\n if isinstance(angle, cozmo.util.Angle):\n angle = angle.degrees\n if isinstance(speed, cozmo.util.Speed):\n speed = speed.speed_mmps\n if isinstance(angspeed, cozmo.util.Angle):\n angspeed = angspeed.degrees\n self.calculate_wheel_speeds(radius, angle, distance, speed, angspeed)\n super().__init__(self.l_wheel_speed, self.r_wheel_speed, **kwargs)\n # Call parent init before setting polling interval.\n self.polling_interval = 0.05\n\n def calculate_wheel_speeds(self, radius=0, angle=None, distance=None,\n speed=None, angspeed=None):\n if radius != 0:\n if angle is not None:\n pass\n elif distance is not None:\n angle = self.dist2ang(distance, radius)\n else:\n raise ValueError('DriveArc requires an angle or distance.')\n\n if speed is not None:\n pass\n elif angspeed is not None:\n speed = self.ang2dist(angspeed, radius)\n else:\n speed = 40 # degrees/second\n if angle < 0:\n speed = - speed\n\n self.angle = angle\n self.l_wheel_speed = speed * (1 - wheelbase / radius)\n self.r_wheel_speed = speed * (1 + wheelbase / radius)\n\n else: # radius is 0\n if angspeed is None:\n angspeed = 40 # degrees/second\n s = angspeed\n if angle < 0:\n s = -s\n self.angle = angle\n self.l_wheel_speed = -s\n self.r_wheel_speed = s\n\n def ang2dist(self, angle, radius):\n return (angle / 360) * 2 * pi * abs(radius)\n\n def dist2ang(self, distance, radius):\n return (distance / abs(2 * pi * radius)) * 360\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event,DataEvent) and isinstance(event.data,dict):\n self.calculate_wheel_speeds(**event.data)\n self.last_heading = self.robot.pose.rotation.angle_z.degrees\n self.traveled = 0\n super().start(event)\n\n def poll(self):\n if not self.running: return\n \"\"\"See how far we've traveled\"\"\"\n p0 = self.last_heading\n p1 = self.robot.pose.rotation.angle_z.degrees\n self.last_heading = p1\n # Assume we're polling quickly enough that diff will be small;\n # typically only about 1 degree. So diff will be large only\n # if the heading has passed through 360 degrees since the last\n # call to poll(). 
Use 90 degrees as an arbitrary large threshold.\n diff = p1 - p0\n if diff < -90.0:\n diff += 360.0\n elif diff > 90.0:\n diff -= 360.0\n self.traveled += diff\n\n if abs(self.traveled) > abs(self.angle):\n self.poll_handle.cancel()\n self.stop_wheels()\n self.post_completion()\n\n\n#________________ Cube Disconnect/Reconnect ________________\n\nclass DisconnectFromCubes(StateNode):\n def start(self, event=None):\n super().start(event)\n self.robot.world.disconnect_from_cubes()\n\n\nclass ConnectToCubes(CoroutineNode):\n def start(self, event=None):\n super().start(event)\n self.post_when_complete()\n\n def coroutine_launcher(self):\n return self.robot.world.connect_to_cubes()\n\n\n#________________ Action Nodes ________________\n\nclass ActionNode(StateNode):\n relaunch_delay = 0.050 # 50 milliseconds\n\n def __init__(self, abort_on_stop=True):\n \"\"\"Call this method only after the subclass __init__ has set\n up self.action_kwargs\"\"\"\n self.abort_on_stop = abort_on_stop\n super().__init__()\n if 'in_parallel' not in self.action_kwargs:\n self.action_kwargs['in_parallel'] = True\n if 'num_retries' not in self.action_kwargs:\n self.action_kwargs['num_retries'] = 2\n self.cozmo_action_handle = None\n self.abort_launch = False\n\n def start(self,event=None):\n super().start(event)\n self.retry_count = 0\n if not self.abort_launch:\n self.launch_or_retry()\n\n def launch_or_retry(self):\n try:\n result = self.action_launcher()\n except cozmo.exceptions.RobotBusy:\n if TRACE.trace_level >= TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop, self, 'launch_action raised RobotBusy')\n self.handle = self.robot.loop.call_later(self.relaunch_delay, self.launch_or_retry)\n return\n if isinstance(result, cozmo.action.Action):\n self.cozmo_action_handle = result\n elif result is None: # Aborted\n return\n else:\n raise ValueError(\"Result of %s launch_action() is %s, not a cozmo.action.Action.\" %\n (self,result))\n self.post_when_complete()\n\n def action_launcher(self):\n raise Exception('%s lacks an action_launcher() method' % self)\n\n def post_when_complete(self):\n self.robot.loop.create_task(self.wait_for_completion())\n\n async def wait_for_completion(self):\n async_task = self.cozmo_action_handle.wait_for_completed()\n await async_task\n if TRACE.trace_level >= TRACE.await_satisfied:\n print('TRACE%d:' % TRACE.await_satisfied, self,\n 'await satisfied:', self.cozmo_action_handle)\n # check status for 'completed'; if not, schedule relaunch or post failure\n if self.running:\n if self.cozmo_action_handle.state == 'action_succeeded':\n self.post_completion()\n elif self.cozmo_action_handle.failure_reason[0] == 'cancelled':\n print('CANCELLED: ***>',self,self.cozmo_action_handle)\n self.post_completion()\n elif self.cozmo_action_handle.failure_reason[0] == 'retry':\n if self.retry_count < self.action_kwargs['num_retries']:\n print(\"*** ACTION %s of %s FAILED WITH CODE 'retry': TRYING AGAIN\" %\n (self.cozmo_action_handle, self.name))\n self.retry_count += 1\n self.launch_or_retry()\n else:\n print(\"*** %s ACTION RETRY COUNT EXCEEDED: FAILING\" % self.name)\n self.post_failure(self.cozmo_action_handle)\n else:\n print(\"*** ACTION NODE %s %s FAILED DUE TO %s AND CAN'T BE RETRIED.\" %\n (self.name,\n self.cozmo_action_handle,\n self.cozmo_action_handle.failure_reason[0] or 'unknown reason'))\n self.post_failure(self.cozmo_action_handle)\n\n def stop(self):\n if not self.running: return\n if self.cozmo_action_handle and self.abort_on_stop and \\\n 
self.cozmo_action_handle.is_running:\n self.cozmo_action_handle.abort()\n super().stop()\n\n\nclass Say(ActionNode):\n \"\"\"Speaks some text, then posts a completion event.\"\"\"\n\n class SayDataEvent(Event):\n def __init__(self,text=None):\n self.text = text\n\n def __init__(self, text=\"I'm speechless\",\n abort_on_stop=False, **action_kwargs):\n self.text = text\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, self.SayDataEvent):\n utterance = event.text\n else:\n utterance = self.text\n if isinstance(utterance, (list,tuple)):\n utterance = random.choice(utterance)\n if not isinstance(utterance, str):\n utterance = repr(utterance)\n self.utterance = utterance\n print(\"Speaking: '\",utterance,\"'\",sep='')\n super().start(event)\n\n def action_launcher(self):\n if self.utterance.rstrip() == '':\n # robot.say_text() action would fail on empty string\n self.post_completion()\n return None\n else:\n return self.robot.say_text(self.utterance, **self.action_kwargs)\n\n\nclass Forward(ActionNode):\n \"\"\" Moves forward a specified distance. Can accept a Distance as a Dataevent.\"\"\"\n def __init__(self, distance=distance_mm(50),\n speed=speed_mmps(50), abort_on_stop=True, **action_kwargs):\n if isinstance(distance, (int,float)):\n distance = distance_mm(distance)\n elif not isinstance(distance, cozmo.util.Distance):\n raise ValueError('%s distance must be a number or a cozmo.util.Distance' % self)\n if isinstance(speed, (int,float)):\n speed = speed_mmps(speed)\n elif not isinstance(speed, cozmo.util.Speed):\n raise ValueError('%s speed must be a number or a cozmo.util.Speed' % self)\n self.distance = distance\n self.speed = speed\n if 'should_play_anim' not in action_kwargs:\n action_kwargs['should_play_anim'] = False\n self.action_kwargs = action_kwargs\n # super's init must come last because it checks self.action_kwargs\n super().__init__(abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Distance):\n self.distance = event.data\n self.abort_launch = False\n if self.robot.really_picked_up():\n self.abort_launch = True\n super().start(event)\n self.post_failure()\n return\n super().start(event)\n\n def action_launcher(self):\n return self.robot.drive_straight(self.distance, self.speed,\n **self.action_kwargs)\n\n\nclass Turn(ActionNode):\n \"\"\"Turns by a specified angle. 
Can accept an Angle as a DataEvent.\"\"\"\n def __init__(self, angle=degrees(90), abort_on_stop=True, **action_kwargs):\n if isinstance(angle, (int,float)):\n angle = degrees(angle)\n elif angle is None:\n pass\n elif not isinstance(angle, cozmo.util.Angle):\n raise ValueError('%s angle must be a number or a cozmo.util.Angle' % self)\n self.angle = angle\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Angle):\n self.angle = event.data\n self.abort_launch = False\n if self.robot.really_picked_up():\n self.abort_launch = True\n super().start(event)\n self.post_failure()\n return\n super().start(event)\n\n def action_launcher(self):\n if self.angle is None:\n return None\n elif not isinstance(self.angle, cozmo.util.Angle):\n print(\"*** WARNING: node\", self.name, \"self.angle =\", self.angle, \"is not an instance of cozmo.util.Angle\")\n self.angle = degrees(self.angle) if isinstance(self.angle, (int,float)) else degrees(0)\n return self.robot.turn_in_place(self.angle, **self.action_kwargs)\n\nclass GoToPose(ActionNode):\n \"Uses SDK's go_to_pose method.\"\n def __init__(self, pose, abort_on_stop=True, **action_kwargs):\n self.pose = pose\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop)\n\n def action_launcher(self):\n return self.robot.go_to_pose(self.pose, **self.action_kwargs)\n\nclass SetHeadAngle(ActionNode):\n def __init__(self, angle=degrees(0), abort_on_stop=True, **action_kwargs):\n if isinstance(angle, (int,float)):\n angle = degrees(angle)\n elif not isinstance(angle, cozmo.util.Angle):\n raise ValueError('%s angle must be a number or a cozmo.util.Angle' % self)\n self.angle = angle\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Angle):\n self.angle = event.data\n super().start(event)\n\n def action_launcher(self):\n return self.robot.set_head_angle(self.angle, **self.action_kwargs)\n\nclass SetLiftHeight(ActionNode):\n def __init__(self, height=0, abort_on_stop=True, **action_kwargs):\n \"\"\"height is a percentage from 0 to 1\"\"\"\n self.height = height\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop)\n\n def action_launcher(self):\n # Temporary hack supplied by Mark Wesley at Anki\n msg = cozmo._clad._clad_to_engine_iface.EnableLiftPower(True)\n self.robot.conn.send_msg(msg)\n return self.robot.set_lift_height(self.height, **self.action_kwargs)\n\nclass SetLiftAngle(SetLiftHeight):\n def __init__(self, angle, abort_on_stop=True, **action_kwargs):\n\n #def get_theta(height):\n # return math.asin((height-45)/66)\n\n if isinstance(angle, cozmo.util.Angle):\n angle = angle.degrees\n self.angle = angle\n super().__init__(0, abort_on_stop=abort_on_stop, **action_kwargs)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and isinstance(event.data, cozmo.util.Angle):\n self.angle = event.data.degrees\n min_theta = cozmo.robot.MIN_LIFT_ANGLE.degrees\n max_theta = cozmo.robot.MAX_LIFT_ANGLE.degrees\n angle_range = max_theta - min_theta\n self.height = (self.angle - min_theta) / angle_range\n super().start(event)\n\n\nclass SdkDockWithCube(ActionNode):\n \"Uses SDK's dock_with_cube method.\"\n def __init__(self, object=None, abort_on_stop=False, **action_kwargs):\n self.object = object\n 
self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop=abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and \\\n isinstance(event.data,cozmo.objects.LightCube):\n self.object = event.data\n super().start(event)\n\n def action_launcher(self):\n if self.object is None:\n raise ValueError('No cube to dock with')\n return self.robot.dock_with_cube(self.object, **self.action_kwargs)\n\n\nclass SdkPickUpObject(ActionNode):\n \"Uses SDK's pick_up_object method.\"\n def __init__(self, object=None, abort_on_stop=False, **action_kwargs):\n self.object = object\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop=abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and \\\n isinstance(event.data,cozmo.objects.LightCube):\n self.object = event.data\n super().start(event)\n\n def action_launcher(self):\n if self.object is None:\n raise ValueError('No object to pick up')\n return self.robot.pickup_object(self.object, **self.action_kwargs)\n\n\nclass SdkPlaceObjectOnGroundHere(ActionNode):\n \"Uses SDK's place_object_on_ground_here method.\"\n def __init__(self, object=None, abort_on_stop=False, **action_kwargs):\n self.object = object\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop=abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and \\\n isinstance(event.data,cozmo.objects.LightCube):\n self.object = event.data\n super().start(event)\n\n def action_launcher(self):\n if self.object is None:\n raise ValueError('No object to place')\n return self.robot.place_object_on_ground_here(self.object, **self.action_kwargs)\n\nclass SdkPlaceOnObject(ActionNode):\n \"Uses SDK's place_on_object method.\"\n def __init__(self, object=None, abort_on_stop=False, **action_kwargs):\n self.object = object\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop=abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and \\\n isinstance(event.data,cozmo.objects.LightCube):\n self.object = event.data\n super().start(event)\n\n def action_launcher(self):\n if self.object is None:\n raise ValueError('No object to place')\n return self.robot.place_on_object(self.object, **self.action_kwargs)\n\n\nclass SdkRollCube(ActionNode):\n \"Uses SDK's roll_cube method.\"\n def __init__(self, object=None, abort_on_stop=True, **action_kwargs):\n self.object = object\n self.action_kwargs = action_kwargs\n super().__init__(abort_on_stop=abort_on_stop)\n\n def start(self,event=None):\n if self.running: return\n if isinstance(event, DataEvent) and \\\n isinstance(event.data,cozmo.objects.LightCube):\n self.object = event.data\n super().start(event)\n\n def action_launcher(self):\n if self.object is None:\n raise ValueError('No object to roll')\n return self.robot.roll_cube(self.object, **self.action_kwargs)\n\n# Note: additional nodes for object manipulation are in pickup.fsm.\n\n#________________ Animations ________________\n\nclass AnimationNode(ActionNode):\n def __init__(self, anim_name='anim_bored_01', **kwargs):\n self.anim_name = anim_name\n self.action_kwargs = kwargs\n super().__init__()\n\n def action_launcher(self):\n return self.robot.play_anim(self.anim_name, **self.action_kwargs)\n\nclass AnimationTriggerNode(ActionNode):\n def __init__(self, trigger=cozmo.anim.Triggers.CubePouncePounceNormal, **kwargs):\n if not isinstance(trigger, 
cozmo.anim._AnimTrigger):\n raise TypeError('%s is not an instance of cozmo.anim._AnimTrigger' %\n repr(trigger))\n self.trigger = trigger\n self.action_kwargs = kwargs\n super().__init__()\n\n def action_launcher(self):\n return self.robot.play_anim_trigger(self.trigger, **self.action_kwargs)\n\n#________________ Behaviors ________________\n\nclass StartBehavior(StateNode):\n def __init__(self, behavior=None, stop_on_exit=True):\n if not isinstance(behavior, cozmo.behavior._BehaviorType):\n raise ValueError(\"'%s' isn't an instance of cozmo.behavior._BehaviorType\" %\n repr(behavior))\n self.behavior = behavior\n self.behavior_handle = None\n self.stop_on_exit = stop_on_exit\n super().__init__()\n\n def __repr__(self):\n if self.behavior_handle:\n return '<%s %s active=%s>' % \\\n (self.__class__.__name__, self.name, self.behavior_handle.is_active)\n else:\n return super().__repr__()\n\n def start(self,event=None):\n if self.running: return\n super().start(event)\n try:\n if self.robot.behavior_handle:\n self.robot.behavior_handle.stop()\n except: pass\n finally:\n self.robot.behavior_handle = None\n self.behavior_handle = self.robot.start_behavior(self.behavior)\n self.robot.behavior_handle = self.behavior_handle\n self.post_completion()\n\n def stop(self):\n if not self.running: return\n if self.stop_on_exit and self.behavior_handle is self.robot.behavior_handle:\n self.robot.behavior_handle.stop()\n self.robot.behavior_handle = None\n super().stop()\n\nclass StopBehavior(StateNode):\n def start(self,event=None):\n if self.running: return\n super().start(event)\n try:\n if self.robot.behavior_handle:\n self.robot.behavior_handle.stop()\n except: pass\n self.robot.behavior_handle = None\n self.post_completion()\n\nclass FindFaces(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.FindFaces,stop_on_exit)\n\nclass KnockOverCubes(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.KnockOverCubes,stop_on_exit)\n\nclass LookAroundInPlace(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.LookAroundInPlace,stop_on_exit)\n\nclass PounceOnMotion(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.PounceOnMotion,stop_on_exit)\n\nclass RollBlock(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.RollBlock,stop_on_exit)\n\nclass StackBlocks(StartBehavior):\n def __init__(self,stop_on_exit=True):\n super().__init__(cozmo.robot.behavior.BehaviorTypes.StackBlocks,stop_on_exit)\n\n#________________ Multiprocessing ________________\n\nclass LaunchProcess(StateNode):\n\n def __init__(self):\n super().__init__()\n self.process = None\n\n @staticmethod\n def process_workhorse(reply_token):\n \"\"\"\n Override this static method with the code to do your computation.\n The method must be static because we can't pickle methods of StateNode\n instances.\n \"\"\"\n print('*** Failed to override process_workhorse for LaunchProcess node ***')\n print('Sleeping for 2 seconds...')\n time.sleep(2)\n # A process returns its result to the caller as an event.\n result = 42\n\n LaunchProcess.post_event(reply_token,DataEvent(result)) # source must be None for pickling\n LaunchProcess.post_event(reply_token,CompletionEvent()) # we can post more than one event\n\n @staticmethod\n def post_event(reply_token,event):\n id,queue = 
reply_token\n event_pair = (id, event)\n queue.put(event_pair)\n\n def create_process(self, reply_token):\n p = Process(target=self.__class__.process_workhorse,\n args=[reply_token])\n return p\n\n def start(self, event=None):\n super().start(event)\n reply_token = (id(self), self.robot.erouter.interprocess_queue)\n self.process = self.create_process(reply_token)\n self.robot.erouter.add_process_node(self)\n self.process.start()\n print('Launched', self.process)\n\n def stop(self):\n if self.process:\n print('Exiting',self.process,self.process.is_alive())\n self.process = None\n super().stop()\n self.robot.erouter.delete_process_node(self)\n\n"
},
{
"alpha_fraction": 0.6501156687736511,
"alphanum_fraction": 0.6508275270462036,
"avg_line_length": 33.048484802246094,
"blob_id": "026137d2dce64638cddfdc9e33a7831c655c6ba3",
"content_id": "2d01d0d865c7a84c0b4fad7d21f070c946b23feb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5619,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 165,
"path": "/event_monitor.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nEvent Monitor Tool for Cozmo\n============================\n\nUsage:\n monitor(robot) to monitor all event types in the dispatch table\n monitor(robot, Event) to monitor a specific type of event\n\n unmonitor(robot[, Event]) to turn off monitoring\n\nAuthor: David S. Touretzky, Carnegie Mellon University\n=====\n\nChangeLog\n=========\n\n* Add event handlers to world instead of to robot.\n Dave Touretzky\n - Many events (e.g. face stuff) aren't reliably sent to robot.\n\n* Renaming and more face support\n Dave Touretzky\n - Renamed module to event_monitor\n - Renamed monitor_on/off to monitor/unmonitor\n - Added monitor_face to handle face events\n\n* Created\n Dave Touretzky\n\n\"\"\"\n\nimport re\n\nimport cozmo\n\n\ndef print_prefix(evt):\n robot.world.last_event = evt\n print('-> ', evt.event_name, ' ', sep='', end='')\n\n\ndef print_object(obj):\n if isinstance(obj,cozmo.objects.LightCube):\n cube_id = next(k for k,v in robot.world.light_cubes.items() if v==obj)\n print('LightCube-',cube_id,sep='',end='')\n else:\n r = re.search('<(\\w*)', obj.__repr__())\n print(r.group(1), end='')\n\n\ndef monitor_generic(evt, **kwargs):\n print_prefix(evt)\n if 'behavior_type_name' in kwargs:\n print(kwargs['behavior_type_name'], '', end='')\n print(' ', end='')\n if 'obj' in kwargs:\n print_object(kwargs['obj'])\n print(' ', end='')\n if 'action' in kwargs:\n action = kwargs['action']\n if isinstance(action, cozmo.anim.Animation):\n print(action.anim_name, '', end='')\n elif isinstance(action, cozmo.anim.AnimationTrigger):\n print(action.trigger.name, '', end='')\n print(set(kwargs.keys()))\n\n\ndef monitor_EvtActionCompleted(evt, action, state, failure_code, failure_reason, **kwargs):\n print_prefix(evt)\n print_object(action)\n if isinstance(action, cozmo.anim.Animation):\n print('', action.anim_name, end='')\n elif isinstance(action, cozmo.anim.AnimationTrigger):\n print('', action.trigger.name, end='')\n print('',state,end='')\n if failure_code is not None:\n print('',failure_code,failure_reason,end='')\n print()\n\n\ndef monitor_EvtObjectTapped(evt, *, obj, tap_count, tap_duration, tap_intensity, **kwargs):\n print_prefix(evt)\n print_object(obj)\n print(' count=', tap_count,\n ' duration=', tap_duration, ' intensity=', tap_intensity, sep='')\n\n\ndef monitor_EvtObjectMovingStarted(evt, *, obj, acceleration, **kwargs):\n print_prefix(evt)\n print_object(obj)\n print(' accleration=', acceleration, sep='')\n\n\ndef monitor_EvtObjectMovingStopped(evt, *, obj, move_duration, **kwargs):\n print_prefix(evt)\n print_object(obj)\n print(' move_duration=%3.1f secs' %move_duration)\n\n\ndef monitor_face(evt, face, **kwargs):\n print_prefix(evt)\n name = face.name if face.name != '' else '[unknown face]'\n expr = face.expression if face.expression is not None else 'expressionless'\n kw = set(kwargs.keys()) if len(kwargs) > 0 else '{}'\n print(name, ' (%s) ' % expr, ' face_id=', face.face_id, ' ', kw, sep='')\n\ndispatch_table = {\n cozmo.action.EvtActionStarted : monitor_generic,\n cozmo.action.EvtActionCompleted : monitor_EvtActionCompleted,\n cozmo.behavior.EvtBehaviorStarted : monitor_generic,\n cozmo.behavior.EvtBehaviorStopped : monitor_generic,\n cozmo.anim.EvtAnimationsLoaded : monitor_generic,\n cozmo.anim.EvtAnimationCompleted : monitor_EvtActionCompleted,\n cozmo.objects.EvtObjectAppeared : monitor_generic,\n cozmo.objects.EvtObjectDisappeared : monitor_generic,\n cozmo.objects.EvtObjectMovingStarted : monitor_EvtObjectMovingStarted,\n cozmo.objects.EvtObjectMovingStopped : 
monitor_EvtObjectMovingStopped,\n cozmo.objects.EvtObjectObserved : monitor_generic,\n cozmo.objects.EvtObjectTapped : monitor_EvtObjectTapped,\n cozmo.faces.EvtFaceAppeared : monitor_face,\n cozmo.faces.EvtFaceObserved : monitor_face,\n cozmo.faces.EvtFaceDisappeared : monitor_face,\n}\n\nexcluded_events = { # Occur too frequently to monitor by default\n cozmo.objects.EvtObjectObserved,\n cozmo.faces.EvtFaceObserved,\n}\n\n\ndef monitor(_robot, evt_class=None):\n if not isinstance(_robot, cozmo.robot.Robot):\n raise TypeError('First argument must be a Robot instance')\n if evt_class is not None and not issubclass(evt_class, cozmo.event.Event):\n raise TypeError('Second argument must be an Event subclass')\n global robot\n robot = _robot\n if evt_class in dispatch_table:\n robot.world.add_event_handler(evt_class,dispatch_table[evt_class])\n elif evt_class is not None:\n robot.world.add_event_handler(evt_class,monitor_generic)\n else:\n for k,v in dispatch_table.items():\n if k not in excluded_events:\n robot.world.add_event_handler(k,v)\n\n\ndef unmonitor(_robot, evt_class=None):\n if not isinstance(_robot, cozmo.robot.Robot):\n raise TypeError('First argument must be a Robot instance')\n if evt_class is not None and not issubclass(evt_class, cozmo.event.Event):\n raise TypeError('Second argument must be an Event subclass')\n global robot\n robot = _robot\n try:\n if evt_class in dispatch_table:\n robot.world.remove_event_handler(evt_class,dispatch_table[evt_class])\n elif evt_class is not None:\n robot.world.remove_event_handler(evt_class,monitor_generic)\n else:\n for k,v in dispatch_table.items():\n robot.world.remove_event_handler(k,v)\n except Exception:\n pass\n\n"
},
{
"alpha_fraction": 0.5453275442123413,
"alphanum_fraction": 0.5647398829460144,
"avg_line_length": 42.43096160888672,
"blob_id": "e88e7d343f227bd51367ec60258c9516b10a4ced",
"content_id": "49e1024fe39cce17c48c6ec99cf9f858f589442f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20760,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 478,
"path": "/cozmo_fsm/obstavoidance.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo.util import Pose\nfrom numpy import matrix, tan, arctan2\n\ntry: from cv2 import Rodrigues\nexcept: pass\n\nfrom .nodes import *\nfrom .transitions import *\nfrom .geometry import wrap_angle\nfrom .pilot import PilotToPose, PilotCheckStart\nfrom .worldmap import WallObj\nfrom time import sleep\n\n\nclass GoToWall(StateNode):\n def __init__(self, wall=None, door=-1):\n super().__init__()\n self.object = wall\n if isinstance(wall, int):\n self.wall_name = 'Wall-'+str(wall)\n elif isinstance(wall, str):\n self.wall_name = wall\n elif wall is None:\n self.wall_name = None\n else:\n raise ValueError('wall should be an integer, string, or None, not %r' % wall)\n self.door_id = door\n\n def start(self,event=None):\n if self.wall_name is None:\n self.wall_name = self.find_closest_wall()\n\n if self.wall_name in self.robot.world.world_map.objects:\n self.wobj = self.robot.world.world_map.objects[self.wall_name]\n else:\n print(\"GoToWall: %s is not in the map.\" % self.wall_name)\n super().start(event)\n self.post_failure()\n return\n\n if self.wobj != -1:\n if self.door_id == -1:\n self.door_coordinates = -1\n print(\"Going to closest door\")\n super().start(event)\n elif self.door_id in self.wobj.door_ids:\n self.door_coordinates = self.wobj.markers[self.door_id][1]\n print(self.door_coordinates)\n super().start(event)\n else:\n print(self.door_id,\"is not a door\")\n super().start(event)\n self.post_failure()\n\n def find_closest_wall(self):\n (x,y,theta) = self.robot.world.particle_filter.pose\n walls = []\n for obj in self.robot.world.world_map.objects.values():\n if isinstance(obj,WallObj):\n distsq = (x-obj.x)**2 + (y-obj.y)**2\n walls.append((distsq,obj))\n walls = sorted(walls, key=lambda x: x[0])\n return 'Wall-%d' % walls[0][1].id if walls else None\n\n def pick_side(self, dist):\n wall = self.object\n door_coordinates = self.door_coordinates\n x = self.wobj.x \n y = self.wobj.y\n ang = self.wobj.theta\n rx = self.robot.world.particle_filter.pose[0]\n ry = self.robot.world.particle_filter.pose[1]\n l = self.wobj.length/2\n if door_coordinates == -1:\n door_ids = self.wobj.door_ids\n sides = []\n for id in door_ids:\n door_coordinates = self.wobj.markers[id][1]\n s = self.wobj.markers[id][0]\n sides.append((x - s*cos(ang)*dist - sin(ang)*(l - door_coordinates[0]),\n y - s*sin(ang)*dist + cos(ang)*( l - door_coordinates[0]),\n wrap_angle(ang+(1-s)*pi/2), id))\n\n sorted_sides = sorted(sides, key=lambda pt: (pt[0]-rx)**2 + (pt[1]-ry)**2)\n self.door_id = sorted_sides[0][3]\n self.door_coordinates = self.wobj.markers[self.door_id][1]\n print(\"Going to door\", self.door_id )\n shortest = sorted_sides[0][0:3]\n else:\n side1 = (x + cos(ang)*dist - sin(ang)*(self.wobj.length/2 - door_coordinates[0]),\n y + sin(ang)*dist + cos(ang)*( self.wobj.length/2 - door_coordinates[0]),\n wrap_angle(ang+pi))\n side2 = (x - cos(ang)*dist - sin(ang)*(self.wobj.length/2 - door_coordinates[0]),\n y - sin(ang)*dist + cos(ang)*( self.wobj.length/2 - door_coordinates[0]),\n wrap_angle(ang))\n sides = [side1, side2]\n sorted_sides = sorted(sides, key=lambda pt: (pt[0]-rx)**2 + (pt[1]-ry)**2)\n shortest = sorted_sides[0]\n return shortest\n\n class TurnToSide(Turn):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n wall = self.parent.object\n wobj = self.parent.wobj\n (x, y, ang) = self.parent.pick_side(150)\n dtheta = wrap_angle(ang - self.robot.world.particle_filter.pose_estimate()[2])\n if abs(dtheta) > 0.1:\n self.angle = Angle(dtheta)\n super().start(event)\n 
else:\n self.angle = Angle(0)\n super().start(event)\n self.post_success()\n\n\n class GoToSide(PilotToPose):\n def __init__(self):\n super().__init__(None)\n\n def start(self, event=None):\n wall = self.parent.object\n print('Selected wall',self.parent.wobj)\n (x, y, theta) = self.parent.pick_side(150)\n\n self.target_pose = Pose(x, y, self.robot.pose.position.z,\n angle_z=Angle(radians = wrap_angle(theta)))\n print('Traveling to',self.target_pose)\n super().start(event)\n\n\n class ReportPosition(StateNode):\n def start(self,event=None):\n super().start(event)\n wall = self.parent.object\n wobj = self.parent.wobj\n cx = wobj.x\n cy = wobj.y\n rx = self.robot.pose.position.x\n ry = self.robot.pose.position.y\n dx = cx - rx\n dy = cy - ry\n dist = math.sqrt(dx*dx + dy*dy)\n bearing = wrap_angle(atan2(dy,dx) - self.robot.pose.rotation.angle_z.radians) * 180/pi\n print('wall at (%5.1f,%5.1f) robot at (%5.1f,%5.1f) dist=%5.1f brg=%5.1f' %\n (cx, cy, rx, ry, dist, bearing))\n\n\n class TurnToWall(Turn):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n cube = self.parent.object\n door_id = self.parent.door_id\n\n for i in range(4):\n if door_id not in self.robot.world.aruco.seen_marker_ids:\n #Check three times that the marker is not visible\n if i > 2:\n self.angle = Angle(degrees=0)\n super().start(event)\n self.post_failure()\n break\n else:\n sleep(0.1)\n continue\n else:\n while True:\n rx = self.robot.pose.position.x\n ry = self.robot.pose.position.y\n rt = self.robot.pose.rotation.angle_z.radians\n\n marker = self.robot.world.aruco.seen_marker_objects.get(door_id,0)\n if marker!=0:\n break\n\n sensor_dist = marker.camera_distance\n sensor_bearing = atan2(marker.camera_coords[0],\n marker.camera_coords[2])\n sensor_orient = - marker.opencv_rotation[1] * (pi/180)\n\n direction = rt + sensor_bearing\n dx = sensor_dist * cos(direction)\n dy = sensor_dist * sin(direction)\n cx = rx + dx\n cy = ry + dy\n dist = math.sqrt(dx*dx + dy*dy)\n self.angle = wrap_angle(atan2(dy,dx) - self.robot.pose.rotation.angle_z.radians) \\\n * 180/pi\n if abs(self.angle) < 2:\n self.angle = 0\n self.angle = Angle(degrees=self.angle)\n #print(\"TurnToWall\", self.angle)\n super().start(event)\n break\n\n\n class ForwardToWall(Forward):\n def __init__(self, offset):\n self.offset = offset\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n door_id = self.parent.door_id\n rx = self.robot.pose.position.x\n ry = self.robot.pose.position.y\n rt = self.robot.pose.rotation.angle_z.radians\n if door_id in self.robot.world.aruco.seen_marker_objects:\n marker = self.robot.world.aruco.seen_marker_objects[door_id]\n sensor_dist = marker.camera_distance\n sensor_bearing = atan2(marker.camera_coords[0],\n marker.camera_coords[2])\n sensor_orient = - marker.opencv_rotation[1] * (pi/180)\n\n direction = rt + sensor_bearing\n dx = sensor_dist * cos(direction)\n dy = sensor_dist * sin(direction)\n cx = rx + dx\n cy = ry + dy\n dist = math.sqrt(dx*dx + dy*dy)\n self.distance = Distance(sqrt(dx*dx + dy*dy) - self.offset)\n super().start(event)\n else:\n self.distance = Distance(0)\n super().start(event)\n self.post_failure()\n\n\n class FindWall(SetHeadAngle):\n def __init__(self):\n super().__init__()\n\n def start(self, event=None):\n if self.running: return\n door_id = self.parent.door_id\n if door_id not in self.robot.world.aruco.seen_marker_ids:\n #print('Looking higher for wall')\n if self.robot.head_angle.degrees < 40:\n self.angle = 
Angle(self.robot.head_angle.radians + 0.15)\n super().start(event)\n else:\n self.angle = self.robot.head_angle\n super().start(event)\n else:\n self.angle = self.robot.head_angle\n super().start(event)\n\n # GoToWall state machine\n def setup(self):\n \"\"\"\n droplift: SetLiftHeight(0) =T(0.5)=> check_start # time for vision to set up world map\n \n check_start: PilotCheckStart()\n check_start =S=> SetHeadAngle(0) =C=> turn_to_side\n check_start =F=> Forward(-80) =C=> check_start\n \n turn_to_side: self.TurnToSide()\n turn_to_side =C=> turn_to_side\n turn_to_side =S=> self.ReportPosition() =N=> go_side\n \n go_side: self.GoToSide() =C=> self.TurnToSide() =C=> lookup\n \n lookup: SetHeadAngle(35) =C=> find\n \n find: self.TurnToWall() =C=>approach\n find =F=> Forward(-80) =C=> StateNode() =T(1)=> find2\n \n find2: self.TurnToWall() =C=>approach\n find2 =F=> Forward(-80) =C=> Say(\"No Door trying again\") =C=> turn_to_side\n \n approach: self.ForwardToWall(100) =C=> self.FindWall() =C=>\n self.TurnToWall() =C=> self.FindWall() =C=>\n self.ForwardToWall(70) =C=> self.FindWall() =C=>\n self.TurnToWall()=C=> end\n approach =F=> end\n \n end: SetHeadAngle(0) =C=> Forward(150) =C=> ParentCompletes()\n \"\"\"\n \n # Code generated by genfsm on Fri Apr 6 04:49:50 2018:\n \n droplift = SetLiftHeight(0) .set_name(\"droplift\") .set_parent(self)\n check_start = PilotCheckStart() .set_name(\"check_start\") .set_parent(self)\n setheadangle1 = SetHeadAngle(0) .set_name(\"setheadangle1\") .set_parent(self)\n forward1 = Forward(-80) .set_name(\"forward1\") .set_parent(self)\n turn_to_side = self.TurnToSide() .set_name(\"turn_to_side\") .set_parent(self)\n reportposition1 = self.ReportPosition() .set_name(\"reportposition1\") .set_parent(self)\n go_side = self.GoToSide() .set_name(\"go_side\") .set_parent(self)\n turntoside1 = self.TurnToSide() .set_name(\"turntoside1\") .set_parent(self)\n lookup = SetHeadAngle(35) .set_name(\"lookup\") .set_parent(self)\n find = self.TurnToWall() .set_name(\"find\") .set_parent(self)\n forward2 = Forward(-80) .set_name(\"forward2\") .set_parent(self)\n statenode1 = StateNode() .set_name(\"statenode1\") .set_parent(self)\n find2 = self.TurnToWall() .set_name(\"find2\") .set_parent(self)\n forward3 = Forward(-80) .set_name(\"forward3\") .set_parent(self)\n say1 = Say(\"No Door trying again\") .set_name(\"say1\") .set_parent(self)\n approach = self.ForwardToWall(100) .set_name(\"approach\") .set_parent(self)\n findwall1 = self.FindWall() .set_name(\"findwall1\") .set_parent(self)\n turntowall1 = self.TurnToWall() .set_name(\"turntowall1\") .set_parent(self)\n findwall2 = self.FindWall() .set_name(\"findwall2\") .set_parent(self)\n forwardtowall1 = self.ForwardToWall(70) .set_name(\"forwardtowall1\") .set_parent(self)\n findwall3 = self.FindWall() .set_name(\"findwall3\") .set_parent(self)\n turntowall2 = self.TurnToWall() .set_name(\"turntowall2\") .set_parent(self)\n end = SetHeadAngle(0) .set_name(\"end\") .set_parent(self)\n forward4 = Forward(150) .set_name(\"forward4\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n \n timertrans1 = TimerTrans(0.5) .set_name(\"timertrans1\")\n timertrans1 .add_sources(droplift) .add_destinations(check_start)\n \n successtrans1 = SuccessTrans() .set_name(\"successtrans1\")\n successtrans1 .add_sources(check_start) .add_destinations(setheadangle1)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(setheadangle1) 
.add_destinations(turn_to_side)\n \n failuretrans1 = FailureTrans() .set_name(\"failuretrans1\")\n failuretrans1 .add_sources(check_start) .add_destinations(forward1)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(forward1) .add_destinations(check_start)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(turn_to_side) .add_destinations(turn_to_side)\n \n successtrans2 = SuccessTrans() .set_name(\"successtrans2\")\n successtrans2 .add_sources(turn_to_side) .add_destinations(reportposition1)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(reportposition1) .add_destinations(go_side)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(go_side) .add_destinations(turntoside1)\n \n completiontrans5 = CompletionTrans() .set_name(\"completiontrans5\")\n completiontrans5 .add_sources(turntoside1) .add_destinations(lookup)\n \n completiontrans6 = CompletionTrans() .set_name(\"completiontrans6\")\n completiontrans6 .add_sources(lookup) .add_destinations(find)\n \n completiontrans7 = CompletionTrans() .set_name(\"completiontrans7\")\n completiontrans7 .add_sources(find) .add_destinations(approach)\n \n failuretrans2 = FailureTrans() .set_name(\"failuretrans2\")\n failuretrans2 .add_sources(find) .add_destinations(forward2)\n \n completiontrans8 = CompletionTrans() .set_name(\"completiontrans8\")\n completiontrans8 .add_sources(forward2) .add_destinations(statenode1)\n \n timertrans2 = TimerTrans(1) .set_name(\"timertrans2\")\n timertrans2 .add_sources(statenode1) .add_destinations(find2)\n \n completiontrans9 = CompletionTrans() .set_name(\"completiontrans9\")\n completiontrans9 .add_sources(find2) .add_destinations(approach)\n \n failuretrans3 = FailureTrans() .set_name(\"failuretrans3\")\n failuretrans3 .add_sources(find2) .add_destinations(forward3)\n \n completiontrans10 = CompletionTrans() .set_name(\"completiontrans10\")\n completiontrans10 .add_sources(forward3) .add_destinations(say1)\n \n completiontrans11 = CompletionTrans() .set_name(\"completiontrans11\")\n completiontrans11 .add_sources(say1) .add_destinations(turn_to_side)\n \n completiontrans12 = CompletionTrans() .set_name(\"completiontrans12\")\n completiontrans12 .add_sources(approach) .add_destinations(findwall1)\n \n completiontrans13 = CompletionTrans() .set_name(\"completiontrans13\")\n completiontrans13 .add_sources(findwall1) .add_destinations(turntowall1)\n \n completiontrans14 = CompletionTrans() .set_name(\"completiontrans14\")\n completiontrans14 .add_sources(turntowall1) .add_destinations(findwall2)\n \n completiontrans15 = CompletionTrans() .set_name(\"completiontrans15\")\n completiontrans15 .add_sources(findwall2) .add_destinations(forwardtowall1)\n \n completiontrans16 = CompletionTrans() .set_name(\"completiontrans16\")\n completiontrans16 .add_sources(forwardtowall1) .add_destinations(findwall3)\n \n completiontrans17 = CompletionTrans() .set_name(\"completiontrans17\")\n completiontrans17 .add_sources(findwall3) .add_destinations(turntowall2)\n \n completiontrans18 = CompletionTrans() .set_name(\"completiontrans18\")\n completiontrans18 .add_sources(turntowall2) .add_destinations(end)\n \n failuretrans4 = FailureTrans() .set_name(\"failuretrans4\")\n failuretrans4 .add_sources(approach) .add_destinations(end)\n \n completiontrans19 = CompletionTrans() .set_name(\"completiontrans19\")\n completiontrans19 
.add_sources(end) .add_destinations(forward4)\n \n completiontrans20 = CompletionTrans() .set_name(\"completiontrans20\")\n completiontrans20 .add_sources(forward4) .add_destinations(parentcompletes1)\n \n return self\n\nclass Exploren(StateNode):\n \n def __init__(self):\n self.current_wall = None\n self.to_do_wall = []\n self.done_wall = []\n super().__init__()\n\n class Think(StateNode):\n def start(self,event=None):\n super().start(event)\n for key, val in self.robot.world.world_map.objects.items():\n if isinstance(val,WallObj) and val.id not in self.parent.done_wall and val.id not in self.parent.to_do_wall:\n self.parent.to_do_wall.append(val)\n print(val.id)\n\n if len(self.parent.to_do_wall) > 0:\n wall = self.parent.to_do_wall.pop()\n self.parent.current_wall = wall.id\n self.parent.done_wall.append(wall.id)\n print(self.parent.to_do_wall,self.parent.current_wall,self.parent.done_wall)\n self.post_failure()\n else:\n self.post_success()\n\n class Go(GoToWall):\n def __init__(self):\n super().__init__()\n \n def start(self,event=None):\n self.object = self.parent.current_wall\n self.wall_name = 'Wall-'+str(self.object)\n self.door_id = -1\n super().start(event)\n\n # Explore state machine\n def setup(self):\n \"\"\"\n look: LookAroundInPlace(stop_on_exit=False) =T(5)=> StopBehavior() =C=> think\n \n think: self.Think()\n think =F=> go\n think =S=> end\n \n go: self.Go() =C=> look\n \n end: Say(\"Done\") =C=> ParentCompletes()\n \"\"\"\n \n # Code generated by genfsm on Fri Apr 6 04:49:50 2018:\n \n look = LookAroundInPlace(stop_on_exit=False) .set_name(\"look\") .set_parent(self)\n stopbehavior1 = StopBehavior() .set_name(\"stopbehavior1\") .set_parent(self)\n think = self.Think() .set_name(\"think\") .set_parent(self)\n go = self.Go() .set_name(\"go\") .set_parent(self)\n end = Say(\"Done\") .set_name(\"end\") .set_parent(self)\n parentcompletes2 = ParentCompletes() .set_name(\"parentcompletes2\") .set_parent(self)\n \n timertrans3 = TimerTrans(5) .set_name(\"timertrans3\")\n timertrans3 .add_sources(look) .add_destinations(stopbehavior1)\n \n completiontrans21 = CompletionTrans() .set_name(\"completiontrans21\")\n completiontrans21 .add_sources(stopbehavior1) .add_destinations(think)\n \n failuretrans5 = FailureTrans() .set_name(\"failuretrans5\")\n failuretrans5 .add_sources(think) .add_destinations(go)\n \n successtrans3 = SuccessTrans() .set_name(\"successtrans3\")\n successtrans3 .add_sources(think) .add_destinations(end)\n \n completiontrans22 = CompletionTrans() .set_name(\"completiontrans22\")\n completiontrans22 .add_sources(go) .add_destinations(look)\n \n completiontrans23 = CompletionTrans() .set_name(\"completiontrans23\")\n completiontrans23 .add_sources(end) .add_destinations(parentcompletes2)\n \n return self\n"
},
{
"alpha_fraction": 0.518448531627655,
"alphanum_fraction": 0.5371420383453369,
"avg_line_length": 33.08830642700195,
"blob_id": "b2300d19577ff97e01fa449992ac90f26d091a38",
"content_id": "d3441baa2a05a213b4af74fcbc1c64aa167c6e0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14283,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 419,
"path": "/cozmo_fsm/path_viewer.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPath planner display in OpenGL.\n\"\"\"\n\ntry:\n from OpenGL.GLUT import *\n from OpenGL.GL import *\n from OpenGL.GLU import *\nexcept:\n pass\n\nimport time\nfrom math import pi, sin, cos\nimport array\nimport numpy as np\nimport platform\n\nWINDOW = None\nWINDOW_WF = None\n\nfrom . import opengl\nfrom .rrt import RRTNode\nfrom .rrt_shapes import *\nfrom .wavefront import WaveFront\nfrom . import geometry\nfrom .geometry import wrap_angle\n\nthe_rrt = None\nold_grid = None\nthe_items = [] # each item is a tuple (tree,color)\n\nhelp_text = \"\"\"\nPath viewer commands:\n arrows Translate the view up/down/left/right\n Home Center the view (zero translation)\n < Zoom in\n > Zoom out\n o Show objects\n b Show obstacles\n p Show pose\n space Toggle redisplay (for debugging)\n h Print this help text\n\"\"\"\n\nhelp_text_mac = \"\"\"\nPath viewer commands:\n arrows Translate the view up/down/left/right\n fn + left-arrow Center the view (zero translation)\n option + < Zoom in\n option + > Zoom out\n option + o Show objects\n option + b Show obstacles\n option + p Show pose\n space Toggle redisplay (for debugging)\n option + h Print this help text\n\"\"\"\n\n\nclass PathViewer():\n def __init__(self, robot, rrt,\n width=512, height=512,\n windowName = \"path viewer\",\n bgcolor = (0,0,0)):\n global the_rrt, the_items\n the_rrt = rrt\n the_items = []\n self.robot = robot\n self.width = width\n self.height = height\n self.bgcolor = bgcolor\n self.aspect = self.width/self.height\n self.windowName = windowName\n self.translation = [0., 0.] # Translation in mm\n self.scale = 0.64\n\n def window_creator(self):\n global WINDOW\n WINDOW = opengl.create_window(bytes(self.windowName,'utf-8'), (self.width,self.height))\n glutDisplayFunc(self.display)\n glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyPressed)\n glutSpecialFunc(self.specialKeyPressed)\n glViewport(0,0,self.width,self.height)\n glClearColor(*self.bgcolor, 0)\n # Enable transparency\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n\n def window_creator_wf(self):\n global WINDOW_WF\n WINDOW_WF = opengl.create_window(bytes('wavefront grid','utf-8'), (self.width,self.height))\n glutDisplayFunc(self.display_wf)\n # glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyPressed)\n glutSpecialFunc(self.specialKeyPressed)\n glViewport(0,0,self.width,self.height)\n glClearColor(*self.bgcolor, 0)\n # Enable transparency\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n\n def start(self): # Displays in background\n opengl.init()\n if not WINDOW:\n opengl.CREATION_QUEUE.append(self.window_creator)\n if not WINDOW_WF:\n opengl.CREATION_QUEUE.append(self.window_creator_wf)\n if platform.system() == 'Darwin':\n print(\"Type 'option' + 'h' in the path viewer window for help.\")\n else:\n print(\"Type 'h' in the path viewer window for help.\")\n\n def clear(self):\n global the_items, old_grid\n the_items = []\n old_grid = np.zeros([1,1], dtype=np.int32)\n the_rrt.grid_display = None\n the_rrt.draw_path = None\n the_rrt.treeA = []\n the_rrt.treeB = []\n\n def set_rrt(self,new_rrt):\n global the_rrt\n the_rrt = new_rrt\n\n def draw_rectangle(self, center, width=4, height=None,\n angle=0, color=(1,1,1), fill=True):\n # Default to solid color and square shape\n if len(color)==3:\n color = (color[0],color[1],color[2],1)\n if height is None:\n height = width\n\n # Calculate vertices as offsets from center\n w = width/2; h = height/2\n v1 = (-w,-h); v2 = (w,-h); v3 = (w,h); v4 = 
(-w,h)\n\n # Draw the rectangle\n glPushMatrix()\n if fill:\n glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)\n glColor4f(color[0],color[1],color[2],color[3])\n glTranslatef(*center,0)\n glRotatef(angle,0,0,1)\n glBegin(GL_QUADS)\n glVertex2f(*v1)\n glVertex2f(*v2)\n glVertex2f(*v3)\n glVertex2f(*v4)\n glEnd()\n glPopMatrix()\n\n def draw_circle(self,center,radius=1,color=(1,1,1),fill=True):\n if len(color) == 3:\n color = (*color,1)\n glColor4f(*color)\n if fill:\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(*center)\n else:\n glBegin(GL_LINE_LOOP)\n for angle in range(0,360):\n theta = angle/180*pi\n glVertex2f(center[0]+radius*cos(theta), center[1]+radius*sin(theta))\n glEnd()\n\n\n def draw_triangle(self,center,scale=1,angle=0,color=(1,1,1),fill=True):\n # Default to solid color\n if len(color) == 3:\n color = (*color,1)\n\n glPushMatrix()\n if fill:\n glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)\n glColor4f(*color)\n glTranslatef(*center,0)\n glRotatef(angle,0,0,1)\n glBegin(GL_TRIANGLES)\n glVertex2f( 5.*scale, 0.)\n glVertex2f(-5.*scale, -3.*scale)\n glVertex2f(-5.*scale, 3.*scale)\n glEnd()\n glPopMatrix()\n\n def draw_line(self,pt1,pt2,color=(1,1,1,1)):\n if len(color) == 3:\n color = (*color,1)\n glBegin(GL_LINES)\n glColor4f(*color)\n glVertex2f(*pt1)\n glVertex2f(*pt2)\n glEnd()\n\n def draw_path(self,path):\n \"\"\" Also used if WaveFront generated the path and we want to display it.\"\"\"\n if isinstance(path[0], RRTNode):\n path = [(node.x,node.y) for node in path]\n for i in range(len(path)-1):\n self.draw_line(path[i],path[i+1])\n\n def draw_tree(self,tree,color):\n for node in tree:\n self.draw_node(node,color)\n\n def draw_node(self,node,color):\n self.draw_rectangle((node.x,node.y), color=color)\n if node.parent:\n if node.radius is None or node.radius == 0:\n self.draw_line((node.x,node.y), (node.parent.x,node.parent.y), color=color)\n else:\n color = (1, 1, 0.5)\n init_x = node.parent.x\n init_y = node.parent.y\n init_q = node.parent.q\n targ_q = node.q\n radius = node.radius\n dir = +1 if radius >= 0 else -1\n r = abs(radius)\n center = geometry.translate(init_x,init_y).dot(\n geometry.aboutZ(init_q+dir*pi/2).dot(geometry.point(r)))\n theta = wrap_angle(init_q - dir*pi/2)\n targ_theta = wrap_angle(targ_q - dir*pi/2)\n ang_step = 0.05 # radians\n while abs(theta - targ_theta) > ang_step:\n theta = wrap_angle(theta + dir * ang_step)\n cur_x = center[0,0] + r*cos(theta)\n cur_y = center[1,0] + r*sin(theta)\n self.draw_line((init_x,init_y), (cur_x,cur_y), color=color)\n (init_x,init_y) = (cur_x,cur_y)\n\n def draw_robot(self,node):\n parts = the_rrt.robot_parts_to_node(node)\n for part in parts:\n if isinstance(part,Circle):\n self.draw_circle(center=(part.center[0,0],part.center[1,0]),\n radius=part.radius,\n color=(1,1,0,0.7), fill=False)\n elif isinstance(part,Rectangle):\n self.draw_rectangle(center=(part.center[0,0],part.center[1,0]),\n width=part.max_Ex-part.min_Ex,\n height=part.max_Ey-part.min_Ey,\n angle=part.orient*180/pi,\n color=(1,1,0,0.7), fill=False)\n\n def draw_obstacle(self,obst):\n if isinstance(obst,Circle):\n self.draw_circle(center=(obst.center[0,0],obst.center[1,0]),\n radius=obst.radius,\n color=(1,0,0,0.5), fill=True)\n elif isinstance(obst,Rectangle):\n width = obst.max_Ex - obst.min_Ex\n height = obst.max_Ey - obst.min_Ey\n if width <= 10*height:\n color = (1, 0, 0, 0.5)\n else:\n color = (1, 1, 0, 0.5)\n self.draw_rectangle(center=(obst.center[0], 
obst.center[1]),\n angle=obst.orient*(180/pi),\n width=width, height=height, color=color, fill=True)\n\n def draw_wf(self, grid):\n square_size = 6\n grid_flat = list(set(grid.flatten()))\n grid_flat.sort()\n goal_marker = WaveFront.goal_marker\n try:\n max_val = grid_flat[-2]\n except IndexError:\n max_val = max(grid_flat)\n if max_val <= 0:\n max_val = goal_marker\n max_val = float(max_val)\n\n w = square_size * 0.5\n h = square_size * 0.5\n for x in range(0, grid.shape[0]):\n for y in range(0, grid.shape[1]):\n c = (x*square_size, y*square_size)\n try:\n if grid[x,y] == goal_marker:\n self.draw_rectangle(center=c, width=w, height=h, color=(0, 1, 0)) # green for goal\n elif grid[x,y] == 1:\n self.draw_rectangle(center=c, width=w, height=h, color=(1, 1, 0)) # yellow for start\n elif grid[x,y] < 0:\n self.draw_rectangle(center=c, width=w, height=h, color=(1, 0, 0)) # red for obstacle\n else:\n value = grid[x,y]/max_val # shades of gray for distance values\n self.draw_rectangle(center=c, width=w, height=h, color=(value, value, value))\n except IndexError:\n # print('index is out of bounds', x, y)\n pass\n\n def add_tree(self, tree, color):\n global the_items\n the_items.append((tree,color))\n\n def display(self):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n w = self.width / 2\n glOrtho(-w, w, -w, w, 1, -1)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glRotatef(90,0,0,1)\n glScalef(self.scale, self.scale, self.scale)\n glTranslatef(-self.translation[0], -self.translation[1], 0.)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n self.draw_rectangle(center=(0,0), angle=45, width=5, height=5, color=(0.9, 0.5, 0), fill=False)\n\n if the_rrt.draw_path: # WaveFront-generated path\n self.draw_path(the_rrt.draw_path)\n\n self.draw_tree(the_rrt.treeA, color=(0,1,0))\n self.draw_tree(the_rrt.treeB, color=(0,0,1))\n for (tree,color) in the_items:\n self.draw_tree(tree,color)\n\n for obst in the_rrt.obstacles:\n self.draw_obstacle(obst)\n\n #if the_rrt.start:\n # self.draw_robot(the_rrt.start)\n pose = self.robot.world.particle_filter.pose\n self.draw_robot(RRTNode(x=pose[0], y=pose[1], q=pose[2]))\n\n glutSwapBuffers()\n\n def display_wf(self):\n global old_grid\n grid = the_rrt.grid_display if the_rrt.grid_display is not None else old_grid\n if grid is None: return\n old_grid = grid\n square_size = 5\n w = max(grid.shape) * square_size / 2\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(-w, w, -w, w, 1, -1)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glRotatef(90,0,0,1)\n glScalef(self.scale/2, self.scale/2, self.scale/2)\n glTranslatef(-self.translation[0]-w, -self.translation[1]-w, 0.)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n self.draw_wf(grid)\n glutSwapBuffers()\n\n def reshape(self,width,height):\n glViewport(0,0,width,height)\n self.width = width\n self.height = height\n self.aspect = self.width/self.height\n self.display()\n glutPostRedisplay()\n\n def keyPressed(self,key,mouseX,mouseY):\n # print(str(key), ord(key))\n if key == b'<': # zoom in\n self.scale *= 1.25\n self.print_display_params()\n return\n elif key == b'>': # zoom out\n self.scale /= 1.25\n self.print_display_params()\n return\n elif key == b'o': # show objects\n self.robot.world.world_map.show_objects()\n return\n elif key == b'b': # show obstacles\n self.show_obstacles()\n return\n elif key == b'p': # show pose\n self.robot.world.world_map.show_pose()\n return\n elif key == b'h': # print help\n self.print_help()\n return\n\n def specialKeyPressed(self, key, mouseX, 
mouseY):\n # arrow keys for translation\n incr = 25.0 # millimeters\n if key == GLUT_KEY_UP:\n self.translation[0] += incr / self.scale\n elif key == GLUT_KEY_DOWN:\n self.translation[0] -= incr / self.scale\n elif key == GLUT_KEY_LEFT:\n self.translation[1] += incr / self.scale\n elif key == GLUT_KEY_RIGHT:\n self.translation[1] -= incr / self.scale\n elif key == GLUT_KEY_HOME:\n self.translation = [0., 0.]\n self.print_display_params()\n glutPostRedisplay()\n\n def print_display_params(self):\n print('scale=%.2f translation=[%.1f, %.1f]' %\n (self.scale, *self.translation))\n\n def show_obstacles(self):\n print('RRT has %d obstacles.' % len(the_rrt.obstacles))\n for obstacle in the_rrt.obstacles:\n print(' ', obstacle)\n print()\n\n def print_help(self):\n if platform.system() == 'Darwin':\n print(help_text_mac)\n else:\n print(help_text)\n"
},
{
"alpha_fraction": 0.5400729179382324,
"alphanum_fraction": 0.5497413277626038,
"avg_line_length": 47.723140716552734,
"blob_id": "3791476376e7d2b1d2e470b85d10b8c16b1a54dd",
"content_id": "b096c9affc3682f4a3d8fdc6cfcafbb2c996b958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11791,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 242,
"path": "/cozmo_fsm/sharedmap.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import socket\nimport pickle\nimport threading\nfrom time import sleep\nfrom numpy import inf, arctan2, pi, cos, sin\nfrom .worldmap import RobotForeignObj, LightCubeForeignObj, WallObj\nfrom .geometry import wrap_angle\nfrom cozmo.objects import LightCube\nfrom copy import deepcopy\n\nclass ServerThread(threading.Thread):\n def __init__(self, robot, port=1800):\n threading.Thread.__init__(self)\n self.port = port\n self.socket = None #not running until startServer is called\n self.robot= robot\n self.camera_landmark_pool = {} # used to find transforms\n self.poses = {}\n self.started = False\n self.foreign_objects = {} # foreign walls and cubes\n\n def run(self):\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.socket.setblocking(True) \n self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,True) #enables server restart\n self.socket.bind((\"\",self.port)) \n self.threads =[]\n print(\"Server started\")\n self.started = True\n self.fusion.start()\n self.robot.world.is_server = True\n\n for i in range(100): # Limit of 100 clients\n self.socket.listen(5) # Now wait for client connection.\n c, addr = self.socket.accept() # Establish connection with client.\n print('Got connection from', addr)\n self.threads.append(ClientHandlerThread(i, c, self.robot))\n self.threads[i].start()\n\n def start_server_thread(self):\n if self.robot.aruco_id == -1:\n self.robot.aruco_id = int(input(\"Please enter the aruco id of the robot:\"))\n self.robot.world.server.camera_landmark_pool[self.robot.aruco_id]={}\n # try to get transforms from camera_landmark_pool\n self.fusion = FusionThread(self.robot)\n self.start()\n\nclass ClientHandlerThread(threading.Thread):\n def __init__(self, threadID, client, robot):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.c = client\n self.robot = robot\n self.c.sendall(pickle.dumps(\"Hello\"))\n self.aruco_id = int(pickle.loads(self.c.recv(1024)))\n self.name = \"Client-\"+str(self.aruco_id)\n self.robot.world.server.camera_landmark_pool[self.aruco_id]={}\n self.to_send={}\n print(\"Started thread for\",self.name)\n\n def run(self):\n # Send from server to clients\n while(True):\n for key, value in self.robot.world.world_map.objects.items():\n if isinstance(key,LightCube):\n self.to_send[\"LightCubeForeignObj-\"+str(value.id)]= LightCubeForeignObj(id=value.id, x=value.x, y=value.y, z=value.z, theta=value.theta)\n elif isinstance(key,str):\n # Send walls and cameras\n self.to_send[key] = value # Fix case when object removed from shared map\n else:\n pass # Nothing else in sent\n # append 'end' to end to mark end\n self.c.sendall(pickle.dumps([self.robot.world.perched.camera_pool,self.to_send])+b'end')\n # hack to recieve variable size data without crashing\n data = b''\n while True:\n data += self.c.recv(1024)\n if data[-3:]==b'end':\n break\n\n cams, landmarks, foreign_objects, pose = pickle.loads(data[:-3])\n for key, value in cams.items():\n if key in self.robot.world.perched.camera_pool:\n self.robot.world.perched.camera_pool[key].update(value)\n else:\n self.robot.world.perched.camera_pool[key]=value\n self.robot.world.server.camera_landmark_pool[self.aruco_id].update(landmarks)\n self.robot.world.server.poses[self.aruco_id] = pose\n self.robot.world.server.foreign_objects[self.aruco_id] = foreign_objects\n\nclass FusionThread(threading.Thread):\n def __init__(self, robot):\n threading.Thread.__init__(self)\n self.robot = robot\n self.aruco_id = self.robot.aruco_id\n self.accurate = {}\n self.transforms = {}\n\n 
def run(self):\n while(True):\n # adding local camera landmarks into camera_landmark_pool\n self.robot.world.server.camera_landmark_pool[self.aruco_id].update( \\\n {k:self.robot.world.particle_filter.sensor_model.landmarks[k] for k in \\\n [x for x in self.robot.world.particle_filter.sensor_model.landmarks.keys()\\\n if isinstance(x,str) and \"Video\" in x]})\n flag = False\n # Choose accurate camera\n for key1, value1 in self.robot.world.server.camera_landmark_pool.items():\n for key2, value2 in self.robot.world.server.camera_landmark_pool.items():\n if key1 == key2:\n continue\n for cap, lan in value1.items():\n if cap in value2:\n varsum = lan[2].sum()+value2[cap][2].sum()\n if varsum < self.accurate.get((key1,key2),(inf,None))[0]:\n self.accurate[(key1,key2)] = (varsum,cap)\n flag = True\n # Find transform\n if flag:\n for key, value in self.accurate.items():\n x1,y1 = self.robot.world.server.camera_landmark_pool[key[0]][value[1]][0]\n h1,p1,t1 = self.robot.world.server.camera_landmark_pool[key[0]][value[1]][1]\n x2,y2 = self.robot.world.server.camera_landmark_pool[key[1]][value[1]][0]\n h2,p2,t2 = self.robot.world.server.camera_landmark_pool[key[1]][value[1]][1]\n theta_t = wrap_angle(p1 - p2)\n x_t = x2 - ( x1*cos(theta_t) + y1*sin(theta_t))\n y_t = y2 - (-x1*sin(theta_t) + y1*cos(theta_t))\n self.transforms[key] = (x_t, y_t, theta_t, value[1])\n self.update_foreign_robot()\n self.update_foreign_objects()\n sleep(0.01)\n\n def update_foreign_robot(self):\n for key, value in self.transforms.items():\n if key[1] == self.robot.aruco_id:\n x_t, y_t, theta_t, cap = value\n x, y, theta = self.robot.world.server.poses[key[0]]\n x2 = x*cos(theta_t) + y*sin(theta_t) + x_t\n y2 = -x*sin(theta_t) + y*cos(theta_t) + y_t\n # improve using update function instead of new obj every time\n self.robot.world.world_map.objects[\"Foreign-\"+str(key[0])]=RobotForeignObj(cozmo_id=key[0],\n x=x2, y=y2, z=0, theta=wrap_angle(theta-theta_t), camera_id = int(cap[-2]))\n\n def update_foreign_objects(self):\n for key, value in self.transforms.items():\n if key[1] == self.robot.aruco_id:\n x_t, y_t, theta_t, cap = value\n for k, v in self.robot.world.server.foreign_objects[key[0]].items():\n x2 = v.x*cos(theta_t) + v.y*sin(theta_t) + x_t\n y2 = -v.x*sin(theta_t) + v.y*cos(theta_t) + y_t\n if isinstance(k,str) and \"Wall\" in k:\n # update wall\n if k in self.robot.world.world_map.objects:\n if self.robot.world.world_map.objects[k].is_foreign:\n self.robot.world.world_map.objects[k].update(x=x2, y=y2, theta=wrap_angle(v.theta-theta_t))\n else:\n copy_obj = deepcopy(v)\n copy_obj.x = x2\n copy_obj.y = y2\n copy_obj.theta = wrap_angle(v.theta-theta_t)\n copy_obj.is_foreign = True\n self.robot.world.world_map.objects[k]=copy_obj\n elif isinstance(k,str) and \"Cube\" in k and not self.robot.world.light_cubes[v.id].is_visible:\n # update cube\n if k in self.robot.world.world_map.objects:\n if self.robot.world.world_map.objects[k].is_foreign:\n self.robot.world.world_map.objects[k].update(x=x2, y=y2, theta=wrap_angle(v.theta-theta_t))\n else:\n copy_obj = deepcopy(v)\n copy_obj.x = x2\n copy_obj.y = y2\n copy_obj.theta = wrap_angle(v.theta-theta_t)\n copy_obj.is_foreign = True\n self.robot.world.world_map.objects[k]=copy_obj\n\n\nclass ClientThread(threading.Thread):\n def __init__(self, robot):\n threading.Thread.__init__(self)\n self.port = None\n self.socket = None #not running until start_client_thread is called\n self.ipaddr = None\n self.robot= robot\n self.to_send = {}\n\n def 
start_client_thread(self,ipaddr=\"\",port=1800):\n if self.robot.aruco_id == -1:\n self.robot.aruco_id = int(input(\"Please enter the aruco id of the robot:\"))\n self.robot.world.server.camera_landmark_pool[self.robot.aruco_id]={}\n self.port = port\n self.ipaddr = ipaddr\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,True)\n while True:\n try:\n print(\"Attempting to connect to %s at port %d\" % (ipaddr,port))\n self.socket.connect((ipaddr,port))\n data = pickle.loads(self.socket.recv(1024))\n break\n except:\n print(\"No server found, make sure the address is correct, retrying in 10 seconds\")\n sleep(10)\n print(\"Connected.\")\n self.socket.sendall(pickle.dumps(self.robot.aruco_id))\n self.robot.world.is_server = False\n self.start()\n\n def use_shared_map(self):\n # currently affects only worldmap_viewer\n # uses robot.world.world_map.shared_objects instead of robot.world.world_map.objects\n self.robot.use_shared_map = True\n\n def use_local_map(self):\n self.robot.use_shared_map = False\n\n def run(self):\n # Send from client to server\n while(True):\n # hack to receive variable size data without crashing\n data = b''\n while True:\n data += self.socket.recv(1024)\n if data[-3:]==b'end':\n break\n self.robot.world.perched.camera_pool, self.robot.world.world_map.shared_objects = pickle.loads(data[:-3])\n\n for key, value in self.robot.world.world_map.objects.items():\n if isinstance(key,LightCube):\n self.to_send[\"LightCubeForeignObj-\"+str(value.id)]= LightCubeForeignObj(id=value.id, cozmo_id=self.robot.aruco_id, x=value.x, y=value.y, z=value.z, theta=value.theta)\n elif isinstance(key,str) and 'Wall' in key:\n # Send walls\n self.to_send[key] = value # Fix case when object removed from shared map\n else:\n pass \n\n # send cameras, landmarks, objects and pose\n self.socket.sendall(pickle.dumps([self.robot.world.perched.cameras,\n {k:self.robot.world.particle_filter.sensor_model.landmarks[k] for k in \n [x for x in self.robot.world.particle_filter.sensor_model.landmarks.keys() \n if isinstance(x,str) and \"Video\" in x]},\n self.to_send,\n self.robot.world.particle_filter.pose])+b'end')\n"
},
{
"alpha_fraction": 0.5702306032180786,
"alphanum_fraction": 0.5790897607803345,
"avg_line_length": 43.80908966064453,
"blob_id": "5d58b9e753fafb12b0d5e8bd3feed4a62d05201e",
"content_id": "5385878921bfbb3d23e082197f4a11cf3cf59504",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14787,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 330,
"path": "/cozmo_fsm/path_planner.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPath planner using RRT and Wavefront algorithms.\n\"\"\"\n\nfrom math import pi, sin, cos\nfrom multiprocessing import Process\n\nfrom .nodes import LaunchProcess\nfrom .events import DataEvent, PilotEvent\nfrom .pilot0 import NavPlan, NavStep\nfrom .worldmap import WorldObject, LightCubeObj, ChargerObj, CustomMarkerObj, RoomObj, DoorwayObj, MapFaceObj\nfrom .rrt import RRT, RRTNode, StartCollides, GoalCollides, GoalUnreachable\nfrom .wavefront import WaveFront\nfrom .geometry import wrap_angle, segment_intersect_test\nfrom .doorpass import DoorPass\n\nfrom . import rrt\n\nclass PathPlanner():\n \"\"\"This path planner can be called directly, or it can be used inside\n a PathPlannerProcess node that runs the heavy lifting portion of\n the algorithm in a child process. Because child processes in\n Windows don't share memory with the parent, we must transmit\n certain data to the child as parameters during process creation.\n But only structures that are pickle-able can be sent. The\n setup_problem() method sets up those structures, and do_planning()\n uses them to do the work. If we don't want to run in a separate\n process then we can use plan_path_this_process() to call both\n methods in the main process and return the result.\n \"\"\"\n\n # Note: the obstacle inflation parameter is a radius, not a diameter.\n\n # Fat obstacles for the wavefefront algorithm because the robot\n # itself is treated as a point. Since the robot is longer than it is wide,\n # an inflation value less than the length (95 mm) could miss a collision if\n # the robot turns.\n fat_obstacle_inflation = 50 # must be << pilot's escape_distance\n fat_wall_inflation = 35\n fat_doorway_adjustment = -62\n\n # Skinny obstacles for the RRT are skinny because we model the\n # robot's shape explicitly.\n skinny_obstacle_inflation = 10\n skinny_wall_inflation = 10\n skinny_doorway_adjustment = 0\n\n @staticmethod\n def plan_path_this_process(goal_object, robot, use_doorways=False):\n # Get pickle-able data structures\n (start_node, goal_shape, robot_parts, bbox,\n fat_obstacles, skinny_obstacles, doorway_list, need_grid_display) = \\\n __class__.setup_problem(goal_object, robot, use_doorways)\n # Do the actual path planning\n result = \\\n __class__.do_planning(robot.world.rrt, start_node, goal_shape,\n fat_obstacles, skinny_obstacles, doorway_list,\n need_grid_display)\n if isinstance(result, PilotEvent):\n grid_display = result.args['grid_display']\n elif isinstance(result, DataEvent):\n (navplan, grid_display) = result.data\n else:\n ValueError('Bad result type:', result)\n robot.world.rrt.grid_display = grid_display\n return result\n\n @staticmethod\n def setup_problem(goal_object, robot, use_doorways):\n \"\"\"Calculate values from world map in main process since the map won't\n be available in the child process.\"\"\"\n\n # Fat obstacles and narrow doorways for WaveFront\n robot.world.rrt.generate_obstacles(PathPlanner.fat_obstacle_inflation,\n PathPlanner.fat_wall_inflation,\n PathPlanner.fat_doorway_adjustment)\n fat_obstacles = robot.world.rrt.obstacles\n\n # Skinny obstacles and normal doorways for RRT\n robot.world.rrt.generate_obstacles(PathPlanner.skinny_obstacle_inflation,\n PathPlanner.skinny_wall_inflation,\n PathPlanner.skinny_doorway_adjustment)\n skinny_obstacles = robot.world.rrt.obstacles\n\n (pose_x, pose_y, pose_theta) = robot.world.particle_filter.pose\n start_node = RRTNode(x=pose_x, y=pose_y, q=pose_theta)\n\n if isinstance(goal_object, (LightCubeObj,ChargerObj)):\n goal_shape = 
RRT.generate_cube_obstacle(goal_object)\n elif isinstance(goal_object, CustomMarkerObj):\n goal_shape = RRT.generate_marker_obstacle(goal_object)\n elif isinstance(goal_object, RoomObj):\n goal_shape = RRT.generate_room_obstacle(goal_object)\n elif isinstance(goal_object, MapFaceObj):\n goal_shape = RRT.generate_mapFace_obstacle(goal_object)\n else:\n raise ValueError(\"Can't convert path planner goal %s to shape.\" % goal_object)\n\n robot_parts = robot.world.rrt.make_robot_parts(robot)\n bbox = robot.world.rrt.compute_bounding_box()\n\n if use_doorways:\n doorway_list = robot.world.world_map.generate_doorway_list()\n else:\n doorway_list = [] # don't truncate path at doorways in simulator\n\n need_grid_display = robot.world.path_viewer is not None\n\n return (start_node, goal_shape, robot_parts, bbox,\n fat_obstacles, skinny_obstacles, doorway_list, need_grid_display)\n\n @staticmethod\n def do_planning(rrt_instance, start_node, goal_shape,\n fat_obstacles, skinny_obstacles, doorway_list, need_grid_display):\n \"\"\"Does the heavy lifting; may be called in a child process.\"\"\"\n\n escape_options = (\n # angle distance(mm)\n (0, 40),\n (+30/180*pi, 50),\n (-30/180*pi, 50),\n (pi, 40),\n (pi, 80), # if we're wedged between two cubes\n (+60/180*pi, 80),\n (-60/180*pi, 80),\n (+pi/2, 70),\n (-pi/2, 70)\n )\n\n rrt_instance.obstacles = skinny_obstacles\n start_escape_move = None\n\n wf = WaveFront(bbox=rrt_instance.bbox)\n for obstacle in fat_obstacles:\n wf.add_obstacle(obstacle)\n\n collider = rrt_instance.collides(start_node)\n if not collider:\n collider = wf.check_start_collides(start_node.x, start_node.y)\n\n if collider:\n if collider.obstacle_id is goal_shape.obstacle_id: # We're already at the goal\n step = NavStep(NavStep.DRIVE, [RRTNode(x=start_node.x, y=start_node.y)])\n navplan = NavPlan([step])\n grid_display = None if not need_grid_display else wf.grid\n result = (navplan, grid_display)\n return DataEvent(result)\n else:\n # Find an escape move from this collision condition\n q = start_node.q\n for (phi, escape_distance) in escape_options:\n if phi != pi:\n new_q = wrap_angle(q + phi)\n escape_type = NavStep.DRIVE\n else:\n new_q = q # drive backwards on current heading\n escape_type = NavStep.BACKUP\n new_start = RRTNode(x=start_node.x + escape_distance*cos(q+phi),\n y=start_node.y + escape_distance*sin(q+phi),\n q=new_q)\n collider2 = rrt_instance.collides(new_start)\n #print('trying escape', new_start, 'collision:', collider2)\n if not collider2 and \\\n not wf.check_start_collides(new_start.x,new_start.y):\n start_escape_move = (escape_type, phi, start_node, new_start)\n start_node = new_start\n print('Path planner found escape move from', collider, 'using:', start_escape_move)\n break\n if start_escape_move is None:\n print('PathPlanner: Start collides!', collider)\n return PilotEvent(StartCollides,collider=collider,grid_display=None,text=\"start collides\")\n\n # Run the wavefront path planner\n rrt_instance.obstacles = fat_obstacles\n if goal_shape.obstacle_id.startswith('Room'):\n offsets = [1, -25, -1]\n else:\n offsets = [None]\n for i in range(len(offsets)):\n offset = offsets[i]\n if i > 0:\n wf = WaveFront(bbox=rrt_instance.bbox) # need a fresh grid\n # obstacles come after the goal so they can overwrite goal pixels\n for obstacle in fat_obstacles:\n wf.add_obstacle(obstacle)\n wf.set_goal_shape(goal_shape, offset, obstacle_inflation=PathPlanner.fat_obstacle_inflation)\n wf_start = (start_node.x, start_node.y)\n goal_found = wf.propagate(*wf_start)\n if 
goal_found: break\n print('Wavefront planning failed with offset', offset)\n grid_display = None if not need_grid_display else wf.grid\n if goal_found is None:\n print('PathPlanner wavefront: goal unreachable!')\n return PilotEvent(GoalUnreachable, grid_display=grid_display, text='unreachable')\n\n # Extract and smooth the path\n coords_pairs = wf.extract(goal_found, wf_start)\n rrt_instance.path = rrt_instance.coords_to_path(coords_pairs)\n rrt_instance.obstacles = skinny_obstacles\n #rrt_instance.obstacles = fat_obstacles\n rrt_instance.smooth_path()\n\n # If the path ends in a collision according to the RRT, back off\n while len(rrt_instance.path) > 2:\n last_node = rrt_instance.path[-1]\n if rrt_instance.collides(last_node):\n rrt_instance.path = rrt_instance.path[:-1]\n else:\n break\n\n # Construct the navigation plan\n navplan = PathPlanner.from_path(rrt_instance.path, doorway_list)\n\n # Insert the StartCollides escape move if there is one\n if start_escape_move:\n escape_type, phi, start, new_start = start_escape_move\n if escape_type == NavStep.BACKUP:\n escape_step = NavStep(NavStep.BACKUP, (RRTNode(x=new_start.x, y=new_start.y),))\n navplan.steps.insert(0, escape_step)\n elif navplan.steps[0].type == NavStep.DRIVE:\n navplan.steps[0].param.insert(0, RRTNode(x=start.x, y=start.y))\n else:\n # Shouldn't get here, but just in case\n print(\"Shouldn't end up here!\", navplan.steps[0])\n escape_step = NavStep(NavStep.DRIVE,\n (RRTNode(x=start.x, y=start.y),\n RRTNode(x=new_start.x, y=new_start.y)))\n navplan.steps.insert(0, escape_step)\n\n # Return the navigation plan\n print('navplan=',navplan, ' steps=',navplan.steps)\n result = (navplan, grid_display)\n return DataEvent(result)\n\n @staticmethod\n def intersects_doorway(node1, node2, doorways):\n for door in doorways:\n p1 = (node1.x, node1.y)\n p2 = (node2.x, node2.y)\n p3 = door[1][0]\n p4 = door[1][1]\n result = segment_intersect_test(p1, p2, p3, p4)\n #label = '**INTERSECTS**' if result else 'no_int:'\n #print(label,door[0].id,' ( %.1f, %.1f )<=>( %.1f, %.1f ) vs ( %.1f, %.1f )<=>( %.1f, %.1f )' % (p1+p2+p3+p4))\n if result:\n return door[0]\n return None\n\n @staticmethod\n def from_path(path, doorways):\n # Consider each path segment (defined by start and end\n # RRTNodes) and see if it crosses a doorway.\n door = None\n i = 0 # in case len(path) is 1 and we skip the for loop\n pt1 = path[i]\n for i in range(1, len(path)):\n pt2 = path[i]\n door = PathPlanner.intersects_doorway(pt1,pt2,doorways)\n if door:\n i -= 1\n break\n else:\n pt1 = pt2\n\n # If no doorway, we're good to go\n if door is None:\n step = NavStep(NavStep.DRIVE, path)\n plan = NavPlan([step])\n return plan\n\n # Truncate the path at the doorway, and adjust to make sure\n # we're outside the approach gate.\n start_point = (pt1.x, pt1.y)\n DELTA = 15 # mm\n gate = DoorPass.calculate_gate(start_point, door, DoorPass.OUTER_GATE_DISTANCE + DELTA)\n (dx,dy) = (door.x, door.y)\n (gx,gy) = (gate[0],gate[1])\n gate_node = RRTNode(x=gx, y=gy)\n print('door=', door, 'gate_node=', gate_node)\n\n while i > 0:\n (px,py) = (path[i].x, path[i].y)\n if ((px-dx)**2 + (py-dy)**2) > (DoorPass.OUTER_GATE_DISTANCE + DELTA)**2:\n break\n i -= 1\n\n # For now, just truncate the path and insert an approach gate node.\n new_path = path[0:i+1]\n new_path.append(gate_node)\n step1 = NavStep(NavStep.DRIVE, new_path)\n step2 = NavStep(NavStep.DOORPASS, door)\n plan = NavPlan([step1, step2])\n return plan\n\n#----------------------------------------------------------------\n\n# This 
code is for running the path planner in a child process.\n\nclass PathPlannerProcess(LaunchProcess):\n def start(self, event=None):\n if not isinstance(event,DataEvent):\n raise ValueError('PathPlanner node must be invoked with a DataEvent for the goal.')\n goal_object = event.data\n if not isinstance(goal_object, WorldObject):\n raise ValueError('Path planner goal %s is not a WorldObject' % goal_object)\n self.goal_object = goal_object\n self.print_trace_message('started:', 'goal=%s' % goal_object)\n super().start(event) # will call create_process\n\n def create_process(self, reply_token):\n use_doorways = True # assume we're running on the robot\n (start_node, goal_shape, robot_parts, bbox,\n fat_obstacles, skinny_obstacles, doorway_list, need_grid_display) = \\\n PathPlanner.setup_problem(self.goal_object, self.robot, use_doorways)\n p = Process(target=self.__class__.process_workhorse,\n args = [reply_token,\n start_node, goal_shape, robot_parts, bbox,\n fat_obstacles, skinny_obstacles, doorway_list,\n need_grid_display])\n return p\n\n @staticmethod\n def process_workhorse(reply_token, start_node, goal_shape, robot_parts, bbox,\n fat_obstacles, skinny_obstacles, doorway_list, need_grid_display):\n rrt_instance = RRT(robot_parts=robot_parts, bbox=bbox)\n result = \\\n PathPlanner.do_planning(rrt_instance, start_node, goal_shape,\n fat_obstacles, skinny_obstacles, doorway_list,\n need_grid_display)\n __class__.post_event(reply_token, result)\n"
},
{
"alpha_fraction": 0.5871853828430176,
"alphanum_fraction": 0.588890016078949,
"avg_line_length": 38.26377868652344,
"blob_id": "9a91f265fafa6cbcac88b11625460a4c78741ee8",
"content_id": "c520d04b5bbe12b5a901ff4a912b908121f76e75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9973,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 254,
"path": "/cozmo_fsm/base.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Base classes StateNode for nodes.py and Transition for\n transitions.py are placed here due to circular dependencies.\n Their parent class EventListener is imported from evbase.py.\n\n\"\"\"\n\nimport cozmo\n\nfrom .trace import TRACE\nfrom .evbase import Event, EventListener\nfrom .events import CompletionEvent, SuccessEvent, FailureEvent, DataEvent\n\nclass StateNode(EventListener):\n \"\"\"Base class for state nodes; does nothing.\"\"\"\n def __init__(self):\n super().__init__()\n self.parent = None\n self.children = {}\n self.transitions = []\n self.start_node = None\n self.setup()\n self.setup2()\n\n # Cache 'robot' in the instance because we could have two state\n # machine instances controlling different robots.\n @property\n def robot(self):\n return self._robot\n\n def setup(self):\n \"\"\"Redefine this to set up a child state machine.\"\"\"\n pass\n\n def setup2(self):\n \"\"\"Redefine this if post-setup processing is required.\"\"\"\n pass\n\n def start(self,event=None):\n if self.running: return\n if TRACE.trace_level >= TRACE.statenode_start:\n print('TRACE%d:' % TRACE.statenode_start, self, 'starting')\n super().start()\n # Start transitions before children, because children\n # may post an event that we're listening for (such as completion).\n for t in self.transitions:\n t.start()\n if self.start_node:\n if TRACE.trace_level >= TRACE.statenode_start:\n print('TRACE%d:' % TRACE.statenode_start, self, 'starting child', self.start_node)\n self.start_node.start()\n\n def stop(self):\n # If this node was stopped by an outgoing transition firing,\n # and then its parent tries to stop it, we need to cancel the\n # pending fire2 call.\n if self.running:\n if TRACE.trace_level >= TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop, self, 'stopping')\n super().stop()\n self.stop_children()\n # Stop transitions even if we're not running, because a firing\n # transition could have stopped us and left a fire2 pending.\n for t in self.transitions:\n t.stop()\n\n def stop_children(self):\n if self.children == {}:\n return\n if TRACE.trace_level >= TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop, self, 'is stopping its children')\n for child in self.children.values():\n if child.running:\n child.stop()\n\n def add_transition(self, trans):\n if not isinstance(trans, Transition):\n raise TypeError('%s is not a Transition' % trans)\n self.transitions.append(trans)\n\n def set_parent(self, parent):\n if not isinstance(parent, StateNode):\n raise TypeError('%s is not a StateNode' % parent)\n try:\n if isinstance(self.parent, StateNode):\n raise Exception('parent already set')\n except AttributeError:\n raise Exception(\"It appears %s's __init__ method did not call super().__init__\"\n % self.__class__.__name__)\n self.parent = parent\n parent.children[self.name] = self\n # First-declared child is the default start node.\n if not parent.start_node:\n parent.start_node = self\n return self\n\n def post_event(self, event, suppress_trace=False):\n if not isinstance(event,Event):\n raise ValuError('post_event given a non-Event argument:',event)\n if event.source is None:\n event.source = self\n if (not suppress_trace) and (TRACE.trace_level >= TRACE.event_posted):\n print('TRACE%d:' % TRACE.event_posted, self, 'posting event',event)\n if not self.running:\n print(\"*** ERROR: Node\", self, \"posted event\", event,\"before calling super().start(). 
***\")\n self.robot.erouter.post(event)\n\n def post_completion(self):\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop, self, 'posting completion')\n event = CompletionEvent()\n event.source = self\n self.post_event(event, suppress_trace=True)\n\n def post_success(self,details=None):\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n self, 'posting success, details=%s' % details)\n event = SuccessEvent(details)\n event.source = self\n self.post_event(event, suppress_trace=True)\n\n def post_failure(self,details=None):\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n self, 'posting failure, details=%s' % details)\n event = FailureEvent(details)\n event.source = self\n self.post_event(event, suppress_trace=True)\n\n def post_data(self,value):\n if TRACE.trace_level > TRACE.statenode_startstop:\n print('TRACE%d:' % TRACE.statenode_startstop,\n self, 'posting data', value)\n event = DataEvent(value)\n event.source = self\n self.post_event(event, suppress_trace=True)\n\n def now(self):\n \"\"\"Use now() to execute this node from the command line instead of as part of a state machine.\"\"\"\n if not self.robot:\n raise ValueError('Node %s has no robot designated.' % self)\n # 'program' is inserted into this module by __init__ to avoid circular importing\n program.running_fsm.children = dict()\n program.running_fsm.children[self.name] = self\n self.robot.loop.call_soon(self.start)\n return self\n\n def print_trace_message(self, msg1='', msg2=''):\n print('<><><> %s' % msg1, self, end='')\n p = self.parent\n while p is not None:\n print(' of', p.name, end='')\n p = p.parent\n print(' ', msg2)\n\nclass Transition(EventListener):\n \"\"\"Base class for transitions: does nothing.\"\"\"\n def __init__(self):\n super().__init__()\n self.sources = []\n self.destinations = []\n self.handle = None\n\n def __repr__(self):\n srcs = ','.join(node.name for node in self.sources)\n dests = ','.join(node.name for node in self.destinations)\n return '<%s %s: %s=>%s >' % \\\n (self.__class__.__name__, self.name, srcs, dests)\n\n @property\n def robot(self):\n return self._robot\n\n def _sibling_check(self,node):\n for sibling in self.sources + self.destinations:\n if sibling.parent is not node.parent:\n raise ValueError(\"All source/destination nodes must have the same parent.\")\n\n def add_sources(self, *nodes):\n for node in nodes:\n if not isinstance(node, StateNode):\n raise TypeError('%s is not a StateNode' % node)\n self._sibling_check(node)\n node.add_transition(self)\n self.sources.append(node)\n return self\n\n def add_destinations(self, *nodes):\n for node in nodes:\n if not isinstance(node, StateNode):\n raise TypeError('%s is not a StateNode' % node)\n self._sibling_check(node)\n self.destinations.append(node)\n return self\n\n def start(self):\n if self.running: return\n self.handle = None\n if TRACE.trace_level >= TRACE.transition_startstop:\n print('TRACE%d:' % TRACE.transition_startstop, self, 'starting')\n super().start()\n\n def stop(self):\n if self.running:\n # don't stop if we still have a live source\n for src in self.sources:\n if src.running:\n if TRACE.trace_level >= TRACE.transition_startstop:\n print('TRACE%d:' % TRACE.transition_startstop,self,'saved from stopping by',src)\n return\n if TRACE.trace_level >= TRACE.transition_startstop:\n print('TRACE%d:' % TRACE.transition_startstop, self, 'stopping')\n super().stop()\n # stop pending 
fire2 if fire already stopped this transition\n if self.handle:\n self.handle.cancel()\n if TRACE.trace_level >= TRACE.task_cancel:\n print('TRACE%d:' % TRACE.task_cancel, self.handle, 'cancelled')\n self.handle = None\n\n def fire(self,event=None):\n \"\"\"Shut down source nodes and schedule start of destination nodes.\n Lets the stack unwind by returning before destinations are started.\n Delay also gives time for Cozmo action cancellation to take effect.\"\"\"\n if not self.running: return\n if TRACE.trace_level >= TRACE.transition_fire:\n if event == None:\n evt_desc = ''\n else:\n evt_desc = ' on %s' % event\n print('TRACE%d:' % TRACE.transition_fire, self, 'firing'+evt_desc)\n for src in self.sources:\n src.stop()\n self.stop()\n action_cancel_delay = 0.01 # wait for source node action cancellations to take effect\n self.handle = self.robot.loop.call_later(action_cancel_delay, self.fire2, event)\n\n def fire2(self,event):\n if self.handle is None:\n print('@ @ @ @ @ HANDLE GONE:', self, 'SHOULD BE DEAD', self, event)\n return\n else:\n self.handle = None\n parent = self.sources[0].parent\n if not parent.running:\n # print('@ @ @ @ @ PARENT OF', self, 'IS', parent, 'IS DEAD! event=', event, '%x' % event.__hash__())\n return\n for dest in self.destinations:\n if TRACE.trace_level >= TRACE.transition_fire:\n print('TRACE%d: ' % TRACE.transition_fire, self, 'starting', dest)\n dest.start(event)\n\n default_value_delay = 0.1 # delay before wildcard match will fire\n"
},
{
"alpha_fraction": 0.5391098856925964,
"alphanum_fraction": 0.5536075234413147,
"avg_line_length": 44.9844970703125,
"blob_id": "48f6ed695d4c574a7de53c85de7cfb7b4d4b0329",
"content_id": "10d9a074b4a28cf1d51f8ba899b21929e3cee7fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5932,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 129,
"path": "/cozmo_fsm/cozmo_kin.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from math import pi, tan\n\nimport cozmo\n\nfrom .kine import *\nfrom cozmo_fsm import geometry\nfrom .geometry import tprint, point, translation_part, rotation_part\nfrom .rrt_shapes import *\n\n# ================ Constants ================\n\nwheelbase = 45 # millimeters\nfront_wheel_diameter = 52 # millimeters\nhook_spacing = 35 # millimeters\ncenter_of_rotation_offset = -19.7 # millimeters\n\n# ================================================================\n\nclass CozmoKinematics(Kinematics):\n def __init__(self,robot):\n base_frame = Joint('base',\n description='Base frame: the root of the kinematic tree')\n\n # cor is center of rotation\n cor_frame = Joint('cor', parent=base_frame,\n description='Center of rotation',\n r=-19.,\n collision_model=Rectangle(geometry.point(),\n dimensions=(95,60)))\n\n # Use link instead of joint for world_frame\n world_frame = Joint('world', parent=base_frame, type='world', getter=self.get_world,\n description='World origin in base frame coordinates',\n qmin=None, qmax=None)\n\n front_axle_frame = Joint('front_axle', parent=base_frame,\n description='Center of the front axle',\n d=front_wheel_diameter/2, alpha=pi/2)\n back_axle_frame = Joint('back_axle', parent=base_frame, r=-46., alpha=pi/2)\n\n # This frame is on the midline. Could add separate left and right shoulders.\n # Positive angle is up, so z must point to the right.\n # x is forward, y points up.\n shoulder_frame = Joint('shoulder', parent=base_frame,\n type='revolute', getter=self.get_shoulder,\n description='Rotation axis of the lift; z points to the right',\n qmin=cozmo.robot.MIN_LIFT_ANGLE.radians,\n qmax=cozmo.robot.MAX_LIFT_ANGLE.radians,\n d=21., r=-39., alpha=pi/2)\n\n lift_attach_frame = \\\n Joint('lift_attach', parent=shoulder_frame, type='revolute',\n description='Tip of the lift, where cubes attach; distal end of four-bar linkage',\n getter=self.get_lift_attach, r=66.,\n qmax = - cozmo.robot.MIN_LIFT_ANGLE.radians,\n qmin = - cozmo.robot.MAX_LIFT_ANGLE.radians,\n #collision_model=Circle(geometry.point(), radius=10))\n )\n\n # Positive head angle is up, so z must point to the right.\n # With x pointing forward, y must point up.\n head_frame = Joint('head', parent=base_frame, type='revolute',\n getter=self.get_head,\n description='Axis of head rotation; z points to the right',\n qmin=cozmo.robot.MIN_HEAD_ANGLE.radians,\n qmax=cozmo.robot.MAX_HEAD_ANGLE.radians,\n d=35., r=-10., alpha=pi/2)\n\n # Dummy joint located below head joint at level of the camera frame,\n # and x axis points down, z points forward, y points left\n camera_dummy = Joint('camera_dummy', parent=head_frame,\n description='Dummy joint below the head, at the level of the camera frame',\n theta=-pi/2, r=7.5, alpha=-pi/2)\n # x axis points right, y points down, z points forward\n camera_frame = Joint('camera', parent=camera_dummy,\n description='Camera reference frame; y is down and z is outward',\n d=15., theta=-pi/2)\n\n joints = [base_frame, world_frame, cor_frame,\n front_axle_frame, back_axle_frame,\n shoulder_frame, lift_attach_frame,\n head_frame, camera_dummy, camera_frame]\n\n super().__init__(joints,robot)\n\n def get_head(self):\n return self.robot.head_angle.radians\n\n def get_shoulder(self):\n # Formula supplied by Mark Wesley at Anki\n # Check SDK documentation for new lift-related calls that might replace this\n return math.asin( (self.robot.lift_height.distance_mm-45.0) / 66.0)\n\n def get_lift_attach(self):\n return -self.get_shoulder()\n\n def get_world(self):\n return 
self.robot.world.particle_filter.pose_estimate()\n\n def project_to_ground(self,cx,cy):\n \"Converts camera coordinates to a ground point in the base frame.\"\n # Formula taken from Tekkotsu's projectToGround method\n camera_res = (320, 240)\n half_camera_max = max(*camera_res) / 2\n config = self.robot.camera.config\n # Convert to generalized coordinates in range [-1, 1]\n gx = (cx-config.center.x) / half_camera_max\n gy = (cy-config.center.y) / half_camera_max\n #tekkotsu_focal_length_x = camera_res[0]/camera_max / tan(config.fov_x.radians/2)\n #tekkotsu_focal_length_y = camera_res[1]/camera_max / tan(config.fov_y.radians/2)\n # Generate a ray in the camera frame\n rx = gx / (config.focal_length.x / half_camera_max)\n ry = gy / (config.focal_length.y / half_camera_max)\n ray = point(rx,ry,1)\n\n cam_to_base = self.robot.kine.joint_to_base('camera')\n offset = translation_part(cam_to_base)\n rot_ray = rotation_part(cam_to_base).dot(ray)\n dist = - offset[2,0]\n align = rot_ray[2,0]\n\n if abs(align) > 1e-5:\n s = dist / align\n hit = point(rot_ray[0,0]*s, rot_ray[1,0]*s, rot_ray[2,0]*s) + offset\n elif align * dist < 0:\n hit = point(-rot_ray[0,0], -rot_ray[1,0], -rot_ray[2,0], abs(align))\n else:\n hit = point(rot_ray[0,0], rot_ray[1,0], rot_ray[2,0], abs(align))\n return hit\n"
},
{
"alpha_fraction": 0.6007992625236511,
"alphanum_fraction": 0.6198934316635132,
"avg_line_length": 39.21428680419922,
"blob_id": "2b4d8a59509e0af95d9cec11467056ee5b3576ef",
"content_id": "529b27c4836092f84e491edd24d1310b67001f4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2252,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 56,
"path": "/cozmo_fsm/examples/Iteration.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Iteration.fsm demonstrates nested iteration using the Iterate node\n and the =CNext=> transition, which waits for completion before advancing\n the iterator. Use =Next=> if the source nodes don't need to\n complete.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass PrintIt(StateNode):\n def start(self,event=None):\n if self.running: return\n super().start(event)\n if isinstance(event,DataEvent):\n print('I got some data: ', event.data)\n\nclass Iteration(StateMachineProgram):\n def setup(self):\n \"\"\"\n \touter_loop: Iterate(['alpha', 'bravo', 'charlie'])\n \touter_loop =SayData=> Say() =C=> inner_loop\n \n inner_loop: Iterate(4) =D=> PrintIt() =Next=> inner_loop\n # When inner iteration is done, it posts a completion event.\n inner_loop =CNext=> outer_loop\n \n \touter_loop =C=> Say('Done')\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:13:49 2020:\n \n outer_loop = Iterate(['alpha', 'bravo', 'charlie']) .set_name(\"outer_loop\") .set_parent(self)\n say1 = Say() .set_name(\"say1\") .set_parent(self)\n inner_loop = Iterate(4) .set_name(\"inner_loop\") .set_parent(self)\n printit1 = PrintIt() .set_name(\"printit1\") .set_parent(self)\n say2 = Say('Done') .set_name(\"say2\") .set_parent(self)\n \n saydatatrans1 = SayDataTrans() .set_name(\"saydatatrans1\")\n saydatatrans1 .add_sources(outer_loop) .add_destinations(say1)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(say1) .add_destinations(inner_loop)\n \n datatrans1 = DataTrans() .set_name(\"datatrans1\")\n datatrans1 .add_sources(inner_loop) .add_destinations(printit1)\n \n nexttrans1 = NextTrans() .set_name(\"nexttrans1\")\n nexttrans1 .add_sources(printit1) .add_destinations(inner_loop)\n \n cnexttrans1 = CNextTrans() .set_name(\"cnexttrans1\")\n cnexttrans1 .add_sources(inner_loop) .add_destinations(outer_loop)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(outer_loop) .add_destinations(say2)\n \n return self\n"
},
{
"alpha_fraction": 0.6045751571655273,
"alphanum_fraction": 0.6214178204536438,
"avg_line_length": 42.2282600402832,
"blob_id": "a3e86acf37f2e7ad277a5ca236206dce64d79c9a",
"content_id": "131d8ebff731c0f297325bff528fcbce4f0bdc31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3978,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 92,
"path": "/cozmo_fsm/examples/Randomness.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Randomness.fsm demonstrates three ways to introduce randomness in\n state machine behavior.\n\n 1) Use the =RND=> transition to select a destination node at random.\n\n 2) Pass a list of utterances to Say(), and it will choose one at\n random.\n\n 3) Specialize a node class such as Forward or Turn and use Python's\n random() function to generate a random value for the node's\n parameter.\n\n\"\"\"\n\nimport random\n\nfrom cozmo_fsm import *\n\nclass RandomForward(Forward):\n \"\"\"Move forward a random distance.\"\"\"\n def __init__(self,mindist=10,maxdist=50,**kwargs):\n super().__init__(**kwargs)\n self.mindist = mindist if isinstance(mindist,Distance) else distance_mm(mindist)\n self.maxdist = maxdist if isinstance(maxdist,Distance) else distance_mm(maxdist)\n\n def start(self,event=None):\n self.distance = distance_mm(self.mindist.distance_mm +\n random.random() * (self.maxdist.distance_mm - self.mindist.distance_mm))\n super().start(event)\n\nclass RandomTurn(Turn):\n \"\"\"Turn by a random amount.\"\"\"\n def __init__(self,minangle=20,maxangle=170,**kwargs):\n super().__init__(**kwargs)\n self.minangle = minangle if isinstance(minangle,Angle) else degrees(minangle)\n self.maxangle = maxangle if isinstance(maxangle,Angle) else degrees(maxangle)\n\n def start(self,event=None):\n angle = self.minangle.degrees + random.random()*(self.maxangle.degrees - self.minangle.degrees)\n self.angle = degrees(angle) if random.random()>=0.5 else degrees(-angle)\n super().start(event)\n\nclass Randomness(StateMachineProgram):\n def setup(self):\n \"\"\"\n startnode: StateNode() =RND=> {fwd, fwd, turn, turn, joke}\n \n fwd: Say([\"Forward\", \"Straight\", \"Full steam ahead\"])\n =T(2)=> RandomForward() =T(2)=> startnode\n \n turn: Say([\"Turn\", \"Rotate\", \"Yaw\"])\n =T(2)=> RandomTurn() =C=> startnode\n \n joke: Say([\"Watch this\", \"Hold my beer\", \"I'm not lost\",\n \"Be cool\", \"Wanna race?\"])\n =C=> StateNode() =T(2)=> startnode\n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:16:24 2020:\n \n startnode = StateNode() .set_name(\"startnode\") .set_parent(self)\n fwd = Say([\"Forward\", \"Straight\", \"Full steam ahead\"]) .set_name(\"fwd\") .set_parent(self)\n randomforward1 = RandomForward() .set_name(\"randomforward1\") .set_parent(self)\n turn = Say([\"Turn\", \"Rotate\", \"Yaw\"]) .set_name(\"turn\") .set_parent(self)\n randomturn1 = RandomTurn() .set_name(\"randomturn1\") .set_parent(self)\n joke = Say([\"Watch this\", \"Hold my beer\", \"I'm not lost\",\n \"Be cool\", \"Wanna race?\"]) .set_name(\"joke\") .set_parent(self)\n statenode1 = StateNode() .set_name(\"statenode1\") .set_parent(self)\n \n randomtrans1 = RandomTrans() .set_name(\"randomtrans1\")\n randomtrans1 .add_sources(startnode) .add_destinations(fwd,fwd,turn,turn,joke)\n \n timertrans1 = TimerTrans(2) .set_name(\"timertrans1\")\n timertrans1 .add_sources(fwd) .add_destinations(randomforward1)\n \n timertrans2 = TimerTrans(2) .set_name(\"timertrans2\")\n timertrans2 .add_sources(randomforward1) .add_destinations(startnode)\n \n timertrans3 = TimerTrans(2) .set_name(\"timertrans3\")\n timertrans3 .add_sources(turn) .add_destinations(randomturn1)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(randomturn1) .add_destinations(startnode)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(joke) .add_destinations(statenode1)\n \n timertrans4 = TimerTrans(2) .set_name(\"timertrans4\")\n 
timertrans4 .add_sources(statenode1) .add_destinations(startnode)\n \n return self\n\n"
},
{
"alpha_fraction": 0.528537929058075,
"alphanum_fraction": 0.5458952188491821,
"avg_line_length": 35.747127532958984,
"blob_id": "ab1575d42c78997ab48ae7818f51435e0f032fad",
"content_id": "580823310bbd743af2ea58a2100865c6d389c974",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6395,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 174,
"path": "/cozmo_fsm/rrt_shapes.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo_fsm import geometry\nfrom math import sqrt, pi, atan2\nimport numpy as np\n\nclass Shape():\n def __init__(self, center=geometry.point()):\n if center is None: raise ValueError()\n self.center = center\n self.rotmat = geometry.identity()\n self.obstacle_id = None # only store the string, so shape is pickle-able\n \n def __repr__(self):\n return \"<%s >\" % (self.__class__.__name__)\n\n def collides(self, shape):\n if isinstance(shape, Rectangle):\n return self.collides_rect(shape)\n elif isinstance(shape, Polygon):\n return self.collides_poly(shape)\n elif isinstance(shape, Circle):\n return self.collides_circle(shape)\n elif isinstance(shape, Compound):\n return shape.collides(self)\n else:\n raise Exception(\"%s has no collides() method defined for %s.\" % (self, shape))\n\n def get_bounding_box(self):\n \"\"\"Should return ((xmin,ymin), (xmax,ymax))\"\"\"\n raise NotImplementedError(\"get_bounding_box\")\n\n#================ Basic Shapes ================\n\nclass Circle(Shape):\n def __init__(self, center=geometry.point(), radius=25/2):\n super().__init__(center)\n self.radius = radius\n self.orient = 0.\n\n def __repr__(self):\n id = self.obstacle_id if self.obstacle_id else '[no obstacle]'\n return '<Circle (%.1f,%.1f) r=%.1f %s>' % \\\n (self.center[0,0], self.center[1,0], self.radius, id)\n\n def instantiate(self, tmat):\n return Circle(center=tmat.dot(self.center), radius=self.radius) \n\n def collides_rect(self,rect):\n return rect.collides_circle(self)\n \n def collides_poly(self,poly):\n return poly.collides(self)\n\n def collides_circle(self,circle):\n dx = self.center[0,0] - circle.center[0,0]\n dy = self.center[1,0] - circle.center[1,0]\n dist = sqrt(dx*dx + dy*dy)\n return dist < (self.radius + circle.radius)\n \n def get_bounding_box(self):\n xmin = self.center[0,0] - self.radius\n xmax = self.center[0,0] + self.radius\n ymin = self.center[1,0] - self.radius\n ymax = self.center[1,0] + self.radius\n return ((xmin,ymin), (xmax,ymax))\n\nclass Polygon(Shape):\n def __init__(self, vertices=None, orient=0):\n center = vertices.mean(1)\n center.resize(4,1)\n super().__init__(center)\n self.vertices = vertices\n self.orient = orient # should move vertex rotation code from Rectangle to here\n N = vertices.shape[1]\n self.edges = tuple( (vertices[:,i:i+1], vertices[:,(i+1)%N:((i+1)%N)+1])\n for i in range(N) )\n\n def get_bounding_box(self):\n mins = self.vertices.min(1)\n maxs = self.vertices.max(1)\n xmin = mins[0]\n ymin = mins[1]\n xmax = maxs[0]\n ymax = maxs[1]\n return ((xmin,ymin), (xmax,ymax))\n\n def collides_poly(self,poly):\n raise NotImplementedError()\n\n def collides_circle(self,circle):\n raise NotImplementedError()\n\n\nclass Rectangle(Polygon):\n def __init__(self, center=None, dimensions=None, orient=0):\n self.dimensions = dimensions\n self.orient = orient\n if not isinstance(dimensions[0],(float,int)):\n raise ValueError(dimensions)\n dx2 = dimensions[0]/2\n dy2 = dimensions[1]/2\n relative_vertices = np.array([[-dx2, dx2, dx2, -dx2 ],\n [-dy2, -dy2, dy2, dy2 ],\n [ 0, 0, 0, 0 ],\n [ 1, 1, 1, 1 ]])\n self.unrot = geometry.aboutZ(-orient)\n center_ex = self.unrot.dot(center)\n extents = geometry.translate(center_ex[0,0],center_ex[1,0]).dot(relative_vertices)\n # Extents measured along the rectangle's axes, not world axes\n self.min_Ex = min(extents[0,:])\n self.max_Ex = max(extents[0,:])\n self.min_Ey = min(extents[1,:])\n self.max_Ey = max(extents[1,:])\n vertices = geometry.translate(center[0,0],center[1,0]).dot(\n 
geometry.aboutZ(orient).dot(relative_vertices))\n super().__init__(vertices=vertices, orient=orient)\n\n def __repr__(self):\n id = self.obstacle_id if self.obstacle_id else '[no obstacle]'\n return '<Rectangle (%.1f,%.1f) %.1fx%.1f %.1f deg %s>' % \\\n (self.center[0,0],self.center[1,0],*self.dimensions,\n self.orient*(180/pi), id)\n\n def instantiate(self, tmat):\n dimensions = (self.max_Ex-self.min_Ex, self.max_Ey-self.min_Ey)\n rot = atan2(tmat[1,0], tmat[0,0])\n return Rectangle(center = tmat.dot(self.center),\n orient = rot + self.orient,\n dimensions = dimensions)\n\n def collides_rect(self,other):\n # Test others edges in our reference frame\n o_verts = self.unrot.dot(other.vertices)\n o_min_x = min(o_verts[0,:])\n o_max_x = max(o_verts[0,:])\n o_min_y = min(o_verts[1,:])\n o_max_y = max(o_verts[1,:])\n if o_max_x <= self.min_Ex or self.max_Ex <= o_min_x or \\\n o_max_y <= self.min_Ey or self.max_Ey <= o_min_y:\n return False\n\n if self.orient == other.orient: return True\n\n # Test our edges in other's reference frame\n s_verts = other.unrot.dot(self.vertices)\n s_min_x = min(s_verts[0,:])\n s_max_x = max(s_verts[0,:])\n s_min_y = min(s_verts[1,:])\n s_max_y = max(s_verts[1,:])\n if s_max_x <= other.min_Ex or other.max_Ex <= s_min_x or \\\n s_max_y <= other.min_Ey or other.max_Ey <= s_min_y:\n return False\n return True\n \n def collides_circle(self,circle):\n p = self.unrot.dot(circle.center)[0:2,0]\n pmin = p - circle.radius\n pmax = p + circle.radius\n if pmax[0] <= self.min_Ex or self.max_Ex <= pmin[0] or \\\n pmax[1] <= self.min_Ey or self.max_Ey <= pmin[1]:\n return False\n # Need corner tests here\n return True\n\n#================ Compound Shapes ================\n\nclass Compound(Shape):\n def __init__(self, shapes=[]):\n self.shapes = shapes\n\n def collides(self,shape):\n for s in self.shapes:\n if s.collides(shape):\n return True\n return False\n\n"
},
{
"alpha_fraction": 0.6044031977653503,
"alphanum_fraction": 0.6048968434333801,
"avg_line_length": 32.3190803527832,
"blob_id": "affa5c204403339e6841c4cad1720e6ebf480a37",
"content_id": "8b22323e47897b79dffcdbeab826d0ddbd85e105",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10129,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 304,
"path": "/cozmo_fsm/transitions.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import random\nimport re\n\nfrom .base import *\nfrom .events import *\nfrom .nodes import Say, Iterate\n\nclass NullTrans(Transition):\n \"\"\"Transition fires immediately; does not require an event to trigger it.\"\"\"\n def start(self):\n if self.running: return\n super().start()\n # Don't fire immediately on start because the source node(s) may\n # have other startup calls to make. Give them time to finish.\n self.handle = self.robot.loop.call_soon(self.fire)\n\n def stop(self):\n if self.handle:\n print(self, 'cancelling', self.handle)\n self.handle.cancel()\n self.handle = None\n super().stop()\n\n def fire(self, event=None):\n self.handle = None\n super().fire(event)\n\n\nclass CSFEventBase(Transition):\n \"\"\"Base class for Completion, Success, and Failure Events\"\"\"\n def __init__(self,event_type,count=None):\n super().__init__()\n self.event_type = event_type\n self.count = count\n\n def start(self):\n if self.running: return\n super().start()\n self.observed_sources = set()\n for source in self.sources:\n self.robot.erouter.add_listener(self, self.event_type, source)\n\n def handle_event(self,event):\n if not self.running:\n print('***',self,'got an event ', event, ' while not running!')\n return\n if TRACE.trace_level >= TRACE.listener_invocation:\n print('TRACE%d: %s is handling %s' %\n (TRACE.listener_invocation, self,event))\n super().handle_event(event)\n if isinstance(event, self.event_type):\n self.observed_sources.add(event.source)\n if len(self.observed_sources) >= (self.count or len(self.sources)):\n self.fire(event)\n else:\n raise ValueError(\"%s can't handle %s\" % (self.event_type, event))\n\nclass CompletionTrans(CSFEventBase):\n \"\"\"Transition fires when source nodes complete.\"\"\"\n def __init__(self,count=None):\n super().__init__(CompletionEvent,count)\n\nclass SuccessTrans(CSFEventBase):\n \"\"\"Transition fires when source nodes succeed.\"\"\"\n def __init__(self,count=None):\n super().__init__(SuccessEvent,count)\n\nclass FailureTrans(CSFEventBase):\n \"\"\"Transition fires when source nodes fail.\"\"\"\n def __init__(self,count=None):\n super().__init__(FailureEvent,count)\n\nclass CNextTrans(CSFEventBase):\n \"\"\"Transition fires when source nodes complete.\"\"\"\n def __init__(self,count=None):\n super().__init__(CompletionEvent,count)\n\n def fire(self, event=None):\n super().fire(Iterate.NextEvent())\n\n\nclass NextTrans(Transition):\n \"\"\"Transition sends a NextEvent to its target nodes to advance an iterator.\"\"\"\n def start(self, event=None):\n super().start()\n self.fire(Iterate.NextEvent())\n\n\nclass SayDataTrans(Transition):\n \"\"\"Converts a DataEvent to Say.SayDataEvent so we can speak the data.\"\"\"\n def start(self):\n if self.running: return\n super().start()\n for source in self.sources:\n self.robot.erouter.add_listener(self, DataEvent, source)\n self.robot.erouter.add_listener(self, Say.SayDataEvent, source)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n if isinstance(event, Say.SayDataEvent):\n say_data_event = event\n elif isinstance(event, DataEvent):\n say_data_event = Say.SayDataEvent(event.data)\n else:\n return\n self.fire(say_data_event) \n\n\nclass TimerTrans(Transition):\n \"\"\"Transition fires when the timer has expired.\"\"\"\n def __init__(self,duration=None):\n if not isinstance(duration, (int, float)) or duration < 0:\n raise ValueError(\"TimerTrans requires a positive number for duration, not %s\" % duration)\n super().__init__()\n 
self.set_polling_interval(duration)\n\n def poll(self):\n if not self.running: return\n self.fire()\n\n\nclass TapTrans(Transition):\n \"\"\"Transition fires when a cube is tapped.\"\"\"\n def __init__(self,cube=None):\n super().__init__()\n self.cube = cube\n\n def start(self):\n if self.running: return\n super().start()\n if self.cube:\n self.robot.erouter.add_listener(self,TapEvent,self.cube)\n else:\n self.robot.erouter.add_wildcard_listener(self,TapEvent,None)\n\n def handle_event(self,event):\n if not self.running: return\n if self.cube:\n self.fire(event)\n else:\n self.handle = \\\n self.robot.loop.call_later(Transition.default_value_delay, self.fire, event)\n\n\nclass ObservedMotionTrans(Transition):\n \"\"\"Transition fires when motion is observed in the camera image.\"\"\"\n def start(self):\n if self.running: return\n super().start()\n self.robot.erouter.add_listener(self,ObservedMotionEvent,None)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n self.fire(event)\n\n\nclass UnexpectedMovementTrans(Transition):\n \"\"\"Transition fires when unexpected movement is detected.\"\"\"\n def start(self):\n if self.running: return\n super().start()\n self.robot.erouter.add_listener(self,UnexpectedMovementEvent,None)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n self.fire(event)\n\n\nclass DataTrans(Transition):\n \"\"\"Transition fires when data matches.\"\"\"\n def __init__(self, data=None):\n super().__init__()\n self.data = data\n\n def start(self):\n if self.running: return\n super().start()\n for source in self.sources:\n if self.data is None or isinstance(self.data, type):\n self.robot.erouter.add_wildcard_listener(self, DataEvent, source)\n else:\n self.robot.erouter.add_listener(self, DataEvent, source)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n if isinstance(event,DataEvent):\n if self.data is None or \\\n (isinstance(self.data, type) and isinstance(event.data, self.data)):\n self.fire(event)\n else:\n try:\n if self.data == event.data: # error if == barfs\n self.fire(event)\n except TypeError: pass\n else:\n raise TypeError('%s is not a DataEvent' % event)\n\nclass ArucoTrans(Transition):\n \"\"\"Fires if one of the specified markers is visible\"\"\"\n def __init__(self,marker_ids=None):\n super().__init__()\n self.polling_interval = 0.1\n if isinstance(marker_ids,(list,tuple)):\n marker_ids = set(marker_ids)\n self.marker_ids = marker_ids\n\n def poll(self,event=None):\n if not self.running: return\n if self.marker_ids is None:\n if self.robot.world.aruco.seen_marker_ids != []:\n self.fire()\n elif isinstance(self.marker_ids,set):\n if self.marker_ids.intersection(self.robot.world.aruco.seen_marker_ids) != set():\n self.fire()\n elif self.marker_ids in self.robot.world.aruco.seen_marker_ids:\n self.fire()\n\n\nclass PatternMatchTrans(Transition):\n wildcard = re.compile('.*')\n \n def __init__(self, pattern=None, event_type=None):\n super().__init__()\n if pattern:\n pattern = re.compile(pattern)\n self.pattern = pattern\n self.event_type = event_type\n \n def start(self):\n if self.running: return\n super().start()\n if self.pattern is None:\n self.robot.erouter.add_wildcard_listener(self, self.event_type, None)\n else:\n self.robot.erouter.add_listener(self, self.event_type, None)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n if self.pattern is None:\n result = 
self.wildcard.match(event.string)\n else:\n result = self.pattern.match(event.string)\n if result:\n match_event = self.event_type(event.string,event.words,result)\n self.fire(match_event)\n\nclass TextMsgTrans(PatternMatchTrans):\n \"\"\"Transition fires when text message event matches pattern.\"\"\"\n def __init__(self,pattern=None):\n super().__init__(pattern,TextMsgEvent)\n\n\nclass HearTrans(PatternMatchTrans):\n \"\"\"Transition fires if speech event matches pattern.\"\"\"\n def __init__(self,pattern=None):\n super().__init__(pattern,SpeechEvent)\n\n\nclass PilotTrans(Transition):\n \"\"\"Fires if a matching PilotEvent is observed.\"\"\"\n def __init__(self,status=None):\n super().__init__()\n self.status = status\n\n def start(self):\n if self.running: return\n super().start()\n for source in self.sources:\n if self.status == None:\n self.robot.erouter.add_wildcard_listener(self, PilotEvent, source)\n else:\n self.robot.erouter.add_listener(self, PilotEvent, source)\n\n def handle_event(self,event):\n if not self.running: return\n super().handle_event(event)\n if self.status == None or self.status == event.status:\n self.fire(event)\n \n\nclass RandomTrans(Transition):\n \"\"\"Picks a destination node at random.\"\"\"\n def start(self):\n if self.running: return\n super().start()\n # Don't fire immediately on start because the source node(s) may\n # have other startup calls to make. Give them time to finish.\n self.handle = self.robot.loop.call_soon(self.fire) # okay to use Transition.fire\n\n def stop(self):\n if self.handle:\n self.handle.cancel()\n self.handle = None\n super().stop()\n\n def fire2(self,event):\n \"\"\"Overrides Transition.fire2 to only start one randomly-chosen destination node.\"\"\"\n dest = random.choice(self.destinations)\n dest.start(event)\n"
},
{
"alpha_fraction": 0.5288984775543213,
"alphanum_fraction": 0.5422569513320923,
"avg_line_length": 40.71574783325195,
"blob_id": "6597b16e564406f1ac25f180b14dab26f1d4ba4f",
"content_id": "497c965f1634e33e789fba22809949367b3314d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 40798,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 978,
"path": "/cozmo_fsm/worldmap.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from math import pi, inf, sin, cos, tan, atan2, sqrt\nimport time\n\nfrom cozmo.faces import Face\nfrom cozmo.objects import LightCube, CustomObject\nfrom cozmo.util import Pose\n\nfrom . import evbase\nfrom . import geometry\nfrom . import custom_objs\nfrom .geometry import wrap_angle, quat2rot, quaternion_to_euler_angle, get_orientation_state\n\nimport math\nimport numpy as np\n\nclass WorldObject():\n def __init__(self, id=None, x=0, y=0, z=0, is_visible=None):\n self.id = id\n self.x = x\n self.y = y\n self.z = z\n self.is_fixed = False # True for walls and markers in predefined maps\n self.is_obstacle = True\n if is_visible is not None:\n self.is_visible = is_visible\n self.sdk_obj = None\n self.update_from_sdk = False\n self.is_foreign = False\n if is_visible:\n self.pose_confidence = +1\n else:\n self.pose_confidence = -1\n\nclass LightCubeObj(WorldObject):\n light_cube_size = (44., 44., 44.)\n\n def __init__(self, sdk_obj, id=None, x=0, y=0, z=0, theta=0):\n if id is None:\n id = 'Cube-' + str(sdk_obj.cube_id)\n super().__init__(id,x,y,z)\n self.sdk_obj = sdk_obj\n if sdk_obj and sdk_obj.pose:\n self.sdk_obj.wm_obj = self\n self.update_from_sdk = True\n self.orientation, _, _, self.theta = get_orientation_state(self.sdk_obj.pose.rotation.q0_q1_q2_q3)\n else:\n self.theta = theta\n self.orientation = geometry.ORIENTATION_UPRIGHT\n self.size = self.light_cube_size\n\n @property\n def is_visible(self):\n return self.sdk_obj and self.sdk_obj.is_visible\n\n def get_bounding_box(self):\n s = self.light_cube_size[0]\n pts = np.array([[-s/2, -s/2, s/2, s/2],\n [-s/2, s/2, -s/2, s/2],\n [ 0, 0, 0, 0 ],\n [ 1, 1, 1, 1 ]])\n pts = geometry.aboutZ(self.theta).dot(pts)\n pts = geometry.translate(self.x, self.y).dot(pts)\n mins = pts.min(1)\n maxs = pts.max(1)\n xmin = mins[0]\n ymin = mins[1]\n xmax = maxs[0]\n ymax = maxs[1]\n return ((xmin,ymin), (xmax,ymax))\n\n def __repr__(self):\n if self.pose_confidence >= 0:\n vis = ' visible' if self.sdk_obj and self.is_visible else ''\n return '<LightCubeObj %s: (%.1f, %.1f, %.1f) @ %d deg.%s %s>' % \\\n (self.id, self.x, self.y, self.z, self.theta*180/pi, vis, self.orientation)\n else:\n return '<LightCubeObj %s: position unknown>' % self.id\n\n\nclass ChargerObj(WorldObject):\n charger_size = (104, 98, 10)\n\n def __init__(self, sdk_obj, id=None, x=0, y=0, z=0, theta=0):\n if id is None:\n id = 'Charger'\n super().__init__(id,x,y,z)\n self.sdk_obj = sdk_obj\n if sdk_obj:\n self.sdk_obj.wm_obj = self\n self.update_from_sdk = True\n self.orientation = geometry.ORIENTATION_UPRIGHT\n self.theta = theta\n self.size = self.charger_size\n if self.sdk_obj and self.sdk_obj.pose:\n self.orientation, _, _, self.theta = get_orientation_state(self.sdk_obj.pose.rotation.q0_q1_q2_q3)\n\n @property\n def is_visible(self):\n return self.sdk_obj and self.sdk_obj.is_visible\n\n def __repr__(self):\n if self.pose_confidence >= 0:\n vis = ' visible' if self.sdk_obj and self.is_visible else ''\n return '<ChargerObj: (%.1f, %.1f, %.1f) @ %d deg.%s %s>' % \\\n (self.x, self.y, self.z, self.theta*180/pi, vis, self.orientation)\n else:\n return '<ChargerObj: position unknown>'\n\n\nclass CustomMarkerObj(WorldObject):\n custom_marker_size = (4,44,44)\n\n def __init__(self, sdk_obj, id=None, x=0, y=0, z=0, theta=0, rotation=0):\n # 'theta' is orientation relative to North in particle filter reference frame\n # 'rotation' is orientation relative to \"up\" in the camera image\n if id is None:\n custom_type = sdk_obj.object_type.name[-2:]\n id = 'CustomMarkerObj-' + 
str(custom_type)\n super().__init__(id,x,y,z)\n self.theta = wrap_angle(theta)\n self.sdk_obj = sdk_obj\n self.marker_number = int(id[-2:])\n self.size = self.custom_marker_size\n if self.sdk_obj:\n self.orientation, self.rotation, _, _ = \\\n get_orientation_state(self.sdk_obj.pose.rotation.q0_q1_q2_q3, True)\n else:\n self.rotation = wrap_angle(rotation)\n if abs(self.rotation) < 0.1:\n self.orientation = geometry.ORIENTATION_UPRIGHT\n elif abs(self.rotation-pi/2) < 0.1:\n self.orientation = geometry.ORIENTATION_LEFT\n elif abs(self.rotation+pi/2) < 0.1:\n self.orientation = geometry.ORIENTATION_RIGHT\n elif abs(wrap_angle(self.rotation+pi)) < 0.1:\n self.orientation = geometry.ORIENTATION_INVERTED\n else:\n self.orientation = geometry.ORIENTATION_TILTED\n\n @property\n def is_visible(self):\n return self.sdk_obj and self.sdk_obj.is_visible\n\n def get_bounding_box(self):\n sx,sy,sz = self.size\n pts = np.array([[ 0, 0, sx, sx],\n [-sy/2, sy/2, sy/2, -sy/2],\n [ 0, 0, 0, 0 ],\n [ 1, 1, 1, 1 ]])\n pts = geometry.aboutZ(self.theta).dot(pts)\n pts = geometry.translate(self.x, self.y).dot(pts)\n mins = pts.min(1)\n maxs = pts.max(1)\n xmin = mins[0]\n ymin = mins[1]\n xmax = maxs[0]\n ymax = maxs[1]\n return ((xmin,ymin), (xmax,ymax))\n\n def __repr__(self):\n if self.sdk_obj:\n vis = ' visible' if self.is_visible else ''\n return '<CustomMarkerObj-%s %d: (%.1f,%.1f) @ %d deg.%s %s>' % \\\n (self.sdk_obj.object_type.name[-2:], self.sdk_obj.object_id,\n self.x, self.y, self.theta*180/pi, vis, self.orientation)\n else:\n return '<CustomMarkerObj %s (%.1f,%.1f) %s>' % \\\n (self.id, self.x, self.y, self.orientation)\n\n\nclass CustomCubeObj(WorldObject):\n # *** TODO: add self.rotation and self.orientation similar to CustomMarkerObj\n def __init__(self, sdk_obj, id=None, x=0, y=0, z=0, theta=0, size=None):\n custom_type = sdk_obj.object_type.name[-2:]\n if id is None:\n id = 'CustomCubeObj-' + str(custom_type)\n super().__init__(id,x,y,z)\n self.sdk_obj = sdk_obj\n self.update_from_sdk = True\n self.theta = theta\n self.custom_type = custom_type\n if (size is None) and isinstance(id, CustomObject):\n self.size = (id.x_size_mm, id.y_size_mm, id.z_size_mm)\n elif size:\n self.size = size\n else:\n self.size = (50., 50., 50.)\n\n @property\n def is_visible(self):\n return self.sdk_obj and self.sdk_obj.is_visible\n\n def __repr__(self):\n vis = ' visible' if self.sdk_obj and self.is_visible else ''\n return '<CustomCubeObj-%s %d: (%.1f,%.1f, %.1f) @ %d deg.%s>' % \\\n (self.sdk_obj.object_type.name[-2:], self.sdk_obj.object_id,\n self.x, self.y, self.z, self.theta*180/pi, vis)\n\n\nclass ArucoMarkerObj(WorldObject):\n def __init__(self, aruco_parent, marker_number, id=None, x=0, y=0, z=0, theta=0):\n if id is None:\n id = 'Aruco-' + str(marker_number)\n super().__init__(id,x,y,z)\n self.aruco_parent = aruco_parent\n self.marker_number = marker_number\n self.theta = theta\n self.pose_confidence = +1\n\n @property\n def is_visible(self):\n return self.marker_number in self.aruco_parent.seen_marker_ids\n\n def __repr__(self):\n if self.pose_confidence >= 0:\n vis = ' visible' if self.is_visible else ''\n fix = ' fixed' if self.is_fixed else ''\n return '<ArucoMarkerObj %d: (%.1f, %.1f, %.1f) @ %d deg.%s%s>' % \\\n (self.marker_number, self.x, self.y, self.z, self.theta*180/pi, fix, vis)\n else:\n return '<ArucoMarkerObj %d: position unknown>' % self.marker_number\n\n\nclass WallObj(WorldObject):\n def __init__(self, id=None, x=0, y=0, theta=0, length=100, height=150,\n door_width=75, door_height=105, 
marker_specs=dict(),\n doorways=[], door_ids=[], is_foreign=False, is_fixed=False,\n wall_spec=None, spec_id=None):\n if wall_spec:\n spec_id = wall_spec.spec_id\n length = wall_spec.length\n height = wall_spec.height\n door_width = wall_spec.door_width\n door_height = wall_spec.door_height\n marker_specs = wall_spec.marker_specs.copy()\n doorways = wall_spec.doorways.copy()\n door_ids = wall_spec.door_ids.copy()\n if id:\n self.wall_label = id[1+id.rfind('-'):]\n else:\n if len(marker_specs) > 0:\n k = list(marker_specs.keys())\n k.sort()\n self.wall_label = k[0][1+k[0].rfind('-'):]\n id = 'Wall-%s' % self.wall_label\n elif wall_spec and wall_spec.label:\n self.wall_label = wall_spec.label\n id = 'Wall-%s' % wall_spec.label\n else:\n raise ValueError('id (e.g., \"A\") must be supplied if wall has no markers')\n super().__init__(id, x, y)\n self.z = height/2\n self.theta = theta\n self.spec_id = spec_id\n self.length = length\n self.height = height\n self.door_width = door_width\n self.door_height = door_height\n self.marker_specs = marker_specs\n self.doorways = doorways\n self.door_ids = door_ids\n self.is_foreign = is_foreign\n self.is_fixed = is_fixed\n self.pose_confidence = +1\n\n def update(self, world_map, x=0, y=0, theta=0):\n # Used instead of making new object for efficiency\n self.x = x\n self.y = y\n self.theta = theta\n\n def make_doorways(self, world_map):\n index = 0\n wall = self\n for index in range(len(self.doorways)):\n doorway = DoorwayObj(wall, index)\n doorway.pose_confidence = +1\n world_map.objects[doorway.id] = doorway\n\n def make_arucos(self, world_map):\n \"Called by add_fixed_landmark to make fixed aruco markers.\"\n for key,value in self.marker_specs.items():\n # Project marker onto the wall; move marker if it already exists\n marker = world_map.objects.get(key, None)\n if marker is None:\n marker_number = int(key[1+key.rfind('-'):])\n marker = ArucoMarkerObj(world_map.robot.world.aruco, marker_number=marker_number)\n world_map.objects[marker.id] = marker\n wall_xyz = geometry.point(self.length/2 - value[1][0], 0, value[1][1])\n s = 0 if value[0] == +1 else pi\n rel_xyz = geometry.aboutZ(self.theta+s).dot(wall_xyz)\n marker.x = self.x + rel_xyz[1][0]\n marker.y = self.y + rel_xyz[0][0]\n marker.z = rel_xyz[2][0]\n marker.theta = wrap_angle(self.theta + s)\n marker.is_fixed = self.is_fixed\n\n @property\n def is_visible(self):\n try:\n seen_marker_keys = [('Aruco-%d' % id) for id in evbase.robot_for_loading.world.aruco.seen_marker_ids]\n except:\n return True\n for m in self.marker_specs.keys():\n if m in seen_marker_keys:\n return True\n return False\n\n def __repr__(self):\n if self.pose_confidence >= 0:\n vis = ' visible' if self.is_visible else ''\n fix = ' fixed' if self.is_fixed else ''\n return '<WallObj %s: (%.1f,%.1f) @ %d deg. for %.1f%s%s>' % \\\n (self.id, self.x, self.y, self.theta*180/pi, self.length, fix, vis)\n else:\n return '<WallObj %s: position unknown>' % self.id\n\nclass DoorwayObj(WorldObject):\n def __init__(self, wall, index):\n self.marker_ids = wall.door_ids[index]\n id = 'Doorway-' + str(self.marker_ids[0])\n super().__init__(id,0,0)\n self.theta = wall.theta\n self.wall = wall\n self.door_width = wall.door_width\n self.index = index # which doorway is this? 
0, 1, ...\n self.is_obstacle = False\n self.update()\n\n def update(self):\n bignum = 1e6\n self.theta = self.wall.theta\n m = max(-bignum, min(bignum, tan(self.theta+pi/2)))\n b = self.wall.y - m*self.wall.x\n dy = (self.wall.length/2 - self.wall.doorways[self.index][0]) * cos(self.theta)\n self.y = self.wall.y - dy\n if abs(m) > 1/bignum:\n self.x = (self.y - b) / m\n else:\n self.x = self.wall.x\n self.pose_confidence = self.wall.pose_confidence\n\n def __repr__(self):\n if self.pose_confidence >= 0:\n return '<DoorwayObj %s: (%.1f,%.1f) @ %d deg.>' % \\\n (self.id, self.x, self.y, self.theta*180/pi)\n else:\n return '<DoorwayObj %s: position unknown>' % self.id\n\n\nclass RoomObj(WorldObject):\n def __init__(self, name,\n points=np.resize(np.array([0,0,0,1]),(4,4)).transpose(),\n floor=1, door_ids=[], connections=[]):\n \"points should be four points in homogeneous coordinates forming a convex polygon\"\n id = 'Room-' + name\n self.name = name\n x,y,z,s = points.mean(1)\n super().__init__(id,x,y)\n self.points = points\n self.floor = floor\n self.door_ids = door_ids\n self.connections = connections\n self.is_obstacle = False\n self.is_fixed = True\n\n def __repr__(self):\n return '<RoomObj %s: (%.1f,%.1f) floor=%s>' % (self.id, self.x, self.y, self.floor)\n\n def get_bounding_box(self):\n mins = self.points.min(1)\n maxs = self.points.max(1)\n return ((mins[0],mins[1]), (maxs[0],maxs[1]))\n\n\nclass ChipObj(WorldObject):\n def __init__(self, id, x, y, z=0, radius=25/2, thickness=4):\n super().__init__(id,x,y,z)\n self.radius = radius\n self.thickness = thickness\n\n def __repr__(self):\n return '<ChipObj (%.1f,%.1f) radius %.1f>' % \\\n (self.x, self.y, self.radius)\n\n\nclass FaceObj(WorldObject):\n def __init__(self, sdk_obj, id, x, y, z, name):\n super().__init__(id, x, y, z)\n self.sdk_obj = sdk_obj\n self.is_obstacle = False\n self.expression = 'unknown'\n\n @property\n def name(self):\n return self.sdk_obj.name\n\n @property\n def is_visible(self):\n return self.sdk_obj and self.sdk_obj.is_visible\n\n def __repr__(self):\n return \"<FaceObj name:'%s' expression:%s (%.1f, %.1f, %.1f) vis:%s>\" % \\\n (self.name, self.expression, self.x, self.y, self.z, self.is_visible)\n\n\nclass CameraObj(WorldObject):\n camera_size = (44., 44., 44.)\n def __init__(self, id=None, x=0, y=0, z=0, theta=0, phi = 0):\n super().__init__(id,x,y,z)\n self.size = self.camera_size\n self.id = id\n self.x = x\n self.y = y\n self.z = z\n self.theta = theta\n self.phi = phi\n\n def update(self,x=0, y=0, z=0, theta = 0, phi = 0):\n # Used instead of making new object for efficiency\n self.x = x\n self.y = y\n self.z = z\n self.theta = theta\n self.phi = phi\n\n def __repr__(self):\n return '<CameraObj %d: (%.1f, %.1f, %.1f) @ %f.>\\n' % \\\n (self.id, self.x, self.y, self.z, self.phi*180/pi)\n\nclass RobotForeignObj(WorldObject):\n def __init__(self, cozmo_id=None, x=0, y=0, z=0, theta=0, camera_id = -1 ):\n super().__init__(id,x,y,z)\n self.cozmo_id = cozmo_id\n self.x = x\n self.y = y\n self.z = z\n self.theta = theta\n self.size = (120., 90., 100.)\n self.camera_id = camera_id\n\n def __repr__(self):\n return '<RobotForeignObj %d: (%.1f, %.1f, %.1f) @ %f.> from camera %f\\n' % \\\n (self.cozmo_id, self.x, self.y, self.z, self.theta*180/pi, self.camera_id)\n\n def update(self, x=0, y=0, z=0, theta=0, camera_id=-1):\n # Used instead of making new object for efficiency\n self.x = x\n self.y = y\n self.z = z\n self.theta = theta\n self.camera_id = camera_id\n\n\nclass LightCubeForeignObj(WorldObject):\n 
light_cube_size = (44., 44., 44.)\n def __init__(self, id=None, cozmo_id=None, x=0, y=0, z=0, theta=0, is_visible=False):\n super().__init__(id,x,y,z)\n self.theta = theta\n self.cozmo_id = cozmo_id\n self.size = self.light_cube_size\n self.is_visible = is_visible\n\n def __repr__(self):\n return '<LightCubeForeignObj %d: (%.1f, %.1f, %.1f) @ %d deg.> by cozmo %d \\n' % \\\n (self.id, self.x, self.y, self.z, self.theta*180/pi, self.cozmo_id)\n\n def update(self, x=0, y=0, z=0, theta=0):\n # Used instead of making new object for efficiency\n self.x = x\n self.y = y\n self.z = z\n self.theta = theta\n\nclass MapFaceObj(WorldObject):\n mapFace_size = (104., 98.)\n def __init__(self, id=None, x=0, y=0, is_visible=False, expression='unknown'):\n super().__init__(id,x,y,0)\n self.theta = 0\n self.is_visible = is_visible\n self.expression = expression\n self.size = self.mapFace_size\n\n def __repr__(self):\n return \"<MapFaceObj: expression:%s (%.1f, %.1f, %.1f) vis:%s>\" % \\\n (self.expression, self.x, self.y, self.z, self.is_visible)\n\n def get_bounding_box(self):\n # ((xmin,ymin), (xmax,ymax))\n return ((self.x-self.size[0]/2, self.y-self.size[1]/2), (self.x+self.size[0]/2, self.y+self.size[1]/2))\n\n#================ WorldMap ================\n\nclass WorldMap():\n vision_z_fudge = 10 # Cozmo underestimates object z coord by about this much\n\n def __init__(self,robot):\n self.robot = robot\n self.objects = dict()\n self.shared_objects = dict()\n\n def clear(self):\n self.objects.clear()\n self.robot.world.particle_filter.clear_landmarks()\n\n def add_fixed_landmark(self,landmark):\n landmark.is_fixed = True\n self.objects[landmark.id] = landmark\n if isinstance(landmark,WallObj):\n wall = landmark\n if wall.marker_specs:\n self.robot.world.particle_filter.add_fixed_landmark(landmark)\n wall.make_doorways(self)\n wall.make_arucos(self)\n else:\n self.robot.world.particle_filter.add_fixed_landmark(landmark)\n\n def add_mapFace(self, mapFace):\n self.objects[mapFace.id] = mapFace\n\n def delete_wall(self,wall_id):\n \"Delete a wall, its markers, and its doorways, so we can predefine a new one.\"\n wall = self.objects.get(wall_id,None)\n if wall is None: return\n marker_ids = [('Aruco-'+str(id)) for id in wall.marker_specs.keys()]\n door_ids = [('Doorway-'+str(id)) for id in wall.door_ids]\n landmarks = self.robot.world.particle_filter.sensor_model.landmarks\n del self.objects[wall_id]\n if wall_id in landmarks:\n del landmarks[wall_id]\n for marker_id in marker_ids:\n if marker_id in self.objects:\n del self.objects[marker_id]\n if marker_id in landmarks:\n del landmarks[marker_id]\n for door_id in door_ids:\n if door_id in self.objects:\n del self.objects[door_id]\n\n def update_map(self):\n \"\"\"Called to update the map after every camera image, after\n object_observed and object_moved events, and just before the\n path planner runs.\n \"\"\"\n for (id,cube) in self.robot.world.light_cubes.items():\n self.update_cube(cube)\n if self.robot.world.charger: self.update_charger()\n for face in self.robot.world._faces.values():\n if face.face_id == face.updated_face_id:\n self.update_face(face)\n else:\n if face in self.robot.world.world_map.objects:\n del self.robot.world.world_map.objects[face]\n self.update_aruco_landmarks()\n self.update_walls()\n self.update_doorways()\n self.update_rooms()\n self.update_perched_cameras()\n\n def update_cube(self, cube):\n cube_id = 'Cube-' + str(cube.cube_id)\n if cube_id in self.objects:\n foreign_id = \"LightCubeForeignObj-\"+str(cube.cube_id)\n if 
foreign_id in self.objects:\n # remove foreign cube when local cube seen\n del self.objects[foreign_id]\n wmobject = self.objects[cube_id]\n wmobject.sdk_obj = cube # In case created before seen\n if self.robot.carrying is wmobject:\n if cube.is_visible: # we thought we were carrying it, but we're wrong\n self.robot.carrying = None\n return self.update_cube(cube)\n else: # we do appear to be carrying it\n self.update_carried_object(wmobject)\n elif cube.pose is None: # not in contact with cube\n return None\n else:\n # Cube is not in the worldmap, so add it.\n wmobject = LightCubeObj(cube)\n self.objects[cube_id] = wmobject\n if cube.is_visible:\n wmobject.update_from_sdk = True # In case we've just dropped it; now we see it\n wmobject.pose_confidence = +1\n elif (cube.pose is None):\n return wmobject\n elif wmobject.update_from_sdk and not cube.pose.is_comparable(self.robot.pose): # Robot picked up or cube moved\n if (self.robot.fetching and self.robot.fetching.sdk_obj is cube) or \\\n (self.robot.carrying and self.robot.carrying.sdk_obj is cube):\n pass\n else:\n wmobject.pose_confidence = -1\n return wmobject\n else: # Robot re-localized so cube came back\n pass # skip for now due to SDK bug\n # wmobject.update_from_sdk = True\n wmobject.pose_confidence = max(0, wmobject.pose_confidence)\n if wmobject.update_from_sdk: # True unless if we've dropped it and haven't seen it yet\n self.update_coords_from_sdk(wmobject, cube)\n wmobject.orientation, _, _, wmobject.theta = get_orientation_state(cube.pose.rotation.q0_q1_q2_q3)\n return wmobject\n\n def update_charger(self):\n charger = self.robot.world.charger\n if charger is None: return\n charger_id = 'Charger'\n wmobject = self.objects.get(charger_id, None)\n if wmobject is None:\n wmobject = ChargerObj(charger)\n self.objects[charger_id] = wmobject\n wmobject.sdk_obj = charger # In case we created charger before seeing it\n if self.robot.is_on_charger:\n wmobject.update_from_sdk = False\n theta = wrap_angle(self.robot.world.particle_filter.pose[2] + pi)\n charger_offset = np.array([[-30], [0], [0], [1]])\n offset = geometry.aboutZ(theta).dot(charger_offset)\n wmobject.x = self.robot.world.particle_filter.pose[0] + offset[0,0]\n wmobject.y = self.robot.world.particle_filter.pose[1] + offset[1,0]\n wmobject.theta = theta\n wmobject.pose_confidence = +1\n elif charger.is_visible:\n wmobject.update_from_sdk = True\n wmobject.pose_confidence = +1\n elif ((charger.pose is None) or not charger.pose.is_comparable(self.robot.pose)):\n wmobject.update_from_sdk = False\n wmobject.pose_confidence = -1\n else: # Robot re-localized so charger pose came back\n pass # skip for now due to SDK bug\n # wmobject.update_from_sdk = True\n # wmobject.pose_confidence = max(0, wmobject.pose_confidence)\n if wmobject.update_from_sdk: # True unless pose isn't comparable\n self.update_coords_from_sdk(wmobject, charger)\n wmobject.orientation, _, _, wmobject.theta = get_orientation_state(charger.pose.rotation.q0_q1_q2_q3)\n return wmobject\n\n def update_aruco_landmarks(self):\n try:\n seen_marker_objects = self.robot.world.aruco.seen_marker_objects.copy()\n except:\n return\n aruco_parent = self.robot.world.aruco\n for (key,value) in seen_marker_objects.items():\n marker_id = value.id_string\n wmobject = self.objects.get(marker_id, None)\n if wmobject is None:\n # TODO: wait to see marker several times before adding.\n wmobject = ArucoMarkerObj(aruco_parent,key) # coordinates will be filled in below\n self.objects[marker_id] = wmobject\n landmark_spec = None\n else:\n 
landmark_spec = self.robot.world.particle_filter.sensor_model.landmarks.get(marker_id, None)\n wmobject.pose_confidence = +1\n if isinstance(landmark_spec, tuple): # Particle filter is tracking this marker\n wmobject.x = landmark_spec[0][0][0]\n wmobject.y = landmark_spec[0][1][0]\n wmobject.theta = landmark_spec[1]\n elevation = atan2(value.camera_coords[1], value.camera_coords[2])\n cam_pos = geometry.point(0,\n value.camera_distance * sin(elevation),\n value.camera_distance * cos(elevation))\n base_pos = self.robot.kine.joint_to_base('camera').dot(cam_pos)\n wmobject.z = base_pos[2,0]\n wmobject.elevation = elevation\n wmobject.cam_pos = cam_pos\n wmobject.base_pos = base_pos\n elif isinstance(landmark_spec, Pose): # Hard-coded landmark pose for laboratory exercises\n wmobject.x = landmark_spec.position.x\n wmobject.y = landmark_spec.position.y\n wmobject.theta = landmark_spec.rotation.angle_z.radians\n wmobject.is_fixed = True\n else:\n # Non-landmark: convert aruco sensor values to pf coordinates and update\n elevation = atan2(value.camera_coords[1], value.camera_coords[2])\n cam_pos = geometry.point(0,\n value.camera_distance * sin(elevation),\n value.camera_distance * cos(elevation))\n base_pos = self.robot.kine.joint_to_base('camera').dot(cam_pos)\n wmobject.x = base_pos[0,0]\n wmobject.y = base_pos[1,0]\n wmobject.z = base_pos[2,0]\n wmobject.theta = wrap_angle(self.robot.world.particle_filter.pose[2] +\n value.euler_rotation[1]*(pi/180) + pi)\n wmobject.elevation = elevation\n wmobject.cam_pos = cam_pos\n wmobject.base_pos = base_pos\n\n def update_walls(self):\n for key, value in self.robot.world.particle_filter.sensor_model.landmarks.items():\n if key.startswith('Wall-'):\n if key in self.objects:\n wall = self.objects[key]\n if not wall.is_fixed and not wall.is_foreign:\n wall.update(self,x=value[0][0][0], y=value[0][1][0], theta=value[1])\n else:\n print('Creating new wall in worldmap:',key)\n wall_spec = wall_marker_dict[key]\n wall = WallObj(id=key,\n x=value[0][0][0],\n y=value[0][1][0],\n theta=value[1],\n length=wall_spec.length,\n height=wall_spec.height,\n door_width=wall_spec.door_width,\n door_height=wall_spec.door_height,\n marker_specs=wall_spec.marker_specs,\n doorways=wall_spec.doorways,\n door_ids=wall_spec.door_ids,\n is_foreign=False,\n spec_id=key)\n self.objects[key] = wall\n wall.pose_confidence = +1\n # Make the doorways\n wall.make_doorways(self.robot.world.world_map)\n # Relocate the aruco markers to their predefined positions\n spec = wall_marker_dict.get(wall.id, None)\n if spec is None: return\n for key,value in spec.marker_specs.items():\n if key in self.robot.world.world_map.objects:\n aruco_marker = self.robot.world.world_map.objects[key]\n dir = value[0] # +1 for front side or -1 for back side\n s = 0 if dir == +1 else pi\n aruco_marker.theta = wrap_angle(wall.theta + s)\n wall_xyz = geometry.point(-dir*(wall.length/2 - value[1][0]), 0, value[1][1])\n rel_xyz = geometry.aboutZ(aruco_marker.theta + pi/2).dot(wall_xyz)\n aruco_marker.x = wall.x + rel_xyz[0][0]\n aruco_marker.y = wall.y + rel_xyz[1][0]\n aruco_marker.z = rel_xyz[2][0]\n aruco_marker.is_fixed = wall.is_fixed\n\n def update_doorways(self):\n for key,value in self.robot.world.world_map.objects.items():\n if key.startswith('Doorway'):\n value.update()\n\n def update_rooms(self):\n LOCALIZED = 'localized' # should be from ParticleFilter.LOCALIZED\n if self.robot.world.particle_filter.state == LOCALIZED:\n confidence = +1\n else:\n confidence = -1\n for obj in 
self.robot.world.world_map.objects.values():\n if isinstance(obj, RoomObj):\n obj.pose_confidence = confidence\n\n def lookup_face_obj(self,face):\n \"Look up face by name, not by Face instance.\"\n for (key,value) in self.robot.world.world_map.objects.items():\n if isinstance(value, FaceObj) and value.name == face.name:\n if value.sdk_obj is not face and face.is_visible:\n # Older Face object with same name: replace it with new one\n value.sdk_obj = face\n value.id = face.face_id\n return value\n return None\n\n def update_face(self,face):\n if face.pose is None:\n return\n pos = face.pose.position\n face_obj = self.lookup_face_obj(face)\n if face_obj is None:\n face_obj = FaceObj(face, face.face_id, pos.x, pos.y, pos.z,\n face.name)\n if len(face.name) == 0:\n key = 'Face:unknown'\n else:\n key = 'Face:' + face.name\n self.robot.world.world_map.objects[key] = face_obj\n else:\n face_obj.sdk_obj = face # in case face.updated_id changed\n # now update the face\n if face.is_visible:\n face_obj.x = pos.x\n face_obj.y = pos.y\n face_obj.z = pos.z\n face_obj.expression = face.expression\n self.update_coords_from_sdk(face_obj, face)\n\n def update_custom_object(self, sdk_obj):\n if not sdk_obj.pose.is_comparable(self.robot.pose):\n print('Should never get here:',sdk_obj.pose,self.robot.pose)\n return\n id = 'CustomMarkerObj-' + str(sdk_obj.object_type.name[-2:])\n if not sdk_obj.is_unique:\n id += '-' + str(sdk_obj.object_id)\n if id in self.objects:\n wmobject = self.objects[id]\n wmobject.sdk_obj = sdk_obj # In case created marker before seeing it\n else:\n type = sdk_obj.object_type\n if type in custom_objs.custom_marker_types:\n wmobject = CustomMarkerObj(sdk_obj,id)\n elif type in custom_objs.custom_cube_types:\n wmobject = CustomCubeObj(sdk_obj,id)\n else: # if we don't know what else to do with it, treat as a custom marker\n wmobject = CustomMarkerObj(sdk_obj,id)\n self.objects[id] = wmobject\n wmobject.pose_confidence = +1\n self.update_coords_from_sdk(wmobject, sdk_obj)\n if isinstance(wmobject, CustomMarkerObj):\n wmobject.orientation, wmobject.rotation, _, _ = \\\n get_orientation_state(sdk_obj.pose.rotation.q0_q1_q2_q3, isPlanar=True)\n elif isinstance(wmobject, CustomCubeObj):\n wmobject.orientation, _, _, wmobject.rotation = \\\n get_orientation_state(sdk_obj.pose.rotation.q0_q1_q2_q3, isPlanar=False)\n\n def update_carried_object(self, wmobject):\n #print('Updating carried object ',wmobject)\n # set x,y based on robot's pose\n # need to cache initial orientation relative to robot:\n # grasped_orient = wmobject.theta - robot.pose.rotation.angle_z\n world_frame = self.robot.kine.joints['world']\n lift_attach_frame = self.robot.kine.joints['lift_attach']\n tmat = self.robot.kine.base_to_link(world_frame).dot(self.robot.kine.joint_to_base(lift_attach_frame))\n # *** HACK *** : depth calculation only works for cubes; need to handle custom obj, chips\n half_depth = wmobject.size[0] / 2\n new_pose = tmat.dot(geometry.point(half_depth,0))\n theta = self.robot.world.particle_filter.pose[2]\n wmobject.x = new_pose[0,0]\n wmobject.y = new_pose[1,0]\n wmobject.z = new_pose[2,0]\n wmobject.theta = theta\n\n def update_coords_from_sdk(self, wmobject, sdk_obj):\n dx = sdk_obj.pose.position.x - self.robot.pose.position.x\n dy = sdk_obj.pose.position.y - self.robot.pose.position.y\n alpha = atan2(dy,dx) - self.robot.pose.rotation.angle_z.radians\n r = sqrt(dx*dx + dy*dy)\n (rob_x,rob_y,rob_theta) = self.robot.world.particle_filter.pose\n wmobject.x = rob_x + r * cos(alpha + rob_theta)\n 
wmobject.y = rob_y + r * sin(alpha + rob_theta)\n wmobject.z = sdk_obj.pose.position.z\n orient_diff = wrap_angle(rob_theta - self.robot.pose.rotation.angle_z.radians)\n wmobject.theta = wrap_angle(sdk_obj.pose.rotation.angle_z.radians + orient_diff)\n\n def update_perched_cameras(self):\n if self.robot.world.server.started:\n pool = self.robot.world.server.camera_landmark_pool\n for key, val in pool.get(self.robot.aruco_id,{}).items():\n if key.startswith('Video'):\n if key in self.objects:\n self.objects[key].update(x=val[0][0,0], y=val[0][1,0], z=val[1][0],\n theta=val[1][2], phi=val[1][1])\n else:\n # last digit of capture id as camera key\n self.objects[key] = \\\n CameraObj(id=int(key[-2]), x=val[0][0,0], y=val[0][1,0],\n z=val[1][0], theta=val[1][2], phi=val[1][1])\n else:\n for key, val in self.robot.world.particle_filter.sensor_model.landmarks.items():\n if key.startswith('Video'):\n if key in self.objects:\n self.objects[key].update(x=val[0][0,0], y=val[0][1,0], z=val[1][0],\n theta=val[1][2], phi=val[1][1])\n else:\n # last digit of capture id as camera key\n self.objects[key] = \\\n CameraObj(id=int(key[-2]), x=val[0][0,0], y=val[0][1,0],\n z=val[1][0], theta=val[1][2], phi=val[1][1])\n\n def invalidate_poses(self):\n # *** This method is not currently used. ***\n for wmobj in self.robot.world.world_map.objects.values():\n if not wmobj.is_fixed:\n wmobj.pose_confidence = -1\n\n def show_objects(self):\n objs = self.objects\n print('%d object%s in the world map:' %\n (len(objs), '' if len(objs) == 1 else 's'))\n basics = ['Charger', 'Cube-1', 'Cube-2', 'Cube-3']\n ordered_keys = []\n for key in basics:\n if key in objs:\n ordered_keys.append(key)\n customs = []\n arucos = []\n walls = []\n misc = []\n for (key,value) in objs.items():\n if key in basics:\n pass\n elif isinstance(value, CustomMarkerObj):\n customs.append(key)\n elif isinstance(value, ArucoMarkerObj):\n arucos.append(key)\n elif isinstance(value, WallObj):\n walls.append(key)\n else:\n misc.append(key)\n arucos.sort()\n walls.sort()\n ordered_keys = ordered_keys + customs + arucos + walls + misc\n for key in ordered_keys:\n print(' ', objs[key])\n print()\n\n def show_pose(self):\n print('robot.pose is: %6.1f %6.1f @ %6.1f deg.' %\n (self.robot.pose.position.x,\n self.robot.pose.position.y,\n self.robot.pose_angle.degrees))\n print('particle filter: %6.1f %6.1f @ %6.1f deg. 
[%s]' %\n (*self.robot.world.particle_filter.pose[0:2],\n self.robot.world.particle_filter.pose[2]*180/pi,\n self.robot.world.particle_filter.state))\n print()\n\n def generate_doorway_list(self):\n \"Used by path_planner.py\"\n doorways = []\n for (key,obj) in self.objects.items():\n if isinstance(obj,DoorwayObj):\n w = obj.door_width / 2\n doorway_threshold_theta = obj.theta + pi/2\n dx = w * cos(doorway_threshold_theta)\n dy = w * sin(doorway_threshold_theta)\n doorways.append((obj, ((obj.x-dx, obj.y-dy), (obj.x+dx, obj.y+dy))))\n return doorways\n\n#================ Event Handlers ================\n\n def handle_object_observed(self, evt, **kwargs):\n if isinstance(evt.obj, LightCube):\n # print('observed: ',evt.obj)\n self.update_cube(evt.obj)\n elif isinstance(evt.obj, CustomObject):\n self.update_custom_object(evt.obj)\n elif isinstance(evt.obj, Face):\n self.update_face(evt.obj)\n\n def handle_object_move_started(self, evt, **kwargs):\n cube = evt.obj\n if (self.robot.carrying and self.robot.carrying.sdk_obj is cube) or \\\n (self.robot.fetching and self.robot.fetching.sdk_obj is cube):\n return\n cube.movement_start_time = time.time()\n cube_id = 'Cube-' + str(cube.cube_id)\n try: # wmobj may not have been created yet\n wmobject = self.robot.world.world_map.objects[cube_id]\n wmobject.pose_confidence = min(0, wmobject.pose_confidence)\n except:\n pass\n\n def handle_object_move_stopped(self, evt, **kwargs):\n cube = evt.obj\n cube.movement_start_time = None\n\n#================ Wall Specification ================\n\n# WallSpec is used in wall_defs.py\n\nwall_marker_dict = dict()\n\nclass WallSpec():\n def __init__(self, label=None, length=100, height=210, door_width=77, door_height=105,\n marker_specs=dict(), doorways=[], door_ids=[]):\n self.length = length\n self.height = height\n self.door_width = door_width\n self.door_height = door_height\n self.marker_specs = marker_specs\n self.doorways = doorways\n self.door_ids = door_ids\n marker_id_numbers = [int(marker_id[1+marker_id.rfind('-'):]) for marker_id in marker_specs.keys()]\n if label and len(marker_id_numbers) == 0:\n self.spec_id = 'Wall-' + label # 'Wall-A' for 'A'\n label = 'Wall-' + label\n elif len(marker_id_numbers) > 0 and not label:\n lowest_marker_id = 'Aruco-%d' % min(marker_id_numbers)\n self.spec_id = 'Wall-%d' % min(marker_id_numbers)\n label = self.spec_id # 'Wall-37' for 'Aruco-37'\n else:\n raise ValueError(\"Don't know how to label wall '%s'\" % label)\n self.label = label\n global wall_marker_dict\n for id in marker_specs.keys():\n wall_marker_dict[id] = self\n wall_marker_dict[label] = self\n"
},
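The update_coords_from_sdk() method in the worldmap.py text above is the conversion every map update funnels through: it measures the object's range and bearing relative to the robot in the SDK frame, then replays them from the robot's particle-filter pose. A minimal standalone sketch of that transform follows, assuming poses are plain (x, y, theta) tuples in millimeters and radians; sdk_to_map and its parameter names are illustrative, not part of cozmo-tools.

from math import atan2, sqrt, sin, cos, pi

def wrap_angle(a):
    # Normalize an angle into (-pi, pi].
    while a > pi:
        a -= 2 * pi
    while a <= -pi:
        a += 2 * pi
    return a

def sdk_to_map(obj_sdk_pose, robot_sdk_pose, robot_map_pose):
    """Re-express an object's SDK pose in the particle filter's map frame.

    The object's offset from the robot is measured in the SDK frame,
    rotated onto the robot's map heading, and added to the robot's map
    position; the object's heading shifts by the same frame difference."""
    (ox, oy, oq) = obj_sdk_pose
    (rx_sdk, ry_sdk, rq_sdk) = robot_sdk_pose
    (rx_map, ry_map, rq_map) = robot_map_pose
    dx, dy = ox - rx_sdk, oy - ry_sdk
    alpha = atan2(dy, dx) - rq_sdk   # bearing to object, relative to robot heading
    r = sqrt(dx * dx + dy * dy)      # range to object
    x = rx_map + r * cos(alpha + rq_map)
    y = ry_map + r * sin(alpha + rq_map)
    theta = wrap_angle(oq + wrap_angle(rq_map - rq_sdk))
    return (x, y, theta)

# Object 200 mm ahead of a robot at the SDK origin; on the map the robot
# stands at (100, 50) facing +y, so the object should land at (100, 250).
print(sdk_to_map((200, 0, 0), (0, 0, 0), (100, 50, pi / 2)))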
{
"alpha_fraction": 0.5374138951301575,
"alphanum_fraction": 0.5504705309867859,
"avg_line_length": 43.56272506713867,
"blob_id": "40283054ec82608150eede0c8780eace9976e7fb",
"content_id": "d17b858162b49f83ea2add197628e0051cfa3b93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37299,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 837,
"path": "/cozmo_fsm/pilot.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import math\nimport time\nimport sys\nimport asyncio\n\nfrom .base import *\nfrom .rrt import *\n#from .nodes import ParentFails, ParentCompletes, DriveArc, DriveContinuous, Forward, Turn\nfrom .nodes import *\nfrom .events import PilotEvent\n#from .transitions import CompletionTrans, FailureTrans, SuccessTrans, DataTrans, NullTrans\nfrom .transitions import *\nfrom .cozmo_kin import wheelbase, center_of_rotation_offset\nfrom .worldmap import WorldObject, DoorwayObj\nfrom .path_planner import PathPlannerProcess, PathPlanner\nfrom .geometry import segment_intersect_test\nfrom .doorpass import DoorPass\nfrom .pilot0 import *\n\nfrom cozmo.util import Pose, distance_mm, radians, degrees, speed_mmps\n\n#---------------- Pilot Exceptions and Events ----------------\n\nclass PilotException(Exception):\n def __str__(self):\n return self.__repr__()\n\nclass InvalidPose(PilotException): pass\nclass CollisionDetected(PilotException): pass\n\n# Note: StartCollides, GoalCollides, and MaxIterations exceptions are defined in rrt.py.\n\nclass ParentPilotEvent(StateNode):\n \"\"\"Receive a PilotEvent and repost it from the receiver's parent. This allows\n derived classes that use the Pilot to make its PilotEvents visible.\"\"\"\n def start(self,event):\n super().start(event)\n if not isinstance(event,PilotEvent):\n raise TypeError(\"ParentPilotEvent must be invoked with a PilotEvent, not %s\" % event)\n if 'grid_display' in event.args:\n self.robot.world.rrt.grid_display = event.args['grid_display']\n event2 = PilotEvent(event.status)\n event2.args = event.args\n self.parent.post_event(event2)\n\n#---------------- PilotBase ----------------\n\nclass PilotBase(StateNode):\n\n \"\"\"Base class for PilotToObject, PilotToPose, etc.\"\"\"\n\n class ClearDisplays(StateNode):\n def start(self,event=None):\n super().start()\n if self.robot.world.path_viewer:\n self.robot.world.path_viewer.clear()\n\n class SendObject(StateNode):\n def start(self,event=None):\n super().start()\n object = self.parent.object\n if object.pose_confidence < 0:\n self.parent.post_event(PilotEvent(NotLocalized,object=object))\n self.parent.post_failure()\n return\n self.post_event(DataEvent(self.parent.object))\n\n class ReceivePlan(StateNode):\n def start(self, event=None):\n super().start(event)\n if not isinstance(event, DataEvent):\n raise ValueError(event)\n (navplan, grid_display) = event.data\n\n self.robot.world.rrt.draw_path = navplan.extract_path()\n #print('ReceivePlan: draw_path=', self.robot.world.rrt.draw_path)\n self.robot.world.rrt.grid_display = grid_display\n self.post_event(DataEvent(navplan))\n\n class PilotExecutePlan(StateNode):\n def start(self, event=None):\n if not isinstance(event, DataEvent) and isinstance(event.data, NavPlan):\n raise ValueError(event)\n self.navplan = event.data\n self.index = 0\n super().start(event)\n\n class DispatchStep(StateNode):\n def start(self, event=None):\n super().start(event)\n step = self.parent.navplan.steps[self.parent.index]\n print('nav step', step)\n self.post_event(DataEvent(step.type))\n\n class ExecuteDrive(DriveContinuous):\n def start(self, event=None):\n step = self.parent.navplan.steps[self.parent.index]\n super().start(DataEvent(step.param))\n\n class ExecuteDoorPass(DoorPass):\n def start(self, event=None):\n step = self.parent.navplan.steps[self.parent.index]\n super().start(DataEvent(step.param))\n\n class ExecuteBackup(Forward):\n def start(self, event=None):\n step = self.parent.navplan.steps[self.parent.index]\n if len(step.param) > 1:\n print('***** 
WARNING: extra backup steps not being processed *****')\n node = step.param[0]\n dx = node.x - self.robot.world.particle_filter.pose[0]\n dy = node.y - self.robot.world.particle_filter.pose[1]\n self.distance = distance_mm(- sqrt(dx*dx + dy*dy))\n super().start(event)\n\n class NextStep(StateNode):\n def start(self, event=None):\n super().start(event)\n self.parent.index += 1\n if self.parent.index < len(self.parent.navplan.steps):\n self.post_success()\n else:\n self.post_completion()\n\n def setup(self):\n # # PilotExecutePlan machine\n # \n # dispatch: self.DispatchStep()\n # dispatch =D(NavStep.DRIVE)=> drive\n # dispatch =D(NavStep.DOORPASS)=> doorpass\n # dispatch =D(NavStep.BACKUP)=> backup\n # \n # drive: self.ExecuteDrive()\n # drive =C=> next\n # drive =F=> ParentFails()\n # \n # doorpass: self.ExecuteDoorPass()\n # doorpass =C=> next\n # doorpass =F=> ParentFails()\n # \n # backup: self.ExecuteBackup()\n # backup =C=> next\n # backup =F=> ParentFails()\n # \n # next: self.NextStep()\n # next =S=> dispatch\n # next =C=> ParentCompletes()\n \n # Code generated by genfsm on Sat Feb 18 04:45:30 2023:\n \n dispatch = self.DispatchStep() .set_name(\"dispatch\") .set_parent(self)\n drive = self.ExecuteDrive() .set_name(\"drive\") .set_parent(self)\n parentfails1 = ParentFails() .set_name(\"parentfails1\") .set_parent(self)\n doorpass = self.ExecuteDoorPass() .set_name(\"doorpass\") .set_parent(self)\n parentfails2 = ParentFails() .set_name(\"parentfails2\") .set_parent(self)\n backup = self.ExecuteBackup() .set_name(\"backup\") .set_parent(self)\n parentfails3 = ParentFails() .set_name(\"parentfails3\") .set_parent(self)\n next = self.NextStep() .set_name(\"next\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n \n datatrans1 = DataTrans(NavStep.DRIVE) .set_name(\"datatrans1\")\n datatrans1 .add_sources(dispatch) .add_destinations(drive)\n \n datatrans2 = DataTrans(NavStep.DOORPASS) .set_name(\"datatrans2\")\n datatrans2 .add_sources(dispatch) .add_destinations(doorpass)\n \n datatrans3 = DataTrans(NavStep.BACKUP) .set_name(\"datatrans3\")\n datatrans3 .add_sources(dispatch) .add_destinations(backup)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(drive) .add_destinations(next)\n \n failuretrans1 = FailureTrans() .set_name(\"failuretrans1\")\n failuretrans1 .add_sources(drive) .add_destinations(parentfails1)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(doorpass) .add_destinations(next)\n \n failuretrans2 = FailureTrans() .set_name(\"failuretrans2\")\n failuretrans2 .add_sources(doorpass) .add_destinations(parentfails2)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(backup) .add_destinations(next)\n \n failuretrans3 = FailureTrans() .set_name(\"failuretrans3\")\n failuretrans3 .add_sources(backup) .add_destinations(parentfails3)\n \n successtrans1 = SuccessTrans() .set_name(\"successtrans1\")\n successtrans1 .add_sources(next) .add_destinations(dispatch)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(next) .add_destinations(parentcompletes1)\n \n return self\n\n # End of PilotExecutePlan\n # End of PilotBase\n\n\n#---------------- PilotToObject ----------------\n\nclass PilotToObject(PilotBase):\n \"Use the wavefront planner to navigate to a distant object.\"\n def __init__(self, 
object=None):\n super().__init__()\n self.object=object\n\n def start(self, event=None):\n if isinstance(event,DataEvent):\n if isinstance(event.data, WorldObject):\n self.object = event.data\n else:\n raise ValueError('DataEvent to PilotToObject must be a WorldObject', event.data)\n if not isinstance(self.object, WorldObject):\n if hasattr(self.object, 'wm_obj'):\n self.object = self.object.wm_obj\n else:\n raise ValueError('Argument to PilotToObject constructor must be a WorldObject or SDK object', self.object)\n super().start(event)\n\n class CheckArrival(StateNode):\n def start(self, event=None):\n super().start(event)\n pf_pose = self.robot.world.particle_filter.pose\n if True: # *** TODO: check if we've arrived at the target shape\n self.post_success()\n else:\n self.post_failure()\n\n def setup(self):\n # # PilotToObject machine\n # \n # launch: self.ClearDisplays() =N=> self.SendObject() =D=> planner\n # \n # planner: PathPlannerProcess() =D=> recv\n # planner =PILOT=> ParentPilotEvent() =N=> Print('Path planner failed')\n # \n # recv: self.ReceivePlan() =D=> exec\n # \n # exec: self.PilotExecutePlan()\n # exec =C=> check\n # exec =F=> ParentFails()\n # \n # check: self.CheckArrival()\n # check =S=> ParentCompletes()\n # check =F=> planner\n \n # Code generated by genfsm on Sat Feb 18 04:45:30 2023:\n \n launch = self.ClearDisplays() .set_name(\"launch\") .set_parent(self)\n sendobject1 = self.SendObject() .set_name(\"sendobject1\") .set_parent(self)\n planner = PathPlannerProcess() .set_name(\"planner\") .set_parent(self)\n parentpilotevent1 = ParentPilotEvent() .set_name(\"parentpilotevent1\") .set_parent(self)\n print1 = Print('Path planner failed') .set_name(\"print1\") .set_parent(self)\n recv = self.ReceivePlan() .set_name(\"recv\") .set_parent(self)\n exec = self.PilotExecutePlan() .set_name(\"exec\") .set_parent(self)\n parentfails4 = ParentFails() .set_name(\"parentfails4\") .set_parent(self)\n check = self.CheckArrival() .set_name(\"check\") .set_parent(self)\n parentcompletes2 = ParentCompletes() .set_name(\"parentcompletes2\") .set_parent(self)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(launch) .add_destinations(sendobject1)\n \n datatrans4 = DataTrans() .set_name(\"datatrans4\")\n datatrans4 .add_sources(sendobject1) .add_destinations(planner)\n \n datatrans5 = DataTrans() .set_name(\"datatrans5\")\n datatrans5 .add_sources(planner) .add_destinations(recv)\n \n pilottrans1 = PilotTrans() .set_name(\"pilottrans1\")\n pilottrans1 .add_sources(planner) .add_destinations(parentpilotevent1)\n \n nulltrans2 = NullTrans() .set_name(\"nulltrans2\")\n nulltrans2 .add_sources(parentpilotevent1) .add_destinations(print1)\n \n datatrans6 = DataTrans() .set_name(\"datatrans6\")\n datatrans6 .add_sources(recv) .add_destinations(exec)\n \n completiontrans5 = CompletionTrans() .set_name(\"completiontrans5\")\n completiontrans5 .add_sources(exec) .add_destinations(check)\n \n failuretrans4 = FailureTrans() .set_name(\"failuretrans4\")\n failuretrans4 .add_sources(exec) .add_destinations(parentfails4)\n \n successtrans2 = SuccessTrans() .set_name(\"successtrans2\")\n successtrans2 .add_sources(check) .add_destinations(parentcompletes2)\n \n failuretrans5 = FailureTrans() .set_name(\"failuretrans5\")\n failuretrans5 .add_sources(check) .add_destinations(planner)\n \n return self\n\n#---------------- PilotToPose ----------------\n\nclass PilotToPose(PilotBase):\n \"Use the rrt path planner for short-range navigation to a specific pose.\"\n def 
__init__(self, target_pose=None, verbose=False, max_iter=RRT.DEFAULT_MAX_ITER):\n super().__init__()\n self.target_pose = target_pose\n self.verbose = verbose\n self.max_iter = max_iter\n\n def start(self, event=None):\n if isinstance(event, DataEvent) and isinstance(event.data, Pose):\n self.target_pose = event.data\n self.robot.world.rrt.max_iter = self.max_iter\n super().start(self)\n\n class PilotRRTPlanner(StateNode):\n def planner(self,start_node,goal_node):\n return self.robot.world.rrt.plan_path(start_node,goal_node)\n\n def start(self,event=None):\n super().start(event)\n tpose = self.parent.target_pose\n if tpose is None or (tpose.position.x == 0 and tpose.position.y == 0 and\n tpose.rotation.angle_z.radians == 0 and not tpose.is_valid):\n print(\"Pilot: target pose is invalid: %s\" % tpose)\n self.parent.post_event(PilotEvent(InvalidPose, pose=tpose))\n self.parent.post_failure()\n return\n (pose_x, pose_y, pose_theta) = self.robot.world.particle_filter.pose\n start_node = RRTNode(x=pose_x, y=pose_y, q=pose_theta)\n goal_node = RRTNode(x=tpose.position.x, y=tpose.position.y,\n q=tpose.rotation.angle_z.radians)\n\n start_escape_move = None\n try:\n (treeA, treeB, path) = self.planner(start_node, goal_node)\n\n except StartCollides as e:\n # See if we can escape the start collision using canned headings.\n # This could be made more sophisticated, e.g., using arcs.\n #print('planner',e,'start',start_node)\n escape_distance = 50 # mm\n escape_headings = (0, +30/180.0*pi, -30/180.0*pi, pi, pi/2, -pi/2)\n for phi in escape_headings:\n if phi != pi:\n new_q = wrap_angle(start_node.q + phi)\n d = escape_distance\n else:\n new_q = start_node.q\n d = -escape_distance\n new_start = RRTNode(x=start_node.x + d*cos(new_q),\n y=start_node.y + d*sin(new_q),\n q=new_q)\n # print('trying start escape', new_start)\n if not self.robot.world.rrt.collides(new_start):\n start_escape_move = (phi, start_node, new_start)\n start_node = new_start\n break\n if start_escape_move is None:\n print('PilotRRTPlanner: Start collides!',e)\n self.parent.post_event(PilotEvent(StartCollides, args=e.args))\n self.parent.post_failure()\n return\n try:\n (treeA, treeB, path) = self.planner(start_node, goal_node)\n except GoalCollides as e:\n print('PilotRRTPlanner: Goal collides!',e)\n self.parent.post_event(PilotEvent(GoalCollides, args=e.args))\n self.parent.post_failure()\n return\n except MaxIterations as e:\n print('PilotRRTPlanner: Max iterations %d exceeded!' % e.args[0])\n self.parent.post_event(PilotEvent(MaxIterations, args=e.args))\n self.parent.post_failure()\n return\n #print('replan',path)\n\n except GoalCollides as e:\n print('PilotRRTPlanner: Goal collides!',e)\n self.parent.post_event(PilotEvent(GoalCollides, args=e.args))\n self.parent.post_failure()\n return\n except MaxIterations as e:\n print('PilotRRTPlanner: Max iterations %d exceeded!' 
% e.args[0])\n self.parent.post_event(PilotEvent(MaxIterations, args=e.args))\n self.parent.post_failure()\n return\n\n if self.parent.verbose:\n print('Path planner generated',len(treeA)+len(treeB),'nodes.')\n if self.parent.robot.world.path_viewer:\n self.parent.robot.world.path_viewer.add_tree(path, (1,0,0,0.75))\n\n self.robot.world.rrt.draw_path = path\n\n # Construct the nav plan\n if self.parent.verbose:\n [print(' ',x) for x in path]\n\n doors = self.robot.world.world_map.generate_doorway_list()\n navplan = PathPlanner.from_path(path, doors)\n print('navplan=',navplan, ' steps=',navplan.steps)\n\n # Insert the StartCollides escape move if there is one\n if start_escape_move:\n phi, start, new_start = start_escape_move\n if phi == pi:\n escape_step = NavStep(NavStep.BACKUP, [new_start])\n navplan.steps.insert(0, escape_step)\n elif navplan.steps[0].type == NavStep.DRIVE:\n # Insert at the beginning the original start node we replaced with new_start\n navplan.steps[0].param.insert(0, start_node)\n else:\n # Shouldn't get here, but just in case\n escape_step = NavStep(NavStep.DRIVE, (RRTNode(start.x,start.y), RRTNode(new_start.x,new_start.y)))\n navplan.steps.insert(0, escape_step)\n\n #print('finalnavplan steps:', navplan.steps)\n\n # If no doorpass, we're good to go\n last_step = navplan.steps[-1]\n grid_display = None\n if last_step.type != NavStep.DOORPASS:\n self.post_data((navplan,grid_display))\n return\n\n # We planned for a doorpass as the last step; replan to the outer gate.\n door = last_step.param\n last_node = navplan.steps[-2].param[-1]\n gate = DoorPass.calculate_gate((last_node.x, last_node.y), door, DoorPass.OUTER_GATE_DISTANCE)\n goal_node = RRTNode(x=gate[0], y=gate[1], q=gate[2])\n print('new goal is', goal_node)\n try:\n (_, _, path) = self.planner(start_node, goal_node)\n except Exception as e:\n print('Pilot replanning for door gateway failed!', e.args)\n cpath = [(node.x,node.y) for node in path]\n navplan = PathPlanner.from_path(cpath, [])\n navplan.steps.append(last_step) # Add the doorpass step\n self.post_data((navplan,grid_display))\n\n # ----- End of PilotRRTPlanner -----\n\n class CheckArrival(StateNode):\n def start(self, event=None):\n super().start(event)\n pf_pose = self.robot.world.particle_filter.pose\n current_pose = Pose(pf_pose[0], pf_pose[1], 0, angle_z=radians(pf_pose[2]))\n pose_diff = current_pose - self.parent.target_pose\n distance = (pose_diff.position.x**2 + pose_diff.position.y**2) ** 0.5\n MAX_TARGET_DISTANCE = 50.0 # mm\n if distance <= MAX_TARGET_DISTANCE:\n self.post_success()\n else:\n self.post_failure()\n\n\n def setup(self):\n # # PilotToPose machine\n # \n # launch: self.ClearDisplays() =N=> planner\n # \n # planner: self.PilotRRTPlanner() =D=> recv\n # planner =PILOT=> ParentPilotEvent() =N=> Print('Path planner failed')\n # \n # recv: self.ReceivePlan() =D=> exec\n # \n # exec: self.PilotExecutePlan()\n # exec =C=> check\n # exec =F=> ParentFails()\n # \n # check: self.CheckArrival()\n # check =S=> ParentCompletes()\n # check =F=> planner\n \n # Code generated by genfsm on Sat Feb 18 04:45:30 2023:\n \n launch = self.ClearDisplays() .set_name(\"launch\") .set_parent(self)\n planner = self.PilotRRTPlanner() .set_name(\"planner\") .set_parent(self)\n parentpilotevent2 = ParentPilotEvent() .set_name(\"parentpilotevent2\") .set_parent(self)\n print2 = Print('Path planner failed') .set_name(\"print2\") .set_parent(self)\n recv = self.ReceivePlan() .set_name(\"recv\") .set_parent(self)\n exec = self.PilotExecutePlan() 
.set_name(\"exec\") .set_parent(self)\n parentfails5 = ParentFails() .set_name(\"parentfails5\") .set_parent(self)\n check = self.CheckArrival() .set_name(\"check\") .set_parent(self)\n parentcompletes3 = ParentCompletes() .set_name(\"parentcompletes3\") .set_parent(self)\n \n nulltrans3 = NullTrans() .set_name(\"nulltrans3\")\n nulltrans3 .add_sources(launch) .add_destinations(planner)\n \n datatrans7 = DataTrans() .set_name(\"datatrans7\")\n datatrans7 .add_sources(planner) .add_destinations(recv)\n \n pilottrans2 = PilotTrans() .set_name(\"pilottrans2\")\n pilottrans2 .add_sources(planner) .add_destinations(parentpilotevent2)\n \n nulltrans4 = NullTrans() .set_name(\"nulltrans4\")\n nulltrans4 .add_sources(parentpilotevent2) .add_destinations(print2)\n \n datatrans8 = DataTrans() .set_name(\"datatrans8\")\n datatrans8 .add_sources(recv) .add_destinations(exec)\n \n completiontrans6 = CompletionTrans() .set_name(\"completiontrans6\")\n completiontrans6 .add_sources(exec) .add_destinations(check)\n \n failuretrans6 = FailureTrans() .set_name(\"failuretrans6\")\n failuretrans6 .add_sources(exec) .add_destinations(parentfails5)\n \n successtrans3 = SuccessTrans() .set_name(\"successtrans3\")\n successtrans3 .add_sources(check) .add_destinations(parentcompletes3)\n \n failuretrans7 = FailureTrans() .set_name(\"failuretrans7\")\n failuretrans7 .add_sources(check) .add_destinations(planner)\n \n return self\n\n\nclass PilotPushToPose(PilotToPose):\n def __init__(self,pose):\n super().__init__(pose)\n self.max_turn = 20*(pi/180)\n\n def planner(self,start_node,goal_node):\n self.robot.world.rrt.step_size=20\n return self.robot.world.rrt.plan_push_chip(start_node,goal_node)\n\n\nclass PilotFrustration(StateNode):\n\n def __init__(self, text_template=None):\n super().__init__()\n self.text_template = text_template # contains at most one '%s'\n\n class SayObject(Say):\n def start(self, event=None):\n text_template = self.parent.text_template\n try:\n object_name = self.parent.parent.object.name # for rooms\n except:\n try:\n object_name = self.parent.parent.object.id # for cubes\n except:\n object_name = None\n if text_template is not None:\n if '%' in text_template:\n self.text = text_template % object_name\n else:\n self.text = text_template\n elif object_name is not None:\n self.text = 'Can\\'t reach %s' % object_name\n else:\n self.text = 'stuck'\n self.robot.world.rrt.text = self.text\n super().start(event)\n\n\n def setup(self):\n # launcher: AbortAllActions() =N=> StopAllMotors() =N=> {speak, turn}\n # \n # speak: self.SayObject()\n # \n # turn: StateNode() =RND=> {left, right}\n # \n # left: Turn(5) =C=> left2: Turn(-5)\n # \n # right: Turn(-5) =C=> right2: Turn(5)\n # \n # {speak, left2, right2} =C(2)=> animate\n # \n # animate: AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n # ignore_body_track=True,\n # ignore_head_track=True,\n # ignore_lift_track=True)\n # animate =C=> done\n # animate =F=> done\n # \n # done: ParentCompletes()\n \n # Code generated by genfsm on Sat Feb 18 04:45:30 2023:\n \n launcher = AbortAllActions() .set_name(\"launcher\") .set_parent(self)\n stopallmotors1 = StopAllMotors() .set_name(\"stopallmotors1\") .set_parent(self)\n speak = self.SayObject() .set_name(\"speak\") .set_parent(self)\n turn = StateNode() .set_name(\"turn\") .set_parent(self)\n left = Turn(5) .set_name(\"left\") .set_parent(self)\n left2 = Turn(-5) .set_name(\"left2\") .set_parent(self)\n right = Turn(-5) .set_name(\"right\") .set_parent(self)\n right2 = Turn(5) 
.set_name(\"right2\") .set_parent(self)\n animate = AnimationTriggerNode(trigger=cozmo.anim.Triggers.FrustratedByFailure,\n ignore_body_track=True,\n ignore_head_track=True,\n ignore_lift_track=True) .set_name(\"animate\") .set_parent(self)\n done = ParentCompletes() .set_name(\"done\") .set_parent(self)\n \n nulltrans5 = NullTrans() .set_name(\"nulltrans5\")\n nulltrans5 .add_sources(launcher) .add_destinations(stopallmotors1)\n \n nulltrans6 = NullTrans() .set_name(\"nulltrans6\")\n nulltrans6 .add_sources(stopallmotors1) .add_destinations(speak,turn)\n \n randomtrans1 = RandomTrans() .set_name(\"randomtrans1\")\n randomtrans1 .add_sources(turn) .add_destinations(left,right)\n \n completiontrans7 = CompletionTrans() .set_name(\"completiontrans7\")\n completiontrans7 .add_sources(left) .add_destinations(left2)\n \n completiontrans8 = CompletionTrans() .set_name(\"completiontrans8\")\n completiontrans8 .add_sources(right) .add_destinations(right2)\n \n completiontrans9 = CompletionTrans(2) .set_name(\"completiontrans9\")\n completiontrans9 .add_sources(speak,left2,right2) .add_destinations(animate)\n \n completiontrans10 = CompletionTrans() .set_name(\"completiontrans10\")\n completiontrans10 .add_sources(animate) .add_destinations(done)\n \n failuretrans8 = FailureTrans() .set_name(\"failuretrans8\")\n failuretrans8 .add_sources(animate) .add_destinations(done)\n \n return self\n\n\n\n\"\"\"\n\nclass PilotBase(StateNode):\n def __init__(self, verbose=False):\n super().__init__()\n self.verbose = verbose\n self.handle = None\n self.arc_radius = 40\n self.max_turn = pi\n\n def stop(self):\n if self.handle:\n self.handle.cancel()\n self.handle = None\n super().stop()\n\n def planner(self):\n raise ValueError('No planner specified')\n\n def calculate_arc(self, cur_x, cur_y, cur_q, dest_x, dest_y):\n # Compute arc node parameters to get us on a heading toward node_j.\n direct_turn_angle = wrap_angle(atan2(dest_y-cur_y, dest_x-cur_x) - cur_q)\n # find center of arc we'll be moving along\n dir = +1 if direct_turn_angle >=0 else -1\n cx = cur_x + self.arc_radius * cos(cur_q + dir*pi/2)\n cy = cur_y + self.arc_radius * sin(cur_q + dir*pi/2)\n dx = cx - dest_x\n dy = cy - dest_y\n center_dist = sqrt(dx*dx + dy*dy)\n if center_dist < self.arc_radius: # turn would be too wide: punt\n if self.verbose:\n print('*** TURN TOO WIDE ***, center_dist =',center_dist)\n center_dist = self.arc_radius\n # tangent points on arc: outer tangent formula from Wikipedia with r=0\n gamma = atan2(dy, dx)\n beta = asin(self.arc_radius / center_dist)\n alpha1 = gamma + beta\n tang_x1 = cx + self.arc_radius * cos(alpha1 + pi/2)\n tang_y1 = cy + self.arc_radius * sin(alpha1 + pi/2)\n tang_q1 = (atan2(tang_y1-cy, tang_x1-cx) + dir*pi/2)\n turn1 = tang_q1 - cur_q\n if dir * turn1 < 0:\n turn1 += dir * 2 * pi\n alpha2 = gamma - beta\n tang_x2 = cx + self.arc_radius * cos(alpha2 - pi/2)\n tang_y2 = cy + self.arc_radius * sin(alpha2 - pi/2)\n tang_q2 = (atan2(tang_y2-cy, tang_x2-cx) + dir*pi/2)\n turn2 = tang_q2 - cur_q\n if dir * turn2 < 0:\n turn2 += dir * 2 * pi\n # Correct tangent point has shortest turn.\n if abs(turn1) < abs(turn2):\n (tang_x,tang_y,tang_q,turn) = (tang_x1,tang_y1,tang_q1,turn1)\n else:\n (tang_x,tang_y,tang_q,turn) = (tang_x2,tang_y2,tang_q2,turn2)\n return (dir*self.arc_radius, turn)\n\n async def drive_arc(self,radius,angle):\n speed = 50\n l_wheel_speed = speed * (1 - wheelbase / radius)\n r_wheel_speed = speed * (1 + wheelbase / radius)\n last_heading = self.robot.pose.rotation.angle_z.degrees\n 
traveled = 0\n cor = self.robot.drive_wheels(l_wheel_speed, r_wheel_speed)\n self.handle = self.robot.loop.create_task(cor)\n while abs(traveled) < abs(angle):\n await asyncio.sleep(0.05)\n p0 = last_heading\n p1 = self.robot.pose.rotation.angle_z.degrees\n last_heading = p1\n diff = p1 - p0\n if diff < -90.0:\n diff += 360.0\n elif diff > 90.0:\n diff -= 360.0\n traveled += diff\n self.handle.cancel()\n self.handle = None\n self.robot.stop_all_motors()\n if self.verbose:\n print('drive_arc angle=',angle,'deg., traveled=',traveled,'deg.')\n\nclass PilotToPoseOld(PilotBase):\n def __init__(self, target_pose=None, verbose=False):\n super().__init__(verbose)\n self.target_pose = target_pose\n\n def planner(self,start_node,goal_node):\n return self.robot.world.rrt.plan_path(start_node,goal_node)\n\n def start(self,event=None):\n super().start(event)\n if self.target_pose is None:\n self.post_failure()\n return\n (pose_x, pose_y, pose_theta) = self.robot.world.particle_filter.pose\n start_node = RRTNode(x=pose_x, y=pose_y, q=pose_theta)\n tpose = self.target_pose\n goal_node = RRTNode(x=tpose.position.x, y=tpose.position.y,\n q=tpose.rotation.angle_z.radians)\n\n if self.robot.world.path_viewer:\n self.robot.world.path_viewer.clear()\n try:\n (treeA, treeB, path) = self.planner(start_node, goal_node)\n except StartCollides as e:\n print('Start collides!',e)\n self.post_event(PilotEvent(StartCollides, e.args))\n self.post_failure()\n return\n except GoalCollides as e:\n print('Goal collides!',e)\n self.post_event(PilotEvent(GoalCollides, e.args))\n self.post_failure()\n return\n except MaxIterations as e:\n print('Max iterations %d exceeded!' % e.args[0])\n self.post_event(PilotEvent(MaxIterations, e.args))\n self.post_failure()\n return\n\n if self.verbose:\n print(len(treeA)+len(treeB),'nodes')\n if self.robot.world.path_viewer:\n self.robot.world.path_viewer.add_tree(path, (1,0,0,0.75))\n\n # Construct and execute nav plan\n if self.verbose:\n [print(x) for x in path]\n self.plan = PathPlanner.from_path(path)\n if self.verbose:\n print('Navigation Plan:')\n [print(y) for y in self.plan.steps]\n self.robot.loop.create_task(self.execute_plan())\n\n async def execute_plan(self):\n print('-------- Executing Nav Plan --------')\n for step in self.plan.steps[1:]:\n if not self.running: return\n self.robot.world.particle_filter.variance_estimate()\n (cur_x,cur_y,cur_hdg) = self.robot.world.particle_filter.pose\n if step.type == NavStep.HEADING:\n (targ_x, targ_y, targ_hdg) = step.params\n # Equation of the line y=ax+c through the target pose\n a = min(1000, max(-1000, math.tan(targ_hdg)))\n c = targ_y - a * targ_x\n # Equation of the line y=bx+d through the present pose\n b = min(1000, max(-1000, math.tan(cur_hdg)))\n d = cur_y - b * cur_x\n # Intersection point\n int_x = (d-c) / (a-b) if abs(a-b) > 1e-5 else math.nan\n int_y = a * int_x + c\n dx = int_x - cur_x\n dy = int_y - cur_y\n dist = sqrt(dx*dx + dy*dy)\n if abs(wrap_angle(atan2(dy,dx) - cur_hdg)) > pi/2:\n dist = - dist\n dist += -center_of_rotation_offset\n if self.verbose:\n print('PRE-TURN: cur=(%.1f,%.1f) @ %.1f deg., int=(%.1f, %.1f) dist=%.1f' %\n (cur_x, cur_y, cur_hdg*180/pi, int_x, int_y, dist))\n if abs(dist) < 2:\n if self.verbose:\n print(' ** SKIPPED **')\n else:\n await self.robot.drive_straight(distance_mm(dist),\n speed_mmps(50)).wait_for_completed()\n (cur_x,cur_y,cur_hdg) = self.robot.world.particle_filter.pose\n turn_angle = wrap_angle(targ_hdg - cur_hdg)\n if self.verbose:\n print('TURN: cur=(%.1f,%.1f) @ %.1f deg., 
targ=(%.1f,%.1f) @ %.1f deg, turn_angle=%.1f deg.' %\n (cur_x, cur_y, cur_hdg*180/pi,\n targ_x, targ_y, targ_hdg*180/pi, turn_angle*180/pi))\n await self.robot.turn_in_place(cozmo.util.radians(turn_angle)).wait_for_completed()\n continue\n elif step.type == NavStep.FORWARD:\n (targ_x, targ_y, targ_hdg) = step.params\n dx = targ_x - cur_x\n dy = targ_y - cur_y\n course = atan2(dy,dx)\n turn_angle = wrap_angle(course - cur_hdg)\n if self.verbose:\n print('FWD: cur=(%.1f,%.1f)@%.1f\\N{degree sign} targ=(%.1f,%.1f)@%.1f\\N{degree sign} turn=%.1f\\N{degree sign}' %\n (cur_x,cur_y,cur_hdg*180/pi,\n targ_x,targ_y,targ_hdg*180/pi,turn_angle*180/pi),\n end='')\n sys.stdout.flush()\n if abs(turn_angle) > self.max_turn:\n turn_angle = self.max_turn if turn_angle > 0 else -self.max_turn\n if self.verbose:\n print(' ** TURN ANGLE SET TO', turn_angle*180/pi)\n # *** HACK: skip node if it requires unreasonable turn\n if abs(turn_angle) < 2*pi/180 or abs(wrap_angle(course-targ_hdg)) > pi/2:\n if self.verbose:\n print(' ** SKIPPED TURN **')\n else:\n await self.robot.turn_in_place(cozmo.util.radians(turn_angle)).wait_for_completed()\n if not self.running: return\n (cur_x,cur_y,cur_hdg) = self.robot.world.particle_filter.pose\n dx = targ_x - cur_x\n dy = targ_y - cur_y\n dist = sqrt(dx**2 + dy**2)\n if self.verbose:\n print(' dist=%.1f' % dist)\n await self.robot.drive_straight(distance_mm(dist),\n speed_mmps(50)).wait_for_completed()\n elif step.type == NavStep.ARC:\n (targ_x, targ_y, targ_hdg, radius) = step.params\n if self.verbose:\n print('ARC: cur=(%.1f,%.1f) @ %.1f deg., targ=(%.1f,%.1f), targ_hdg=%.1f deg., radius=%.1f' %\n (cur_x,cur_y,cur_hdg*180/pi,targ_x,targ_y,targ_hdg*180/pi,radius))\n (actual_radius, actual_angle) = \\\n self.calculate_arc(cur_x, cur_y, cur_hdg, targ_x, targ_y)\n if self.verbose:\n print(' ** actual_radius =', actual_radius, ' actual_angle=', actual_angle*180/pi)\n await self.drive_arc(actual_radius, math.degrees(abs(actual_angle)))\n else:\n raise ValueError('Invalid NavStep',step)\n if self.verbose:\n print('done executing')\n self.post_completion()\n\"\"\"\n"
},
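When the RRT start node collides, PilotRRTPlanner above does not fail immediately: it probes a short move along a small set of canned headings (a straight backup for phi == pi) and replans from the first collision-free candidate. The sketch below isolates that search; a plain collides(x, y, q) predicate stands in for robot.world.rrt.collides(), simple tuples stand in for RRTNode, and find_escape is an illustrative name.

from math import pi, sin, cos

def wrap_angle(a):
    # Normalize an angle into (-pi, pi].
    while a > pi:
        a -= 2 * pi
    while a <= -pi:
        a += 2 * pi
    return a

ESCAPE_DISTANCE = 50  # mm, as in pilot.py
ESCAPE_HEADINGS = (0, 30/180*pi, -30/180*pi, pi, pi/2, -pi/2)  # same order as pilot.py

def find_escape(start, collides):
    """Return (phi, new_start) for the first canned heading whose escape
    point is collision-free, or None if every candidate still collides.
    phi == pi means 'back up' rather than 'turn around and drive'."""
    (x, y, q) = start
    for phi in ESCAPE_HEADINGS:
        if phi != pi:
            new_q, d = wrap_angle(q + phi), ESCAPE_DISTANCE
        else:
            new_q, d = q, -ESCAPE_DISTANCE  # drive backward on the current heading
        candidate = (x + d * cos(new_q), y + d * sin(new_q), new_q)
        if not collides(*candidate):
            return (phi, candidate)
    return None

# Toy obstacle field occupying everything with x > 40: the forward and
# 30-degree probes collide, so the backup move (phi == pi) is chosen.
print(find_escape((0, 0, 0), lambda x, y, q: x > 40))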
{
"alpha_fraction": 0.6367231607437134,
"alphanum_fraction": 0.6581920981407166,
"avg_line_length": 41.14285659790039,
"blob_id": "9ca0728a6bbfd2662a88f86d543de10bb53a01f0",
"content_id": "1f771f06b0479b390cca5e69a038ce987c4cf779",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1770,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 42,
"path": "/cozmo_fsm/examples/BackItUp.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n The BackItUp demo illustrates the use of fork/join to launch\n parallel actions and synchronize them again. The fork is performed\n by the NullTrans transition with two destinations, while the join is\n performed by the CompletionTrans transition with two sources.\n\n Behavior: Cozmo backs up by 100 mm while simultaneously beeping. He\n uses DriveForward instead of Forward to avoid conflict with the Say\n action. When he's done backing up, he stops beeping and says 'Safety first'.\n\"\"\"\n\nfrom cozmo_fsm import *\n\nclass BackItUp(StateMachineProgram):\n def setup(self):\n \"\"\"\n launcher: StateNode() =N=> {driver, speaker}\n \n driver: Forward(-100,10)\n speaker: Say('beep',duration_scalar=0.8,abort_on_stop=True) =C=> speaker\n \n {driver,speaker} =C=> finisher: Say('Safety first!')\n \n \"\"\"\n \n # Code generated by genfsm on Mon Feb 17 03:10:16 2020:\n \n launcher = StateNode() .set_name(\"launcher\") .set_parent(self)\n driver = Forward(-100,10) .set_name(\"driver\") .set_parent(self)\n speaker = Say('beep',duration_scalar=0.8,abort_on_stop=True) .set_name(\"speaker\") .set_parent(self)\n finisher = Say('Safety first!') .set_name(\"finisher\") .set_parent(self)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(launcher) .add_destinations(driver,speaker)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(speaker) .add_destinations(speaker)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(driver,speaker) .add_destinations(finisher)\n \n return self\n"
},
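For readers who have not seen the transition classes before, the fork/join shape that BackItUp's docstring describes can be sketched in plain asyncio, which cozmo_fsm itself runs on: launching two coroutines at once plays the role of the NullTrans fork, and gather() plays the role of the two-source CompletionTrans join. The coroutine bodies below are stand-ins for the real Forward and Say nodes, not cozmo_fsm API.

import asyncio

async def driver():
    # Stands in for Forward(-100, 10): one long-running action.
    await asyncio.sleep(1.0)
    print('done backing up')

async def speaker():
    # Stands in for the self-looping Say('beep') node: repeated short actions.
    for _ in range(3):
        print('beep')
        await asyncio.sleep(0.3)

async def back_it_up():
    await asyncio.gather(driver(), speaker())  # fork both, join on completion
    print('Safety first!')

asyncio.run(back_it_up())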
{
"alpha_fraction": 0.5290983319282532,
"alphanum_fraction": 0.5481557250022888,
"avg_line_length": 41.068965911865234,
"blob_id": "68f2bafc448cbb4ba497a7658566591b4345953b",
"content_id": "dc0e374b5ba7d4743a8b3bc20fd9ceb949409422",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4880,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 116,
"path": "/cozmo_fsm/aruco.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "try: import cv2\nexcept: pass\n\nimport math\nfrom numpy import sqrt, arctan2, array, multiply\n\nARUCO_MARKER_SIZE = 44\n\nclass ArucoMarker(object):\n def __init__(self, aruco_parent, marker_id, bbox, translation, rotation):\n self.id = marker_id\n self.id_string = 'Aruco-' + str(marker_id)\n self.bbox = bbox\n self.aruco_parent = aruco_parent\n\n # OpenCV Pose information\n self.opencv_translation = translation\n self.opencv_rotation = (180/math.pi)*rotation\n\n # Marker coordinates in robot's camera reference frame\n self.camera_coords = (-translation[0], -translation[1], translation[2])\n\n # Distance in the x-y plane; particle filter ignores height so don't include it\n self.camera_distance = math.sqrt(translation[0]*translation[0] +\n # translation[1]*translation[1] +\n translation[2]*translation[2])\n # Conversion to euler angles\n self.euler_rotation = self.rotationMatrixToEulerAngles(\n cv2.Rodrigues(rotation)[0])*(180/math.pi)\n\n def __str__(self):\n return \"<ArucoMarker id=%d trans=(%d,%d,%d) rot=(%d,%d,%d) erot=(%d,%d,%d)>\" % \\\n (self.id, *self.opencv_translation, *self.opencv_rotation, *self.euler_rotation)\n\n def __repr__(self):\n return self.__str__()\n\n @staticmethod\n def rotationMatrixToEulerAngles(R) :\n sy = sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular:\n x = arctan2(R[2,1] , R[2,2])\n y = arctan2(-R[2,0], sy)\n z = arctan2(R[1,0], R[0,0])\n else:\n x = arctan2(-R[1,2], R[1,1])\n y = arctan2(-R[2,0], sy)\n z = 0\n\n return array([x, y, z])\n\nclass Aruco(object):\n def __init__(self, robot, arucolibname, marker_size=ARUCO_MARKER_SIZE, disabled_ids=[]):\n self.arucolibname = arucolibname\n if arucolibname is not None:\n self.aruco_lib = cv2.aruco.getPredefinedDictionary(arucolibname)\n self.seen_marker_ids = []\n self.seen_marker_objects = dict()\n self.disabled_ids = disabled_ids # disable markers with high false detection rates\n self.ids = []\n self.corners = []\n\n if robot.camera is None: return # robot is a SimRobot\n\n # Added for pose estimation\n self.marker_size = marker_size #these units will be pose est units!!\n self.image_size = (320,240)\n focal_len = robot.camera._config._focal_length\n self.camera_matrix = \\\n array([[focal_len.x , 0, self.image_size[0]/2],\n [0, -focal_len.y, self.image_size[1]/2],\n [0, 0, 1]]).astype(float)\n self.distortion_array = array([[0,0,0,0,0]]).astype(float)\n\n def process_image(self,gray):\n self.seen_marker_ids = []\n self.seen_marker_objects = dict()\n (self.corners,self.ids,_) = \\\n cv2.aruco.detectMarkers(gray, self.aruco_lib)\n if self.ids is None: return\n\n # Estimate poses\n # Warning: OpenCV 3.2 estimate returns a pair; 3.3 returns a triplet\n estimate = \\\n cv2.aruco.estimatePoseSingleMarkers(self.corners,\n self.marker_size,\n self.camera_matrix,\n self.distortion_array)\n\n self.rvecs = estimate[0]\n self.tvecs = estimate[1]\n for i in range(len(self.ids)):\n id = int(self.ids[i][0])\n if id in self.disabled_ids: continue\n tvec = self.tvecs[i][0]\n rvec = self.rvecs[i][0]\n if rvec[2] > math.pi/2 or rvec[2] < -math.pi/2:\n # can't see a marker facing away from us, so bogus\n print('Marker rejected! 
id=', id, 'tvec=', tvec, 'rvec=', rvec)\n continue\n marker = ArucoMarker(self, id,\n self.corners[i], self.tvecs[i][0], self.rvecs[i][0])\n self.seen_marker_ids.append(marker.id)\n self.seen_marker_objects[marker.id] = marker\n\n def annotate(self, image, scale_factor):\n scaled_corners = [ multiply(corner, scale_factor) for corner in self.corners ]\n displayim = cv2.aruco.drawDetectedMarkers(image, scaled_corners, self.ids)\n\n #add poses currently fails since image is already scaled. How to scale camMat?\n #if(self.ids is not None):\n # for i in range(len(self.ids)):\n # displayim = cv2.aruco.drawAxis(displayim,self.cameraMatrix,\n # self.distortionArray,self.rvecs[i],self.tvecs[i]*scale_factor,self.axisLength*scale_factor)\n return displayim\n"
},
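The rotationMatrixToEulerAngles() decomposition above can be sanity-checked in isolation: build a rotation matrix from known Z-Y-X Euler angles and confirm that the same arctan2 formulas recover them. The euler_to_matrix helper below is test scaffolding under that Z-Y-X convention, not part of cozmo-tools.

import numpy as np

def euler_to_matrix(x, y, z):
    # R = Rz(z) @ Ry(y) @ Rx(x), the composition the aruco.py extraction inverts.
    cx, sx = np.cos(x), np.sin(x)
    cy, sy = np.cos(y), np.sin(y)
    cz, sz = np.cos(z), np.sin(z)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx

angles = np.array([0.1, -0.4, 0.8])        # radians, away from the gimbal-lock case
R = euler_to_matrix(*angles)
sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)  # same singularity test as the class method
recovered = np.array([np.arctan2(R[2, 1], R[2, 2]),
                      np.arctan2(-R[2, 0], sy),
                      np.arctan2(R[1, 0], R[0, 0])])
print(np.allclose(angles, recovered))      # True in the non-singular branch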
{
"alpha_fraction": 0.5570614337921143,
"alphanum_fraction": 0.581549346446991,
"avg_line_length": 31.79292869567871,
"blob_id": "d582534bc4730730e8e1524bc458e1348eb6f645",
"content_id": "3b22a475a30a93477d092dc99b03c167b5367a4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6493,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 198,
"path": "/cozmo_fsm/cam_viewer.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nOpenGL based CamViewer\n\"\"\"\n\nimport numpy as np\nimport math\nimport random\nimport time\nimport cozmo\nfrom cozmo.util import degrees, distance_mm, speed_mmps\n\ntry:\n import cv2\n from PIL import Image\n from OpenGL.GLUT import *\n from OpenGL.GL import *\n from OpenGL.GLU import *\nexcept:\n pass\n\nfrom . import opengl\nfrom . import program\n\n#For capturing images\nglobal snapno, path\nsnapno = 0\npath = 'snap/'\n\nWINDOW = None\n\nclass CamViewer():\n def __init__(self, robot, width=640, height=480,\n windowName=\"Cozmo's World\",\n bgcolor=(0, 0, 0)):\n self.robot = robot\n self.width = width\n self.height = height\n self.aspect = self.width/self.height\n self.windowName = windowName\n self.bgcolor = bgcolor\n self.scale = 1\n self.show_axes = True\n self.show_memory_map = False\n\n def process_image(self):\n raw = self.robot.world.latest_image.raw_image\n curim = np.array(raw)\n gray = cv2.cvtColor(curim,cv2.COLOR_BGR2GRAY)\n\n running_fsm = program.running_fsm\n\n # Aruco image processing\n if running_fsm.aruco:\n running_fsm.robot.world.aruco.process_image(gray)\n # Other image processors can run here if the user supplies them.\n running_fsm.user_image(curim,gray)\n # Done with image processing\n\n # Annotate and display image if requested\n if running_fsm.force_annotation or running_fsm.cam_viewer is not None:\n scale = running_fsm.annotated_scale_factor\n # Apply Cozmo SDK annotations and rescale.\n if running_fsm.annotate_sdk:\n coz_ann = self.robot.world.latest_image.annotate_image(scale=scale)\n annotated_im = np.array(coz_ann)\n elif scale != 1:\n shape = curim.shape\n dsize = (scale*shape[1], scale*shape[0])\n annotated_im = cv2.resize(curim, dsize)\n else:\n annotated_im = curim\n # Aruco annotation\n if running_fsm.aruco and \\\n len(running_fsm.robot.world.aruco.seen_marker_ids) > 0:\n annotated_im = running_fsm.robot.world.aruco.annotate(annotated_im,scale)\n # Other annotators can run here if the user supplies them.\n annotated_im = running_fsm.user_annotate(annotated_im)\n # Done with annotation\n # Yellow viewer crosshairs\n if running_fsm.viewer_crosshairs:\n shape = annotated_im.shape\n cv2.line(annotated_im, (int(shape[1]/2),0), (int(shape[1]/2),shape[0]), (0,255,255), 1)\n cv2.line(annotated_im, (0,int(shape[0]/2)), (shape[1],int(shape[0]/2)), (0,255,255), 1)\n image = annotated_im\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self.width, self.height,0,GL_RGB, GL_UNSIGNED_BYTE, image)\n glutPostRedisplay()\n\n # ================ Window Setup ================\n def window_creator(self):\n global WINDOW\n #glutInit(sys.argv)\n WINDOW = opengl.create_window(\n bytes(self.windowName, 'utf-8'), (self.width, self.height))\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(self.width, self.height)\n glutInitWindowPosition(100, 100)\n glClearColor(0.0, 0.0, 0.0, 1.0)\n glutDisplayFunc(self.display)\n glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyPressed)\n glutSpecialFunc(self.specialKeyPressed)\n glutSpecialUpFunc(self.specialKeyUp)\n\n def start(self): # Displays in background\n if not WINDOW:\n opengl.init()\n opengl.CREATION_QUEUE.append(self.window_creator)\n\n def display(self):\n self.process_image()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\n # Set Projection Matrix\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0, self.width, 0, self.height)\n\n glMatrixMode(GL_TEXTURE)\n 
glLoadIdentity()\n glScalef(1.0, -1.0, 1.0)\n glMatrixMode(GL_MODELVIEW)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0, 0.0)\n glVertex2f(0.0, 0.0)\n glTexCoord2f(1.0, 0.0)\n glVertex2f(self.width, 0.0)\n glTexCoord2f(1.0, 1.0)\n glVertex2f(self.width, self.height)\n glTexCoord2f(0.0, 1.0)\n glVertex2f(0.0, self.height)\n glEnd()\n\n glFlush()\n glutSwapBuffers()\n\n def reshape(self, w, h):\n if h == 0:\n h = 1\n\n glViewport(0, 0, w, h)\n glMatrixMode(GL_PROJECTION)\n\n glLoadIdentity()\n nRange = 1.0\n if w <= h:\n glOrtho(-nRange, nRange, -nRange*h/w, nRange*h/w, -nRange, nRange)\n else:\n glOrtho(-nRange*w/h, nRange*w/h, -nRange, nRange, -nRange, nRange)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n def keyPressed(self, key, x, y):\n if ord(key) == 27:\n print(\"Use 'exit' to quit.\")\n #return\n if key == b'c':\n print(\"Taking a snap\")\n self.capture()\n self.display()\n\n def specialKeyPressed(self, key, x, y):\n global leftorrightindicate, globthres\n if key == GLUT_KEY_LEFT:\n self.robot.drive_wheels(-100, 100)\n leftorrightindicate = True\n globthres=100\n elif key == GLUT_KEY_RIGHT:\n self.robot.drive_wheels(100, -100)\n leftorrightindicate = True\n globthres = 100\n elif key == GLUT_KEY_UP:\n self.robot.drive_wheels(200, 200)\n leftorrightindicate = False\n globthres = 100\n elif key == GLUT_KEY_DOWN:\n self.robot.drive_wheels(-200, -200)\n leftorrightindicate = True\n globthres = 100\n glutPostRedisplay()\n\n def specialKeyUp(self, key, x, y):\n global leftorrightindicate, go_forward\n self.robot.drive_wheels(0, 0)\n leftorrightindicate = True\n go_forward = GLUT_KEY_UP\n glutPostRedisplay()\n\n def capture(self, name='cozmo_snap'):\n global snapno, path\n if not os.path.exists(path):\n os.makedirs(path)\n\n image = np.array(self.robot.world.latest_image.raw_image)\n Image.fromarray(image).save(path + '/' + name + str(snapno) + '.jpg')\n snapno +=1\n"
},
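The viewer_crosshairs branch of process_image() above draws centered yellow lines with cv2.line. Here is the same overlay as a standalone function exercised on a synthetic frame, so it runs without a robot or an OpenGL window; draw_crosshairs is an illustrative name.

import numpy as np
import cv2

def draw_crosshairs(image, color=(0, 255, 255), thickness=1):
    # Vertical and horizontal lines through the image center, as in cam_viewer.py.
    h, w = image.shape[:2]
    cv2.line(image, (w // 2, 0), (w // 2, h), color, thickness)
    cv2.line(image, (0, h // 2), (w, h // 2), color, thickness)
    return image

frame = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in camera frame
frame = draw_crosshairs(frame)
print(bool(frame[120].any()))  # True: the horizontal line was drawn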
{
"alpha_fraction": 0.5864421129226685,
"alphanum_fraction": 0.5924276113510132,
"avg_line_length": 37.11140441894531,
"blob_id": "864fa9f5ca58482e39c4229ba854a642f21cbe10",
"content_id": "955ec50a49fc6b7877bd0d9d3fed132c7f71a2e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14368,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 377,
"path": "/cozmo_fsm/program.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport functools\nimport inspect\nimport os\nimport time\n\nimport numpy\nimport numpy as np\n\ntry:\n import cv2\n ARUCO_DICT_4x4_100 = cv2.aruco.DICT_4X4_100\nexcept:\n ARUCO_DICT_4x4_100 = None\n\nimport cozmo\nfrom cozmo.util import degrees, distance_mm, speed_mmps\nfrom .evbase import EventRouter\nfrom .base import StateNode\nfrom .aruco import *\nfrom .particle import *\nfrom .cozmo_kin import *\nfrom .particle_viewer import ParticleViewer\nfrom .worldmap import WorldMap\nfrom .rrt import RRT\nfrom .path_viewer import PathViewer\nfrom .worldmap_viewer import WorldMapViewer\nfrom .cam_viewer import CamViewer\nfrom .speech import SpeechListener, Thesaurus\nfrom . import opengl\nfrom . import custom_objs\nfrom .perched import *\nfrom .sharedmap import *\nfrom .cam_viewer import CamViewer\n\nrunning_fsm = None\ncharger_warned = False\n\nclass StateMachineProgram(StateNode):\n def __init__(self,\n kine_class = CozmoKinematics,\n cam_viewer = True,\n force_annotation = False, # set to True for annotation even without cam_viewer\n annotate_sdk = True, # include SDK's own image annotations\n annotated_scale_factor = 2, # set to 1 to avoid cost of resizing images\n viewer_crosshairs = False, # set to True to draw viewer crosshairs\n\n particle_filter = True,\n landmark_test = SLAMSensorModel.is_solo_aruco_landmark,\n particle_viewer = False,\n particle_viewer_scale = 1.0,\n\n aruco = True,\n arucolibname = ARUCO_DICT_4x4_100,\n aruco_disabled_ids = (17, 37),\n aruco_marker_size = ARUCO_MARKER_SIZE,\n\n perched_cameras = False,\n\n world_map = None,\n worldmap_viewer = False,\n\n rrt = None,\n path_viewer = False,\n\n speech = False,\n speech_debug = False,\n thesaurus = Thesaurus(),\n\n simple_cli_callback = None\n ):\n super().__init__()\n self.name = self.__class__.__name__.lower()\n self.parent = None\n self.simple_cli_callback = simple_cli_callback\n\n if not hasattr(self.robot, 'erouter'):\n self.robot.erouter = EventRouter()\n self.robot.erouter.robot = self.robot\n self.robot.erouter.start()\n else:\n self.robot.erouter.clear()\n\n # Reset custom objects\n cor = self.robot.world.undefine_all_custom_marker_objects()\n if inspect.iscoroutine(cor):\n asyncio.ensure_future(cor)\n self.robot.loop.create_task(custom_objs.declare_objects(self.robot))\n time.sleep(0.25) # need time for custom objects to be transmitted\n\n self.kine_class = kine_class\n\n self.cam_viewer = cam_viewer\n self.viewer = None\n self.annotate_sdk = annotate_sdk\n self.force_annotation = force_annotation\n self.annotated_scale_factor = annotated_scale_factor\n self.viewer_crosshairs = viewer_crosshairs\n\n self.particle_filter = particle_filter\n self.landmark_test = landmark_test\n self.particle_viewer = particle_viewer\n self.particle_viewer_scale = particle_viewer_scale\n self.picked_up_callback = self.robot_picked_up\n self.put_down_handler = self.robot_put_down\n\n self.aruco = aruco\n self.aruco_marker_size = aruco_marker_size\n if self.aruco:\n self.robot.world.aruco = \\\n Aruco(self.robot, arucolibname, aruco_marker_size, aruco_disabled_ids)\n\n self.perched_cameras = perched_cameras\n if self.perched_cameras:\n self.robot.world.perched = PerchedCameraThread(self.robot)\n\n self.robot.aruco_id = -1\n self.robot.use_shared_map = False\n self.robot.world.server = ServerThread(self.robot)\n self.robot.world.client = ClientThread(self.robot)\n self.robot.world.is_server = True # Writes directly into perched.camera_pool\n\n self.world_map = world_map\n self.worldmap_viewer = 
worldmap_viewer\n\n self.rrt = rrt\n self.path_viewer = path_viewer\n\n self.speech = speech\n self.speech_debug = speech_debug\n self.thesaurus = thesaurus\n\n def start(self):\n global running_fsm\n running_fsm = self\n # Create a particle filter\n if not isinstance(self.particle_filter,ParticleFilter):\n self.particle_filter = SLAMParticleFilter(self.robot, landmark_test=self.landmark_test)\n elif isinstance(self.particle_filter,SLAMParticleFilter):\n self.particle_filter.clear_landmarks()\n pf = self.particle_filter\n self.robot.world.particle_filter = pf\n\n # Set up kinematics\n self.robot.kine = self.kine_class(self.robot)\n self.robot.was_picked_up = False\n self.robot.carrying = None\n self.robot.fetching = None\n\n # robot.is_picked_up uses just the cliff detector, and can be fooled.\n # robot.pose.rotation does not encode pitch or roll, only yaw.\n # So use accelerometer data as our backup method.\n self.robot.really_picked_up = \\\n (lambda robot :\n (lambda :\n robot.is_picked_up\n or (not robot.is_moving\n and (robot.accelerometer.z < 5000\n or robot.accelerometer.z > 13000))))(self.robot)\n# and (robot.accelerometer.z < 5000\n# or robot.accelerometer.z > 10300))))(self.robot)\n\n # World map and path planner\n self.robot.enable_facial_expression_estimation(True)\n self.robot.world.world_map = \\\n self.world_map or WorldMap(self.robot)\n self.robot.world.world_map.clear()\n self.robot.world.rrt = self.rrt or RRT(self.robot)\n\n if self.simple_cli_callback:\n self.make_cubes_available()\n\n # Polling\n self.set_polling_interval(0.025) # for kine and motion model update\n\n # Launch viewers\n if self.cam_viewer:\n if self.cam_viewer is True:\n self.cam_viewer = CamViewer(self.robot)\n self.cam_viewer.start()\n self.robot.world.cam_viewer = self.cam_viewer\n\n if self.particle_viewer:\n if self.particle_viewer is True:\n self.particle_viewer = \\\n ParticleViewer(self.robot, scale=self.particle_viewer_scale)\n self.particle_viewer.start()\n self.robot.world.particle_viewer = self.particle_viewer\n\n if self.path_viewer:\n if self.path_viewer is True:\n self.path_viewer = PathViewer(self.robot, self.robot.world.rrt)\n else:\n self.path_viewer.set_rrt(self.robot.world.rrt)\n self.path_viewer.start()\n self.robot.world.path_viewer = self.path_viewer\n\n if self.worldmap_viewer:\n if self.worldmap_viewer is True:\n self.worldmap_viewer = WorldMapViewer(self.robot)\n self.worldmap_viewer.start()\n self.robot.world.worldmap_viewer = self.worldmap_viewer\n\n # Request camera image and object recognition streams\n self.robot.camera.image_stream_enabled = True\n self.robot.world.add_event_handler(cozmo.world.EvtNewCameraImage,\n self.process_image)\n\n self.robot.world.add_event_handler(\n cozmo.objects.EvtObjectObserved,\n self.robot.world.world_map.handle_object_observed)\n\n # Set up cube motion detection\n cubes = self.robot.world.light_cubes\n for i in cubes:\n cubes[i].movement_start_time = None\n\n self.robot.world.add_event_handler(\n cozmo.objects.EvtObjectMovingStarted,\n self.robot.world.world_map.handle_object_move_started)\n\n self.robot.world.add_event_handler(\n cozmo.objects.EvtObjectMovingStopped,\n self.robot.world.world_map.handle_object_move_stopped)\n\n # Start speech recognition if requested\n if self.speech:\n self.speech_listener = SpeechListener(self.robot,self.thesaurus,debug=self.speech_debug)\n self.speech_listener.start()\n\n # Call parent's start() to launch the state machine by invoking the start node.\n super().start()\n\n def 
make_cubes_available(self):\n # Make worldmap cubes and charger accessible to simple_cli\n cubes = self.robot.world.light_cubes\n wc1 = wc2 = wc3 = wcharger = None\n if 1 in cubes:\n wc1 = self.robot.world.world_map.update_cube(cubes[1])\n if 2 in cubes:\n wc2 = self.robot.world.world_map.update_cube(cubes[2])\n if 3 in cubes:\n wc3 = self.robot.world.world_map.update_cube(cubes[3])\n if self.robot.world.charger is not None:\n wcharger = self.robot.world.world_map.update_charger()\n self.simple_cli_callback(wc1, wc2, wc3, wcharger)\n\n def robot_picked_up(self):\n print('** Robot was picked up!', self.robot.accelerometer)\n self.robot.stop_all_motors()\n self.run_picked_up_handler(self)\n\n def run_picked_up_handler(self,node):\n \"\"\"Complex state machines use a picked_up_handler to abort the machine\n gracefully, usually by posting a failure event from the parent.\"\"\"\n if node.running and hasattr(node,'picked_up_handler'):\n node.picked_up_handler()\n else:\n for child in node.children.values():\n self.run_picked_up_handler(child)\n\n def robot_put_down(self):\n print('** Robot was put down.')\n pf = self.robot.world.particle_filter\n pf.delocalize()\n\n def stop(self):\n super().stop()\n self.robot.erouter.clear()\n try:\n self.robot.world.remove_event_handler(cozmo.world.EvtNewCameraImage,\n self.process_image)\n except: pass\n\n def poll(self):\n global charger_warned\n # Invalidate cube pose if cube has been moving and isn't seen\n move_duration_regular_threshold = 0.5 # seconds\n move_duration_fetch_threshold = 1 # seconds\n cubes = self.robot.world.light_cubes\n now = None\n for i in cubes:\n cube = cubes[i]\n if self.robot.carrying and self.robot.carrying.sdk_obj is cube:\n continue\n if cube.movement_start_time is not None and not cube.is_visible:\n now = now or time.time()\n if self.robot.fetching and self.robot.fetching.sdk_obj is cube:\n threshold = move_duration_fetch_threshold\n else:\n threshold = move_duration_regular_threshold\n if (now - cube.movement_start_time) > threshold:\n cube_id = 'Cube-' + str(i)\n wcube = self.robot.world.world_map.objects[cube_id]\n print('Invalidating pose of', wcube)\n wcube.pose_confidence = -1\n cube.movement_start_time = None\n\n if self.simple_cli_callback:\n self.make_cubes_available()\n\n # Update robot kinematic description\n self.robot.kine.get_pose()\n\n # Handle robot being picked up or put down\n if self.robot.really_picked_up():\n # robot is in the air\n if self.robot.was_picked_up:\n pass # we already knew that\n else:\n self.picked_up_callback()\n else: # robot is on the ground\n pf = self.robot.world.particle_filter\n if pf:\n if self.robot.was_picked_up:\n self.put_down_handler()\n else:\n pf.move()\n self.robot.was_picked_up = self.robot.really_picked_up()\n\n # Handle robot being placed on the charger\n if self.robot.is_on_charger:\n if not charger_warned:\n print(\"\\n** On charger. 
Type robot.drive_off_charger_contacts() to enable motion.\")\n charger_warned = True\n else:\n charger_warned = False\n\n def user_image(self,image,gray): pass\n\n def user_annotate(self,image):\n return image\n\n def process_image(self,event,**kwargs):\n if self.cam_viewer:\n # if show cam_viewer, run the process_image under cam_viewer\n pass\n else:\n curim = numpy.array(event.image.raw_image) #cozmo-raw image\n gray = cv2.cvtColor(curim,cv2.COLOR_BGR2GRAY)\n\n # Aruco image processing\n if self.aruco:\n self.robot.world.aruco.process_image(gray)\n # Other image processors can run here if the user supplies them.\n self.user_image(curim,gray)\n # Done with image processing\n\n # Annotate and display image if requested\n if self.force_annotation or self.viewer is not None:\n scale = self.annotated_scale_factor\n # Apply Cozmo SDK annotations and rescale.\n if self.annotate_sdk:\n coz_ann = event.image.annotate_image(scale=scale)\n annotated_im = numpy.array(coz_ann)\n elif scale != 1:\n shape = curim.shape\n dsize = (scale*shape[1], scale*shape[0])\n annotated_im = cv2.resize(curim, dsize)\n else:\n annotated_im = curim\n # Aruco annotation\n if self.aruco and \\\n len(self.robot.world.aruco.seen_marker_ids) > 0:\n annotated_im = self.robot.world.aruco.annotate(annotated_im,scale)\n # Other annotators can run here if the user supplies them.\n annotated_im = self.user_annotate(annotated_im)\n # Done with annotation\n annotated_im = cv2.cvtColor(annotated_im,cv2.COLOR_RGB2BGR)\n\n # Use this heartbeat signal to look for new landmarks\n pf = self.robot.world.particle_filter\n if pf and not self.robot.really_picked_up():\n pf.look_for_new_landmarks()\n\n # Finally update the world map\n self.robot.world.world_map.update_map()\n"
},
{
"alpha_fraction": 0.78125,
"alphanum_fraction": 0.78125,
"avg_line_length": 25.5,
"blob_id": "d2949cbaed91ddc3ea9d0090ee2c786520f7c676",
"content_id": "de36d44253a25baa763d9360cd10438fa8f197da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 6,
"path": "/cozmo_fsm/README.md",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "# cozmo_fsm\n\n## Finite State Machine package for Cozmo, inspired by Tekkotsu.\n\nThis package is still in development. More documentation will be\nprovided soon.\n\n"
},
{
"alpha_fraction": 0.5964567065238953,
"alphanum_fraction": 0.6338582634925842,
"avg_line_length": 32.86666488647461,
"blob_id": "1829ae63f0cdee468596f843867eece3e3c143b1",
"content_id": "12a69c43d5e1fd467dc9b5973fcdff75e889a901",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1524,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 45,
"path": "/cozmo_fsm/examples/CV_Canny.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n CV_Canny demonstrates image thresholding in OpenCV, and\n independently, the Canny edge detector.\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom cozmo_fsm import *\n\nclass CV_Canny(StateMachineProgram):\n def __init__(self):\n super().__init__(aruco=False, particle_filter=False, cam_viewer=False,\n annotate_sdk=False)\n\n def start(self):\n dummy = numpy.array([[0]], dtype='uint8')\n super().start()\n\n cv2.namedWindow('edges')\n cv2.imshow('edges',dummy)\n\n cv2.namedWindow('threshold')\n cv2.imshow('threshold',dummy)\n\n cv2.createTrackbar('thresh','threshold',0,255,lambda self: None)\n cv2.setTrackbarPos('thresh', 'threshold', 100)\n\n cv2.createTrackbar('thresh1','edges',0,255,lambda self: None)\n cv2.createTrackbar('thresh2','edges',0,255,lambda self: None)\n cv2.setTrackbarPos('thresh1', 'edges', 50)\n cv2.setTrackbarPos('thresh2', 'edges', 150)\n\n def user_image(self,image,gray):\n cv2.waitKey(1)\n # Thresholding\n self.thresh = cv2.getTrackbarPos('thresh','threshold')\n ret, self.im_thresh = cv2.threshold(gray, self.thresh, 255, cv2.THRESH_BINARY)\n\n # Canny edge detection\n self.thresh1 = cv2.getTrackbarPos('thresh1','edges')\n self.thresh2 = cv2.getTrackbarPos('thresh2','edges')\n self.im_edges = cv2.Canny(gray, self.thresh1, self.thresh2, apertureSize=3)\n\n cv2.imshow('threshold',self.im_thresh)\n cv2.imshow('edges',self.im_edges)\n"
},
{
"alpha_fraction": 0.5261793732643127,
"alphanum_fraction": 0.5479195713996887,
"avg_line_length": 37.163002014160156,
"blob_id": "242d8c4bcac58a747e9bf94cbe54537c169f19ee",
"content_id": "76be24844f3b7f22abf793479e925be735d93e56",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20837,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 546,
"path": "/cozmo_fsm/particle_viewer.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nParticle filter display in OpenGL.\n\"\"\"\n\ntry:\n from OpenGL.GLUT import *\n from OpenGL.GL import *\n from OpenGL.GLU import *\nexcept:\n pass\n\nimport time\nimport math\nfrom math import sin, cos, pi, atan2, sqrt\nimport array\nimport numpy as np\nimport platform\n\nimport cozmo\nfrom cozmo.util import distance_mm, speed_mmps, degrees\n\nfrom . import opengl\nfrom .worldmap import ArucoMarkerObj\n\nREDISPLAY = True # toggle this to suspend constant redisplay\nWINDOW = None\n\nhelp_text = \"\"\"\nParticle viewer commands:\n w/a/s/d Drive robot +/- 10 mm or turn +/- 22.5 degrees\n W/A/S/D Drive robot +/- 40 mm or turn +/- 90 degrees\n i/k/I/K Head up/down 5 or 20 degrees\n u/j/U/J Lift up/down 5 or max degrees\n e Evaluate particles using current sensor info\n r Resample particles (evaluates first)\n z Reset particle positions (randomize, or all 0 for SLAM)\n c Clear landmarks (for SLAM)\n o Show objects\n p Show best particle\n arrows Translate the view up/down/left/right\n Home Center the view (zero translation)\n < Zoom in\n > Zoom out\n $ Toggle redisplay (for debugging)\n v Toggle verbosity\n V Display weight variance\n h Print this help text\n\"\"\"\n\nhelp_text_mac = \"\"\"\nParticle viewer commands:\n option + w/a/s/d Drive robot +/- 10 mm or turn +/- 22.5 degrees\n option + W/A/S/D Drive robot +/- 40 mm or turn +/- 90 degrees\n option + i/k Head up/down 5 degrees\n option + I/K Head up/down 20 degrees\n option + e Evaluate particles using current sensor info\n option + r Resample particles (evaluates first)\n option + z Reset particle positions (randomize, or all 0 for SLAM)\n option + c Clear landmarks (for SLAM)\n option + o Show objects\n option + p Show best particle\n arrows Translate the view up/down/left/right\n fn + left-arrow Center the view (zero translation)\n option + < Zoom in\n option + > Zoom out\n option + $ Toggle redisplay (for debugging)\n option + v Toggle verbosity\n option + V Display weight variance\n option + h Print this help text\n\"\"\"\n\n\nclass ParticleViewer():\n def __init__(self, robot,\n width=512, height=512, scale=0.64,\n windowName = \"particle viewer\",\n bgcolor = (0,0,0)):\n self.robot=robot\n self.width = width\n self.height = height\n self.bgcolor = bgcolor\n self.aspect = self.width/self.height\n self.translation = [200., 0.] 
# Translation in mm\n self.scale = scale\n self.verbose = False\n self.windowName = windowName\n\n def window_creator(self):\n global WINDOW\n WINDOW = opengl.create_window(bytes(self.windowName, 'utf-8'), (self.width,self.height))\n glutDisplayFunc(self.display)\n glutReshapeFunc(self.reshape)\n glutKeyboardFunc(self.keyPressed)\n glutSpecialFunc(self.specialKeyPressed)\n glViewport(0,0,self.width,self.height)\n glClearColor(*self.bgcolor, 0)\n # Enable transparency\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n\n def start(self): # Displays in background\n if not WINDOW:\n opengl.init()\n opengl.CREATION_QUEUE.append(self.window_creator)\n if platform.system() == 'Darwin':\n print(\"Type 'option' + 'h' in the particle viewer window for help.\")\n else:\n print(\"Type 'h' in the particle viewer window for help.\")\n\n def draw_rectangle(self, center, size=(10,10),\n angle=0, color=(1,1,1), fill=True):\n # Default to solid color and square window\n if len(color)==3:\n color = (*color,1)\n\n # Calculate vertices as offsets from center\n w = size[0]/2; h = size[1]/2\n v1 = (-w,-h); v2 = (w,-h); v3 = (w,h); v4 = (-w,h)\n\n # Draw the rectangle\n glPushMatrix()\n if fill:\n glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)\n glColor4f(color[0],color[1],color[2],color[3])\n glTranslatef(*center,0)\n glRotatef(angle,0,0,1)\n glBegin(GL_QUADS)\n glVertex2f(*v1)\n glVertex2f(*v2)\n glVertex2f(*v3)\n glVertex2f(*v4)\n glEnd()\n glPopMatrix()\n\n def draw_triangle(self, center, height=1, angle=0, tip_offset=0,\n color=(1,1,1), fill=True):\n half = height / 2\n aspect = 3/5\n if len(color) == 3:\n color = (*color,1)\n\n glPushMatrix()\n if fill:\n glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)\n glColor4f(*color)\n glTranslatef(*center,0)\n glRotatef(angle,0,0,1)\n glTranslatef(tip_offset,0,0)\n glBegin(GL_TRIANGLES)\n glVertex2f( half, 0.)\n glVertex2f(-half, -aspect*half)\n glVertex2f(-half, aspect*half)\n glEnd()\n glPopMatrix()\n\n def draw_ellipse(self, center, scale, orient=0, color=(1,1,1), fill=False):\n if len(color) == 3:\n color = (*color,1)\n glPushMatrix()\n glTranslatef(*center,0)\n glRotatef(orient,0,0,1)\n glColor4f(*color)\n if fill:\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(0,0)\n else:\n glBegin(GL_LINE_LOOP)\n for t in range(0,361):\n theta = t/180*pi\n glVertex2f(scale[0]*cos(theta), scale[1]*sin(theta))\n glEnd()\n glPopMatrix()\n\n def draw_wedge(self, center, radius, orient, span, color=(1,1,1), fill=True):\n if len(color) == 3:\n color = (*color,1)\n glPushMatrix()\n glTranslatef(*center,0)\n glRotatef(orient,0,0,1)\n glColor4f(*color)\n if fill:\n glBegin(GL_TRIANGLE_FAN)\n else:\n glBegin(GL_LINE_LOOP)\n glVertex2f(0,0)\n for t in range(round(-span/2), round(span/2)):\n theta = t/180*pi\n glVertex2f(radius*cos(theta), radius*sin(theta))\n glEnd()\n glPopMatrix()\n\n def draw_landmarks(self):\n landmarks = self.robot.world.particle_filter.sensor_model.landmarks.copy()\n if not landmarks: return\n # Extract values as quickly as we can because\n # dictionary can change while we're iterating.\n objs = self.robot.world.world_map.objects.copy()\n arucos = [(marker.id, (np.array([[marker.x], [marker.y]]), marker.theta, None))\n for marker in objs.values()\n if isinstance(marker, ArucoMarkerObj)]\n all_specs = list(landmarks.items()) + \\\n [marker for marker in arucos if marker[0] not in landmarks]\n for (id,specs) in all_specs:\n if not isinstance(id,str):\n raise 
TypeError(\"Landmark id's must be strings: %r\" % id)\n color = None\n if id.startswith('Aruco-'):\n label = id[6:]\n num = int(label)\n seen = num in self.robot.world.aruco.seen_marker_ids\n elif id.startswith('Cube-'):\n label = id[5:]\n num = int(label)\n cube = self.robot.world.light_cubes[num]\n seen = cube.is_visible\n if seen:\n color = (0.5, 0.3, 1, 0.75)\n else:\n color = (0, 0, 0.5, 0.75)\n elif id.startswith('Wall-'):\n label = 'W' + id[id.find('-')+1:]\n try:\n seen = self.robot.world.world_map.objects[id].is_visible\n except:\n seen = False\n if seen:\n color = (1, 0.5, 0.3, 0.75)\n else:\n color = (0.5, 0, 0, 0.75)\n elif id.startswith('Video'):\n seen = self.robot.aruco_id in self.robot.world.perched.camera_pool and \\\n id in self.robot.world.perched.camera_pool[self.robot.aruco_id]\n label = id\n if color is None:\n if seen:\n color = (0.5, 1, 0.3, 0.75)\n else:\n color = (0, 0.5, 0, 0.75)\n if isinstance(specs, cozmo.util.Pose):\n self.draw_landmark_from_pose(id, specs, label, color)\n else:\n self.draw_landmark_from_particle(id, specs, label, color)\n\n def draw_landmark_from_pose(self, id, specs, label, color):\n coords = (specs.position.x, specs.position.y)\n angle = specs.rotation.angle_z.degrees\n if id.startswith('LightCube'):\n size = (44,44)\n angle_adjust = 0\n else: # Aruco\n size = (20,50)\n angle_adjust = 90\n glPushMatrix()\n glColor4f(*color)\n self.draw_rectangle(coords, size=size, angle=angle, color=color)\n glColor4f(0., 0., 0., 1.)\n glTranslatef(*coords,0)\n glRotatef(angle + angle_adjust, 0., 0., 1.)\n glTranslatef(3.-7*len(label), -5., 0.)\n glScalef(0.1,0.1,0.1)\n for char in label:\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(char))\n glPopMatrix()\n\n def draw_landmark_from_particle(self, id, specs, label, color):\n (lm_mu, lm_orient, lm_sigma) = specs\n coords = (lm_mu[0,0], lm_mu[1,0])\n glPushMatrix()\n glColor4f(*color)\n if id.startswith('Cube'):\n size = (44,44)\n angle_offset = -90\n translate = 0\n elif id.startswith('Wall'):\n try:\n wall = self.robot.world.world_map.objects[id]\n except KeyError: # race condition: not in worldmap yet\n return\n size = (20, wall.length)\n angle_offset = 90\n translate = 0\n else: # Aruco\n size = (20,50)\n angle_offset = 90\n translate = 15\n if id.startswith('Video'):\n self.draw_triangle(coords, height=75, angle=lm_orient[1]*(180/pi),\n color=color, fill=True)\n glColor4f(0., 0., 0., 1.)\n glTranslatef(*coords,0)\n glRotatef(lm_orient[1]*(180/pi)+angle_offset, 0., 0., 1.)\n else:\n glTranslatef(*coords,0.)\n glRotatef(lm_orient*180/pi, 0., 0., 1.)\n glTranslatef(translate, 0., 0.)\n self.draw_rectangle([0,0], size=size, angle=0, color=color)\n #self.draw_rectangle(coords, size=size, angle=lm_orient*(180/pi), color=color)\n glColor4f(0., 0., 0., 1.)\n #glTranslatef(*coords,0)\n #glRotatef(lm_orient*(180/pi)+angle_offset, 0., 0., 1.)\n glRotatef(angle_offset, 0., 0., 1.)\n glTranslatef(3.0-7*len(label), -5.0, 0.0)\n glScalef(0.1, 0.1, 0.1)\n for char in label:\n glutStrokeCharacter(GLUT_STROKE_MONO_ROMAN, ord(char))\n glPopMatrix()\n ellipse_color = (color[1], color[2], color[0], 1)\n self.draw_particle_landmark_ellipse(lm_mu, lm_sigma, ellipse_color)\n\n def draw_particle_landmark_ellipse(self, coords, sigma, color):\n if sigma is None: return # Arucos that are not solo landmarks\n (w,v) = np.linalg.eigh(sigma[0:2,0:2])\n alpha = atan2(v[1,0],v[0,0])\n self.draw_ellipse(coords, abs(w)**0.5, alpha*(180/pi), color=color)\n\n def display(self):\n global REDISPLAY\n if not REDISPLAY: return\n 
glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n w = self.width / 2\n glOrtho(-w, w, -w, w, 1, -1)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glRotatef(90,0,0,1)\n glScalef(self.scale, self.scale, self.scale)\n glTranslatef(-self.translation[0], -self.translation[1], 0.)\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n\n # Draw the particles\n for p in self.robot.world.particle_filter.particles:\n pscale = 1 - p.weight\n color=(1,pscale,pscale)\n self.draw_triangle((p.x,p.y), height=10, angle=math.degrees(p.theta),\n color=color, fill=True)\n\n # Draw the robot at the best particle location\n (rx,ry,theta) = self.robot.world.particle_filter.pose\n (xy_var, theta_var) = self.robot.world.particle_filter.variance\n hdg = math.degrees(theta)\n self.draw_triangle((rx,ry), height=100, angle=hdg, tip_offset=-10,\n color=(1,1,0,0.7))\n\n # Draw the error ellipse and heading error wedge\n (w,v) = np.linalg.eigh(xy_var)\n alpha = atan2(v[1,0],v[0,0])\n self.draw_ellipse((rx,ry), abs(w)**0.5, alpha/pi*180, color=(0,1,1))\n self.draw_wedge((rx,ry), 75, hdg, max(5, sqrt(theta_var)*360),\n color=(0,1,1,0.4))\n\n # Draw the landmarks last, so they go on top of the particles\n self.draw_landmarks()\n\n glutSwapBuffers()\n\n def reshape(self,width,height):\n glViewport(0,0,width,height)\n self.width = width\n self.height = height\n self.aspect = self.width/self.height\n self.display()\n glutPostRedisplay()\n\n def report_variance(self,pf):\n weights = np.empty(pf.num_particles)\n for i in range(pf.num_particles):\n weights[i] = pf.particles[i].weight\n weights.sort()\n var = np.var(weights)\n print('weights: min = %3.3e max = %3.3e med = %3.3e variance = %3.3e' %\n (weights[0], weights[-1], weights[pf.num_particles//2], var))\n (xy_var, theta_var) = pf.variance\n print ('xy_var=', xy_var, ' theta_var=', theta_var)\n\n def report_pose(self):\n (x,y,theta) = self.robot.world.particle_filter.pose\n hdg = math.degrees(theta)\n if self.verbose:\n print('Pose = (%5.1f, %5.1f) @ %3d deg.' 
% (x, y, hdg))\n\n async def forward(self,distance):\n handle = self.robot.drive_straight(distance_mm(distance), speed_mmps(50),\n in_parallel=True,\n should_play_anim=False)\n await handle.wait_for_completed()\n pf = self.robot.world.particle_filter\n self.robot.loop.call_later(0.1, pf.look_for_new_landmarks)\n self.report_pose()\n\n async def turn(self,angle):\n handle = self.robot.turn_in_place(degrees(angle), in_parallel=True)\n await handle.wait_for_completed()\n pf = self.robot.world.particle_filter\n self.robot.loop.call_later(0.1, pf.look_for_new_landmarks)\n self.report_pose()\n\n async def look(self,angle):\n handle = self.robot.set_head_angle(degrees(angle), in_parallel=True)\n await handle.wait_for_completed()\n pf = self.robot.world.particle_filter\n self.robot.loop.call_later(0.1, pf.look_for_new_landmarks)\n self.report_pose()\n\n async def lift_to(self,angle):\n min_theta = cozmo.robot.MIN_LIFT_ANGLE.degrees\n max_theta = cozmo.robot.MAX_LIFT_ANGLE.degrees\n angle_range = max_theta - min_theta\n raw_height = (angle - min_theta) / angle_range\n height = min(1.0, max(0.0, raw_height))\n handle = self.robot.set_lift_height(height, in_parallel=True)\n await handle.wait_for_completed()\n pf = self.robot.world.particle_filter\n self.robot.loop.call_later(0.1, pf.look_for_new_landmarks)\n self.report_pose()\n\n def keyPressed(self,key,mouseX,mouseY):\n pf = self.robot.world.particle_filter\n translate_wasd = 10 # millimeters\n translate_WASD = 40\n rotate_wasd = 22.5 # degrees\n rotate_WASD = 90\n global particles\n if key == b'e': # evaluate\n pf.sensor_model.evaluate(pf.particles,force=True)\n pf.update_weights()\n elif key == b'r': # resample\n pf.sensor_model.evaluate(pf.particles,force=True)\n pf.update_weights()\n pf.resample()\n elif key == b'w': # forward\n self.robot.loop.create_task(self.forward(translate_wasd))\n elif key == b'W': # forward\n self.robot.loop.create_task(self.forward(translate_WASD))\n elif key == b's': # back\n self.robot.loop.create_task(self.forward(-translate_wasd))\n elif key == b'S': # back\n self.robot.loop.create_task(self.forward(-translate_WASD))\n elif key == b'a': # left\n self.robot.loop.create_task(self.turn(rotate_wasd))\n elif key == b'A': # left\n self.robot.loop.create_task(self.turn(rotate_WASD))\n elif key == b'd': # right\n self.robot.loop.create_task(self.turn(-rotate_wasd))\n elif key == b'D': # right\n self.robot.loop.create_task(self.turn(-rotate_WASD))\n elif key == b'i': # head up\n ang = self.robot.head_angle.degrees + 5\n self.robot.loop.create_task(self.look(ang))\n elif key == b'k': # head down\n ang = self.robot.head_angle.degrees - 5\n self.robot.loop.create_task(self.look(ang))\n elif key == b'I': # head up\n ang = self.robot.head_angle.degrees + 20\n self.robot.loop.create_task(self.look(ang))\n elif key == b'K': # head down\n ang = self.robot.head_angle.degrees - 20\n self.robot.loop.create_task(self.look(ang))\n elif key == b'u': # lift up\n ang = self.robot.lift_angle.degrees + 5\n self.robot.loop.create_task(self.lift_to(ang))\n elif key == b'j': # lift down\n ang = self.robot.lift_angle.degrees - 5\n self.robot.loop.create_task(self.lift_to(ang))\n elif key == b'U': # lift up\n ang = self.robot.lift_angle.degrees + 60\n self.robot.loop.create_task(self.lift_to(ang))\n elif key == b'J': # lift down\n ang = self.robot.lift_angle.degrees - 60\n self.robot.loop.create_task(self.lift_to(ang))\n elif key == b'z': # delocalize\n pf.delocalize()\n #pf.initializer.initialize(self.robot)\n elif key == b'Z': # randomize\n 
pf.increase_variance()\n elif key == b'c': # clear landmarks\n pf.clear_landmarks()\n print('Landmarks cleared.')\n elif key == b'o': # show objects\n self.robot.world.world_map.show_objects()\n elif key == b'p': # show particle\n self.robot.world.particle_filter.show_particle()\n elif key == b'l': # show landmarks\n self.robot.world.particle_filter.show_landmarks()\n elif key == b'V': # display weight variance\n self.report_variance(pf)\n elif key == b'<': # zoom in\n self.scale *= 1.25\n self.print_display_params()\n return\n elif key == b'>': # zoom out\n self.scale /= 1.25\n self.print_display_params()\n return\n elif key == b'v': # toggle verbose mode\n self.verbose = not self.verbose\n self.report_pose()\n return\n elif key == b'h': # print help\n self.print_help()\n return\n elif key == b'$': # toggle redisplay for debugging\n global REDISPLAY\n REDISPLAY = not REDISPLAY\n print('Redisplay ',('off','on')[REDISPLAY],'.',sep='')\n elif key == b'q': #kill window\n global WINDOW\n glutDestroyWindow(WINDOW)\n glutLeaveMainLoop()\n glutPostRedisplay()\n self.report_pose()\n\n def specialKeyPressed(self, key, mouseX, mouseY):\n pf = self.robot.world.particle_filter\n # arrow keys for translation\n incr = 25.0 # millimeters\n if key == GLUT_KEY_UP:\n self.translation[0] += incr / self.scale\n elif key == GLUT_KEY_DOWN:\n self.translation[0] -= incr / self.scale\n elif key == GLUT_KEY_LEFT:\n self.translation[1] += incr / self.scale\n elif key == GLUT_KEY_RIGHT:\n self.translation[1] -= incr / self.scale\n elif key == GLUT_KEY_HOME:\n self.translation = [0., 0.]\n self.print_display_params()\n glutPostRedisplay()\n\n def print_display_params(self):\n if self.verbose:\n print('scale=%.2f translation=[%.1f, %.1f]' %\n (self.scale, *self.translation))\n glutPostRedisplay()\n\n def print_help(self):\n if platform.system() == 'Darwin':\n print(help_text_mac)\n else:\n print(help_text)\n"
},
{
"alpha_fraction": 0.7360405921936035,
"alphanum_fraction": 0.7411167621612549,
"avg_line_length": 18.700000762939453,
"blob_id": "16c405b83529147f5475266c2299ce321e3fd0ec",
"content_id": "7a16421264558bc03ceb7b24768e4a32fe3d0ff8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 10,
"path": "/cozmo_fsm/examples/__init__.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Example demos for the cozmo_fsm finite state machine package\n\"\"\"\n\nfrom . import BackItUp\nfrom . import Boo\nfrom . import Greet\nfrom . import Look5\nfrom . import Nested\nfrom . import TapSpeak\n"
},
{
"alpha_fraction": 0.6693829894065857,
"alphanum_fraction": 0.6740396022796631,
"avg_line_length": 23.898550033569336,
"blob_id": "88117b4da12d868ab7855485a15c4f28f645abc9",
"content_id": "20fdd17683bcb6c86711e1edcc627522751d8ab9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1718,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 69,
"path": "/cozmo_fsm/opengl.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Common code for OpenGL window management\n\"\"\"\n\ntry:\n from OpenGL.GLUT import *\n from OpenGL.GL import *\n from OpenGL.GLU import *\nexcept:\n pass\n\nimport time\n\nfrom threading import Thread # for backgrounding window\n\nINIT_DONE = False\nMAIN_LOOP_LAUNCHED = False\n\n# Maintain a registry of display functions for our windows\nWINDOW_REGISTRY = []\n\n# List of window creation requests that need to be satisfied\nCREATION_QUEUE = []\n\ndef init():\n global INIT_DONE, robot\n if not INIT_DONE:\n INIT_DONE = True\n glutInit()\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH)\n\n # Killing window should not directly kill main program\n glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION)\n launch_event_loop()\n\ndef create_window(name,size=(500,500)):\n global WINDOW_REGISTRY\n glutInitWindowSize(*size)\n w = glutCreateWindow(name)\n #print('request creation of window',w)\n WINDOW_REGISTRY.append(w)\n return w\n\ndef event_loop():\n while True:\n for window in WINDOW_REGISTRY:\n glutSetWindow(window)\n glutPostRedisplay()\n glutMainLoopEvent()\n process_requests()\n time.sleep(0.1)\n\ndef process_requests():\n global CREATION_QUEUE\n # Process any requests for new windows\n queue = CREATION_QUEUE\n CREATION_QUEUE = []\n for req in queue:\n req() # invoke the window creator\n\n\ndef launch_event_loop():\n global MAIN_LOOP_LAUNCHED\n if MAIN_LOOP_LAUNCHED: return\n MAIN_LOOP_LAUNCHED = True\n print('launching opengl event loop')\n thread = Thread(target=event_loop)\n thread.daemon = True #ending fg program will kill bg program\n thread.start()\n"
},
{
"alpha_fraction": 0.8857142925262451,
"alphanum_fraction": 0.8857142925262451,
"avg_line_length": 10.666666984558105,
"blob_id": "8eae818362060e2d7cb816272f4793807008b6d5",
"content_id": "230710d7f861db055fc87734c2ac238482f902b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 12,
"path": "/requirements.txt",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "cozmo\nnumpy\nPillow\nopencv-python\nopencv-contrib-python\ncolorama\ntermcolor\nPyOpenGL\nPyOpenGL_accelerate\npyreadline\nSpeechRecognition\nPyAudio\n"
},
{
"alpha_fraction": 0.5448232889175415,
"alphanum_fraction": 0.5553745031356812,
"avg_line_length": 39.984100341796875,
"blob_id": "fc5ee0ba929f61cce30ad50ec7685abbb96ac7f6",
"content_id": "55b2d9d521e507895fb1f72e6a52cabcc09ac2c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 25779,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 629,
"path": "/cozmo_fsm/rrt.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from math import pi, sin, cos, inf, asin, atan2, nan, isnan, ceil\nimport numpy as np\nimport random\nimport time\nimport math\n\nimport cozmo_fsm.geometry\nfrom .geometry import wrap_angle\n\nfrom .rrt_shapes import *\nfrom .cozmo_kin import center_of_rotation_offset\nfrom .worldmap import WallObj, wall_marker_dict, RoomObj, LightCubeObj, MapFaceObj\nfrom .worldmap import CustomCubeObj, ChargerObj, CustomMarkerObj, ChipObj, RobotForeignObj\n\n# *** TODO: Collision checking needs to use opposite headings\n# for treeB nodes because robot is asymmetric.\n\n#---------------- RRTNode ----------------\n\nclass RRTNode():\n def __init__(self, parent=None, x=0, y=0, q=0, radius=None):\n self.parent = parent\n self.x = x\n self.y = y\n self.q = q\n self.radius = radius # arc radius\n\n def copy(self):\n return RRTNode(self.parent, self.x, self.y, self.q, self.radius)\n\n def __repr__(self):\n if isnan(self.q):\n return '<RRTNode (%.1f,%.1f)>' % (self.x, self.y)\n elif not self.parent:\n return '<RRTNode (%.1f,%.1f)@%d deg>' % \\\n (self.x, self.y, round(self.q/pi*180))\n elif self.radius is None:\n return '<RRTNode line to (%.1f,%.1f)@%d deg>' % \\\n (self.x, self.y, round(self.q/pi*180))\n else:\n return '<RRTNode arc to (%.1f,%.1f)@%d deg, rad=%d>' % \\\n (self.x, self.y, round(self.q/pi*180), self.radius)\n\n\n#---------------- RRT Path Planner ----------------\n\nclass RRTException(Exception):\n def __str__(self):\n return self.__repr__()\n\nclass StartCollides(RRTException): pass\nclass GoalCollides(RRTException): pass\nclass MaxIterations(RRTException): pass\nclass GoalUnreachable(RRTException): pass\nclass NotLocalized(RRTException): pass\n\nclass RRT():\n DEFAULT_MAX_ITER = 2000\n\n def __init__(self, robot=None, robot_parts=None, bbox=None,\n max_iter=DEFAULT_MAX_ITER, step_size=10, arc_radius=40,\n xy_tolsq=90, q_tol=5*pi/180,\n obstacles=[], auto_obstacles=True,\n bounds=(range(-500,500), range(-500,500))):\n self.robot = robot\n self.max_iter = max_iter\n self.step_size = step_size\n self.max_turn = pi\n self.arc_radius = arc_radius\n self.xy_tolsq = xy_tolsq\n self.q_tol = q_tol\n self.robot_parts = robot_parts if robot_parts is not None else self.make_robot_parts(robot)\n self.bounds = bounds\n self.obstacles = obstacles\n self.auto_obstacles = auto_obstacles\n self.treeA = []\n self.treeB = []\n self.start = None\n self.goal = None\n self.bbox = bbox\n self.path = []\n self.draw_path = []\n self.grid_display = None # *** HACK to display wavefront grid\n self.text = None # *** HACK to display unreachable text\n\n REACHED = 'reached'\n COLLISION = 'collision'\n INTERPOLATE = 'interpolate'\n\n def set_obstacles(self,obstacles):\n self.obstacles = obstacles\n\n def nearest_node(self, tree, target_node):\n best_distance = inf\n closest_node = None\n x = target_node.x\n y = target_node.y\n for this_node in tree:\n distx = this_node.x - x\n disty = this_node.y - y\n distsq = distx*distx + disty*disty\n if distsq < best_distance:\n best_distance = distsq\n closest_node = this_node\n return closest_node\n\n def random_node(self):\n return RRTNode(x=random.choice(self.bounds[0]),\n y=random.choice(self.bounds[1]))\n\n def extend(self, tree, target):\n nearest = self.nearest_node(tree, target)\n status, new_node = self.interpolate(nearest, target)\n if status is not self.COLLISION:\n tree.append(new_node)\n #time.sleep(0.01) # *** FOR ANIMATION PURPOSES\n return (status, new_node)\n\n def interpolate(self, node, target):\n dx = target.x - node.x\n dy = target.y - node.y\n distsq = 
dx*dx + dy*dy\n q = atan2(dy,dx)\n dq = wrap_angle(q - node.q)\n if abs(dq) > self.max_turn:\n dq = self.max_turn if dq > 0 else -self.max_turn\n q = wrap_angle(node.q + dq)\n if abs(dq) >= self.q_tol:\n # Must be able to turn to the new heading without colliding\n turn_dir = +1 if dq >= 0 else -1\n q_inc = turn_dir * self.q_tol\n while abs(q_inc - dq) > self.q_tol:\n if self.collides(RRTNode(x=node.x, y=node.y, q=node.q+q_inc)):\n return (self.COLLISION, None)\n q_inc += turn_dir * self.q_tol\n if distsq < self.xy_tolsq:\n return (self.REACHED, RRTNode(parent=node, x=target.x, y=target.y,q=q))\n xstep = self.step_size * cos(q)\n ystep = self.step_size * sin(q)\n new_node = RRTNode(parent=node, x=node.x+xstep, y=node.y+ystep, q=q)\n if self.collides(new_node):\n return (self.COLLISION, None)\n else:\n return (self.INTERPOLATE, new_node)\n\n def robot_parts_to_node(self,node):\n parts = []\n for part in self.robot_parts:\n tmat = geometry.aboutZ(part.orient)\n tmat = geometry.translate(part.center[0,0], part.center[1,0]).dot(tmat)\n tmat = geometry.aboutZ(node.q).dot(tmat)\n tmat = geometry.translate(node.x, node.y).dot(tmat)\n this_part = part.instantiate(tmat)\n parts.append(this_part)\n return parts\n\n def collides(self, node):\n for part in self.robot_parts_to_node(node):\n for obstacle in self.obstacles:\n if part.collides(obstacle):\n return obstacle\n return False\n\n def all_colliders(self, node):\n result = []\n for part in self.robot_parts_to_node(node):\n for obstacle in self.obstacles:\n if part.collides(obstacle):\n result.append(part)\n return result\n\n def plan_push_chip(self, start, goal, max_turn=20*(pi/180), arc_radius=40.):\n return self.plan_path(start, goal, max_turn, arc_radius)\n\n def plan_path(self, start, goal, max_turn=pi, arc_radius=40):\n self.max_turn = max_turn\n self.arc_radius = arc_radius\n if self.auto_obstacles:\n obstacle_inflation = 5\n doorway_adjustment = 20 # widen doorways for RRT\n self.generate_obstacles(obstacle_inflation, doorway_adjustment)\n self.start = start\n self.goal = goal\n self.target_heading = goal.q\n self.compute_bounding_box()\n\n # Check for StartCollides\n collider = self.collides(start)\n if collider:\n raise StartCollides(start,collider,collider.obstacle_id)\n\n # Set up treeA with start node\n treeA = [start.copy()]\n self.treeA = treeA\n\n # Set up treeB with goal node(s)\n if not isnan(self.target_heading):\n offset_x = goal.x + center_of_rotation_offset * cos(goal.q)\n offset_y = goal.y + center_of_rotation_offset * sin(goal.q)\n offset_goal = RRTNode(x=offset_x, y=offset_y, q=goal.q)\n collider = self.collides(offset_goal)\n if collider:\n raise GoalCollides(goal,collider,collider.obstacle_id)\n treeB = [offset_goal]\n self.treeB = treeB\n else: # target_heading is nan\n treeB = [goal.copy()]\n self.treeB = treeB\n temp_goal = goal.copy()\n offset_goal = goal.copy()\n for theta in range(0,360,10):\n q = theta/180*pi\n step = max(self.step_size, abs(center_of_rotation_offset))\n temp_goal.x = goal.x + step*cos(q)\n temp_goal.y = goal.y + step*sin(q)\n temp_goal.q = wrap_angle(q+pi)\n collider = self.collides(temp_goal)\n if collider: continue\n offset_goal.x = temp_goal.x + center_of_rotation_offset * cos(q)\n offset_goal.y = temp_goal.y + center_of_rotation_offset * sin(q)\n offset_goal.q = temp_goal.q\n collider = self.collides(offset_goal)\n if not collider:\n treeB.append(RRTNode(parent=treeB[0], x=temp_goal.x, y=temp_goal.y, q=temp_goal.q))\n if len(treeB) == 1:\n raise 
GoalCollides(goal,collider,collider.obstacle_id)\n\n # Set bounds for search area\n self.compute_world_bounds(start,goal)\n\n # Grow the RRT until trees meet or max_iter exceeded\n swapped = False\n for i in range(self.max_iter):\n r = self.random_node()\n (status, new_node) = self.extend(treeA, r)\n if status is not self.COLLISION:\n (status, new_node) = self.extend(treeB, treeA[-1])\n if status is self.REACHED:\n break\n (treeA, treeB) = (treeB, treeA)\n swapped = not swapped\n # Search terminated. Check for success.\n if swapped:\n (treeA, treeB) = (treeB, treeA)\n if status is self.REACHED:\n return self.get_path(treeA, treeB)\n else:\n raise MaxIterations(self.max_iter)\n\n def compute_world_bounds(self,start,goal):\n xmin = min(start.x, goal.x)\n xmax = max(start.x, goal.x)\n ymin = min(start.y, goal.y)\n ymax = max(start.y, goal.y)\n for obst in self.obstacles:\n if isinstance(obst,Circle):\n xmin = obst.center[0] - obst.radius\n xmax = obst.center[0] + obst.radius\n ymin = obst.center[1] - obst.radius\n ymax = obst.center[1] + obst.radius\n else:\n xmin = min(xmin, np.min(obst.vertices[0]))\n xmax = max(xmax, np.max(obst.vertices[0]))\n ymin = min(ymin, np.min(obst.vertices[1]))\n ymax = max(ymax, np.max(obst.vertices[1]))\n xmin = xmin - 500\n xmax = xmax + 500\n ymin = ymin - 500\n ymax = ymax + 500\n self.bounds = (range(int(xmin), int(xmax)), range(int(ymin), int(ymax)))\n\n def get_path(self, treeA, treeB):\n nodeA = treeA[-1]\n pathA = [nodeA.copy()]\n while nodeA.parent is not None:\n nodeA = nodeA.parent\n pathA.append(nodeA.copy())\n pathA.reverse()\n # treeB was built backwards from the goal, so headings\n # need to be reversed\n nodeB = treeB[-1]\n prev_heading = wrap_angle(nodeB.q + pi)\n if nodeB.parent is None:\n pathB = [nodeB.copy()]\n else:\n pathB = []\n while nodeB.parent is not None:\n nodeB = nodeB.parent\n (nodeB.q, prev_heading) = (prev_heading, wrap_angle(nodeB.q+pi))\n pathB.append(nodeB.copy())\n (pathA,pathB) = self.join_paths(pathA,pathB)\n self.path = pathA + pathB\n self.smooth_path()\n target_q = self.target_heading\n if not isnan(target_q):\n # Last nodes turn to desired final heading\n last = self.path[-1]\n goal = RRTNode(parent=last, x=self.goal.x, y=self.goal.y,\n q=target_q, radius=0)\n self.path.append(goal)\n return (treeA, treeB, self.path)\n\n def join_paths(self,pathA,pathB):\n turn_angle = wrap_angle(pathB[0].q - pathA[-1].q)\n if abs(turn_angle) <= self.max_turn:\n return (pathA,pathB)\n print('*** JOIN PATHS EXCEEDED MAX TURN ANGLE: ', turn_angle*180/pi)\n return (pathA,pathB)\n\n def smooth_path(self):\n \"\"\"Smooth a path by picking random subsequences and replacing\n them with a direct link if there is no collision.\"\"\"\n smoothed_path = self.path\n for _ in range(0,len(smoothed_path)):\n L = len(smoothed_path)\n if L <= 2: break\n i = random.randrange(0,L-2)\n cur_x = smoothed_path[i].x\n cur_y = smoothed_path[i].y\n cur_q = smoothed_path[i].q\n j = random.randrange(i+2, L)\n if j < L-1 and smoothed_path[j+1].radius != None:\n continue # j is parent node of an arc segment: don't touch\n dx = smoothed_path[j].x - cur_x\n dy = smoothed_path[j].y - cur_y\n new_q = atan2(dy,dx)\n dist = sqrt(dx**2 + dy**2)\n turn_angle = wrap_angle(new_q - cur_q)\n if abs(turn_angle) <= self.max_turn:\n result = self.try_linear_smooth(smoothed_path,i,j,cur_x,cur_y,new_q,dist)\n else:\n result = self.try_arc_smooth(smoothed_path,i,j,cur_x,cur_y,cur_q)\n smoothed_path = result or smoothed_path\n self.path = smoothed_path\n\n def 
try_linear_smooth(self,smoothed_path,i,j,cur_x,cur_y,new_q,dist):\n step_x = self.step_size * cos(new_q)\n step_y = self.step_size * sin(new_q)\n traveled = 0\n while traveled < dist:\n traveled += self.step_size\n cur_x += step_x\n cur_y += step_y\n if self.collides(RRTNode(None, cur_x, cur_y, new_q)):\n return None\n # Since we're arriving at node j via a different heading than\n # before, see if we need to add an arc to get us to node k=j+1\n node_i = smoothed_path[i]\n end_spec = self.calculate_end(smoothed_path, node_i, new_q, j)\n if end_spec is None:\n return None\n # no collision, so snip out nodes i+1 ... j-1\n # print('linear: stitching','%d:'%i,smoothed_path[i],'to %d:'%j,smoothed_path[j])\n if not end_spec:\n smoothed_path[j].parent = smoothed_path[i]\n smoothed_path[j].q = new_q\n smoothed_path[j].radius = None\n smoothed_path = smoothed_path[:i+1] + smoothed_path[j:]\n else:\n (next_node,turn_node) = end_spec\n smoothed_path[j+1].parent = turn_node\n smoothed_path = smoothed_path[:i+1] + \\\n [next_node, turn_node] + \\\n smoothed_path[j+1:]\n return smoothed_path\n\n def try_arc_smooth(self,smoothed_path,i,j,cur_x,cur_y,cur_q):\n if j == i+2 and smoothed_path[i+1].radius != None:\n return None # would be replacing an arc node with itself\n arc_spec = self.calculate_arc(smoothed_path[i], smoothed_path[j])\n if arc_spec is None:\n return None\n (tang_x, tang_y, tang_q, radius) = arc_spec\n ni = smoothed_path[i]\n turn_node1 = RRTNode(ni, tang_x, tang_y, tang_q, radius=radius)\n # Since we're arriving at node j via a different heading than\n # before, see if we need to add an arc at the end to allow us\n # to smoothly proceed to node k=j+1\n end_spec = self.calculate_end(smoothed_path, turn_node1, tang_q, j)\n if end_spec is None:\n return None\n # no collision, so snip out nodes i+1 ... 
j-1 and insert new node(s)\n # print('arc: stitching','%d:'%i,smoothed_path[i],'to %d:'%j,smoothed_path[j])\n if not end_spec:\n smoothed_path[j].parent = turn_node1\n smoothed_path[j].q = tang_q\n smoothed_path[j].radius = None\n smoothed_path = smoothed_path[:i+1] + [turn_node1] + smoothed_path[j:]\n else:\n (next_node, turn_node2) = end_spec\n smoothed_path[j+1].parent = turn_node2\n smoothed_path = smoothed_path[:i+1] + \\\n [turn_node1, next_node, turn_node2] + \\\n smoothed_path[j+1:]\n return smoothed_path\n\n def calculate_arc(self, node_i, node_j):\n # Compute arc node parameters to get us on a heading toward node_j.\n cur_x = node_i.x\n cur_y = node_i.y\n cur_q = node_i.q\n dest_x = node_j.x\n dest_y = node_j.y\n direct_turn_angle = wrap_angle(atan2(dest_y-cur_y, dest_x-cur_x) - cur_q)\n # find center of arc we'll be moving along\n dir = +1 if direct_turn_angle >=0 else -1\n cx = cur_x + self.arc_radius * cos(cur_q + dir*pi/2)\n cy = cur_y + self.arc_radius * sin(cur_q + dir*pi/2)\n dx = cx - dest_x\n dy = cy - dest_y\n center_dist = sqrt(dx*dx + dy*dy)\n if center_dist < self.arc_radius: # turn would be too wide: punt\n return None\n # tangent points on arc: outer tangent formula from Wikipedia with r=0\n gamma = atan2(dy, dx)\n beta = asin(self.arc_radius / center_dist)\n alpha1 = gamma + beta\n tang_x1 = cx + self.arc_radius * cos(alpha1 + pi/2)\n tang_y1 = cy + self.arc_radius * sin(alpha1 + pi/2)\n tang_q1 = (atan2(tang_y1-cy, tang_x1-cx) + dir*pi/2)\n turn1 = tang_q1 - cur_q\n if dir * turn1 < 0:\n turn1 += dir * 2 * pi\n alpha2 = gamma - beta\n tang_x2 = cx + self.arc_radius * cos(alpha2 - pi/2)\n tang_y2 = cy + self.arc_radius * sin(alpha2 - pi/2)\n tang_q2 = (atan2(tang_y2-cy, tang_x2-cx) + dir*pi/2)\n turn2 = tang_q2 - cur_q\n if dir * turn2 < 0:\n turn2 += dir * 2 * pi\n # Correct tangent point has shortest turn.\n if abs(turn1) < abs(turn2):\n (tang_x,tang_y,tang_q,turn) = (tang_x1,tang_y1,tang_q1,turn1)\n else:\n (tang_x,tang_y,tang_q,turn) = (tang_x2,tang_y2,tang_q2,turn2)\n # Interpolate along the arc and check for collision.\n q_traveled = 0\n while abs(q_traveled) < abs(turn):\n cur_x = cx + self.arc_radius * cos(cur_q + q_traveled)\n cur_y = cy + self.arc_radius * sin(cur_q + q_traveled)\n if self.collides(RRTNode(None, cur_x, cur_y, cur_q+q_traveled)):\n return None\n q_traveled += dir * self.q_tol\n # Now interpolate from the tangent point to the target.\n cur_x = tang_x\n cur_y = tang_y\n dx = dest_x - cur_x\n dy = dest_y - cur_y\n new_q = atan2(dy, dx)\n dist = sqrt(dx*dx + dy*dy)\n step_x = self.step_size * cos(new_q)\n step_y = self.step_size * sin(new_q)\n traveled = 0\n while traveled < dist:\n traveled += self.step_size\n cur_x += step_x\n cur_y += step_y\n if self.collides(RRTNode(None, cur_x, cur_y, new_q)):\n return None\n # No collision, so arc is good.\n return (tang_x, tang_y, tang_q, dir*self.arc_radius)\n\n def calculate_end(self, smoothed_path, parent, new_q, j):\n # Return False if arc not needed, None if arc not possible,\n # or pair of new nodes if arc is required.\n if j == len(smoothed_path)-1:\n return False\n node_j = smoothed_path[j]\n node_k = smoothed_path[j+1]\n next_turn = wrap_angle(node_k.q - new_q)\n if abs(next_turn) <= self.max_turn:\n return False\n dist = sqrt((node_k.x-node_j.x)**2 + (node_k.y-node_j.y)**2)\n if False and dist < self.arc_radius:\n return None\n next_x = node_j.x - self.arc_radius * cos(new_q)\n next_y = node_j.y - self.arc_radius * sin(new_q)\n next_node = RRTNode(parent, next_x, next_y, new_q)\n arc_spec = 
self.calculate_arc(next_node, node_k)\n if arc_spec is None:\n return None\n (tang_x, tang_y, tang_q, radius) = arc_spec\n turn_node = RRTNode(next_node, tang_x, tang_y, tang_q, radius=radius)\n return (next_node, turn_node)\n\n def coords_to_path(self, coords_pairs):\n \"\"\"\n Transform a path of coordinates pairs to RRTNodes.\n \"\"\"\n path = []\n for (x,y) in coords_pairs:\n node = RRTNode(x=x, y=y, q=math.nan)\n if path:\n node.parent = path[-1]\n path[-1].q = atan2(y - path[-1].y, x - path[-1].x)\n path.append(node)\n return path\n\n\n #---------------- Obstacle Representation ----------------\n\n def generate_obstacles(self, obstacle_inflation=0, wall_inflation=0, doorway_adjustment=0):\n self.robot.world.world_map.update_map()\n obstacles = []\n for obj in self.robot.world.world_map.objects.values():\n if not obj.is_obstacle: continue\n if self.robot.carrying is obj: continue\n if obj.pose_confidence < 0: continue\n if 'unseen' in obj.__dict__ and obj.unseen: continue\n if isinstance(obj, WallObj):\n obstacles = obstacles + \\\n self.generate_wall_obstacles(obj, wall_inflation, doorway_adjustment)\n elif isinstance(obj, (LightCubeObj,CustomCubeObj,ChargerObj)):\n obstacles.append(self.generate_cube_obstacle(obj, obstacle_inflation))\n elif isinstance(obj, CustomMarkerObj):\n obstacles.append(self.generate_marker_obstacle(obj,obstacle_inflation))\n elif isinstance(obj, ChipObj):\n obstacles.append(self.generate_chip_obstacle(obj,obstacle_inflation))\n elif isinstance(obj, RobotForeignObj):\n obstacles.append(self.generate_foreign_obstacle(obj))\n self.obstacles = obstacles\n\n @staticmethod\n def generate_wall_obstacles(wall, wall_inflation, doorway_adjustment):\n wall_spec = wall_marker_dict[wall.spec_id]\n wall_half_length = wall.length / 2\n widths = []\n edges = [ [0, -wall_half_length - wall_inflation, 0., 1.] 
]\n last_x = -wall_half_length - wall_inflation\n for (door_center, door_width) in wall_spec.doorways:\n door_width += doorway_adjustment # widen doorways for RRT, narrow for WaveFront\n left_edge = door_center - door_width/2 - wall_half_length\n edges.append([0., left_edge, 0., 1.])\n widths.append(left_edge - last_x)\n right_edge = door_center + door_width/2 - wall_half_length\n edges.append([0., right_edge, 0., 1.])\n last_x = right_edge\n edges.append([0., wall_half_length + wall_inflation, 0., 1.])\n widths.append(wall_half_length + wall_inflation - last_x)\n edges = np.array(edges).T\n edges = geometry.aboutZ(wall.theta).dot(edges)\n edges = geometry.translate(wall.x,wall.y).dot(edges)\n obst = []\n for i in range(0,len(widths)):\n center = edges[:,2*i:2*i+2].mean(1).reshape(4,1)\n dimensions=(4.0+2*wall_inflation, widths[i])\n r = Rectangle(center=center,\n dimensions=dimensions,\n orient=wall.theta )\n r.obstacle_id = wall.id\n obst.append(r)\n return obst\n\n @staticmethod\n def generate_cube_obstacle(obj, obstacle_inflation=0):\n r = Rectangle(center=geometry.point(obj.x, obj.y),\n dimensions=[obj.size[0]+2*obstacle_inflation, obj.size[1]+2*obstacle_inflation],\n orient=obj.theta)\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def generate_marker_obstacle(obj, obstacle_inflation=0):\n sx,sy,sz = obj.size\n r = Rectangle(center=geometry.point(obj.x+sx/2, obj.y),\n dimensions=(sx+2*obstacle_inflation,sy+2*obstacle_inflation),\n orient=obj.theta)\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def generate_room_obstacle(obj):\n \"\"\"Rooms aren't really obstacles, but this is used by PathPlanner to encode goal locations.\"\"\"\n r = Polygon(vertices=obj.points)\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def generate_chip_obstacle(obj, obstacle_inflation=0):\n r = Circle(center=geometry.point(obj.x,obj.y),\n radius=obj.radius+obstacle_inflation)\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def generate_foreign_obstacle(obj):\n r = Rectangle(center=geometry.point(obj.x, obj.y),\n dimensions=(obj.size[0:2]),\n orient=obj.theta)\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def generate_mapFace_obstacle(obj, obstacle_inflation=0):\n r = Rectangle(center=geometry.point(obj.x,obj.y),\n dimensions=[obj.size[0]+2*obstacle_inflation, obj.size[1]+2*obstacle_inflation])\n r.obstacle_id = obj.id\n return r\n\n @staticmethod\n def make_robot_parts(robot):\n result = []\n for joint in robot.kine.joints.values():\n if joint.collision_model:\n tmat = robot.kine.link_to_base(joint)\n robot_obst = joint.collision_model.instantiate(tmat)\n result.append(robot_obst)\n return result\n\n def compute_bounding_box(self):\n xmin = self.robot.world.particle_filter.pose[0]\n ymin = self.robot.world.particle_filter.pose[1]\n xmax = xmin\n ymax = ymin\n objs = self.robot.world.world_map.objects.values()\n # Rooms aren't obstacles, so include them separately.\n rooms = [obj for obj in objs if isinstance(obj,RoomObj)]\n # Cubes and markers may not be obstacles if they are goal locations, so include them again.\n goals = [ obj for obj in objs if \n (isinstance(obj,(LightCubeObj,CustomMarkerObj)) and obj.pose_confidence >= 0) or\n isinstance(obj,MapFaceObj) ]\n for obj in self.obstacles + rooms + goals:\n ((x0,y0),(x1,y1)) = obj.get_bounding_box()\n xmin = min(xmin, x0)\n ymin = min(ymin, y0)\n xmax = max(xmax, x1)\n ymax = max(ymax, y1)\n self.bbox = ((xmin,ymin), (xmax,ymax))\n return self.bbox\n"
},
{
"alpha_fraction": 0.5445358157157898,
"alphanum_fraction": 0.5573012828826904,
"avg_line_length": 35.479103088378906,
"blob_id": "a898df806ca453f2ca1f76d558e2a98da3b2118f",
"content_id": "aecee6bb0a87d8a2791792b7b96e2640ef97ed60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24441,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 670,
"path": "/genfsm",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\n\"\"\"\nFinite State Machine generator for the cozmo_fsm package.\nModeled after the Tekkotsu stateparser tool.\n\nUsage: genfsm [infile.fsm | -] [outfile.py | -]\n\nUse '-' to indicate standard input or standard output. If a\nsecond argument is not supplied, writes to infile.py, or to\nstandard output if the input was '-'.\n\nTo enter state machine notation use a line that contains\njust $setup ''', followed by the lines of the state machine,\nand ending with a line contaning just '''. This will result\nin a definition of a setup() method for the state node class\nyou are defining. Example:\n\n class MyNode(StateNode):\n $setup '''\n Say(\"Hello\") =C=> Forward(50)\n '''\n\nSee the cozmo_fsm/examples directory for examples of .fsm files\nand the .py files they generate.\n\nNote: install termcolor package to get messages displayed in color.\n\nAuthor: David S. Touretzky, Carnegie Mellon University\n\"\"\"\n\nimport sys, time, re\n\ntry:\n import colorama\n colorama.init()\nexcept: pass\n\ntry:\n from termcolor import cprint\nexcept:\n def cprint(string, color=None, file=None):\n print(string)\n\n\nclass Token:\n def __repr__(self):\n return \"<%s>\" % self.__class__.__name__\n # Lexer tokens\n def isIdentifier(self): return isinstance(self,Identifier)\n def isColon(self): return isinstance(self,Colon)\n def isNewline(self): return isinstance(self,Newline)\n def isEqual(self): return isinstance(self,Equal)\n def isArrowHead(self): return isinstance(self,ArrowHead)\n def isComma(self): return isinstance(self,Comma)\n def isLBrace(self): return isinstance(self,LBrace)\n def isRBrace(self): return isinstance(self,RBrace)\n def isArglist(self): return isinstance(self,Arglist)\n # Parser stage 1 tokens\n def isLabelRef(self): return isinstance(self,LabelRef)\n def isLabelDef(self): return isinstance(self,LabelDef)\n def isConstructorCall(self): return isinstance(self,ConstructorCall)\n def isIdentifierList(self): return isinstance(self,IdentifierList)\n # Parser stage 2 token\n def isTransition(self) : return isinstance(self,Transition)\n # Parser stage 3 tokens\n def isNodeDefinition(self): return isinstance(self,NodeDefinition)\n\n# Lexer tokens:\n\nclass Identifier(Token):\n def __init__(self,name):\n self.name = name\n def __repr__(self):\n return \"<Identifier %s>\" % self.name\n\nclass Colon(Token): pass\nclass Equal(Token): pass\nclass ArrowHead(Token) : pass\nclass Comma(Token) : pass\nclass LBrace(Token) : pass\nclass RBrace(Token) : pass\nclass Newline(Token) : pass\n\nclass Arglist(Token):\n def __init__(self,value):\n self.value = value\n def __repr__(self):\n return \"<Arglist %s>\" % self.value\n\n# Parser stage 1 and 2 tokens:\n\nclass IdentifierList(Token):\n def __init__(self,label_refs):\n self.label_refs = label_refs\n def __repr__(self):\n return \"<IdentifierList %s>\" % ','.join(self.label_refs)\n\nclass LabelDef(Token):\n def __init__(self,label):\n self.label = label\n def __repr__(self):\n return \"<LabelDef %s>\" % self.label\n\nclass LabelRef(Token):\n def __init__(self,label):\n self.label = label\n def __repr__(self):\n return \"<LabelRef %s>\" % self.label\n\nclass ConstructorCall(Token):\n def __init__(self,name,arglist):\n self.name = name\n self.arglist = arglist\n def __repr__(self):\n return \"<ConstructorCall %s%s>\" % (self.name, self.arglist)\n\n# Parser stage 3 tokens\n\nclass NodeDefinition(Token):\n def __init__(self,label,node_type,arglist):\n self.label = label\n self.node_type = node_type\n self.arglist = arglist\n 
def __repr__(self):\n label = self.label+':' if self.label else ''\n return \"<NodeDefinition %s%s%s>\" % \\\n (label, self.node_type, self.arglist)\n\nclass Transition(Token):\n def __init__(self,label,trans_type,arglist):\n self.label = label\n self.trans_type = trans_type\n self.arglist = arglist\n self.sources = []\n self.destinations = []\n\n def __repr__(self):\n label = self.label+':' if self.label else ''\n if len(self.sources) == 1:\n srcs = self.sources[0]\n else:\n srcs = '{%s}' % ','.join(self.sources)\n if len(self.destinations) == 1:\n dests = self.destinations[0]\n else:\n dests = '{%s}' % ','.join(self.destinations)\n return \"<Transition %s=%s%s=>%s>\" % (srcs,label,self.trans_type,dests)\n\n_lex_punc_table = \\\n (\n (' ', None),\n ('\\t', None),\n ('\\n', Newline),\n ('\\r\\n', Newline), # must precede '\\r' in this list for line counting to work correctly\n ('\\r', Newline),\n (':', Colon),\n (',', Comma),\n ('=>', ArrowHead), # must precede Equal in this list\n ('=', Equal),\n ('{', LBrace),\n ('}', RBrace)\n )\n\ncurrent_line = 0\n\ndef handle_newline():\n global current_line\n current_line += 1 \n\ndef lexer (string):\n \"\"\"Convert input string into a sequence of lexer tokens.\"\"\"\n r_identifier = re.compile('((self\\.)|)\\w+')\n tokens = []\n while string:\n was_in_table = False\n for chars,tok in _lex_punc_table:\n if string[0:len(chars)] == chars:\n if tok:\n this_token = tok()\n tokens.append(this_token)\n if this_token.isNewline(): handle_newline()\n string = string[len(chars):]\n was_in_table = True\n break\n if was_in_table: continue\n if string[0] == '#':\n string = lexer_skip_comment(string)\n continue\n if string[0] == '(':\n arglist, string = lexer_build_arglist(string)\n tokens.append(arglist)\n continue\n match_result = r_identifier.match(string)\n if match_result:\n endpos = match_result.span()[1]\n tokens.append(Identifier(string[0:endpos]))\n string = string[endpos:]\n continue\n # If we reach here, we've found something indigestible.\n report_line_error(\"syntax error at '%s'\" % error_fragment(string))\n next_line = string.find('\\n')\n if next_line > -1:\n string = string[next_line:]\n continue\n break\n return tokens\n\ndef lexer_skip_comment(string):\n \"\"\"String begins with '#'. Skip everything from there to the first newline.\"\"\"\n pos = string.find('\\r')\n if pos == -1:\n pos = string.find('\\n')\n if pos == -1:\n raise Exception('Missing newline at end of comment.')\n return string[pos:]\n\ndef lexer_build_arglist(string):\n \"\"\"Helper for lexer. 
Parses an argument list and returns it plus the remainder of the string.\"\"\"\n ostring = string\n lookstack = [')']\n pos = 1\n while lookstack and pos < len(string):\n if lookstack[0] == string[pos]:\n del lookstack[0]\n elif lookstack[0] not in '\\'\"':\n if string[pos] == '(':\n lookstack.insert(0,')')\n elif string[pos] == '[':\n lookstack.insert(0,']')\n elif string[pos] == '{':\n lookstack.insert(0,'}')\n elif string[pos] in ')]}':\n break\n pos += 1\n if lookstack:\n cleanstr = string.strip()\n p = min(pos, len(cleanstr)-1)\n report_line_error(\"Ill-formed argument list at '%s' near '%s'\" %\n (error_fragment(ostring), cleanstr[p]))\n return Arglist(''), ''\n return Arglist(string[0:pos]), string[pos:]\n\ndef parser1(lex_tokens):\n \"\"\"Assembles label-def / constructor-call / label-ref / identifier-list tokens.\"\"\"\n p1tokens = []\n while lex_tokens:\n if lex_tokens[0].isIdentifier():\n # An identifier must be a label definition, constructor call, or label reference.\n if len(lex_tokens) > 1 and lex_tokens[1].isColon(): # Label definition\n p1tokens.append(LabelDef(lex_tokens[0].name))\n del lex_tokens[0:2]\n continue\n if len(lex_tokens) > 1 and lex_tokens[1].isArglist(): # Constructor call\n p1tokens.append(ConstructorCall(lex_tokens[0].name,lex_tokens[1].value))\n del lex_tokens[0:2]\n continue\n # Default case: identifier assumed to be a label reference.\n p1tokens.append(LabelRef(lex_tokens[0].name))\n del lex_tokens[0]\n continue\n # A left braces introduces a comma-separated list of label references\n if lex_tokens[0].isLBrace():\n del lex_tokens[0]\n label_refs = []\n need_comma = False\n need_rbrace = True\n while lex_tokens:\n if lex_tokens[0].isRBrace():\n p1tokens.append(IdentifierList(label_refs))\n del lex_tokens[0]\n need_rbrace = False\n break\n if need_comma and lex_tokens[0].isComma():\n del lex_tokens[0]\n need_comma = False\n elif not need_comma and lex_tokens[0].isIdentifier():\n label_refs.append(lex_tokens[0].name)\n del lex_tokens[0]\n need_comma = True\n else:\n report_line_error('Syntax error in identifier list near %s.' %lex_tokens[0])\n del lex_tokens[0]\n if not label_refs:\n report_line_error('Empty identifier list {}.')\n if label_refs and not need_comma:\n report_line_error('Trailing comma in identifier list {... ,}.')\n if need_rbrace:\n report_line_error('Missing right brace in identifier list {....')\n continue\n if lex_tokens[0].isRBrace():\n report_line_error('Extraneous right brace.')\n del lex_tokens[0]\n continue\n if lex_tokens[0].isEqual():\n p1tokens.append(lex_tokens[0])\n del lex_tokens[0]\n # Special handling for transitions: convert identifier to\n # labelref if followed by \":\" or to constructor call with\n # no arguments if followed by \"=>\". 
Otherwise just\n # continue and we'll process \"identifier (\" on the next\n # iteration.\n if len(lex_tokens) < 2:\n report_line_error('Syntax error in transition near %s' %\n (lex_tokens[0] if len(lex_tokens) > 0 else 'end of line'))\n continue\n # Assemble optional label for transition.\n if lex_tokens[0].isIdentifier() and lex_tokens[1].isColon():\n p1tokens.append(LabelDef(lex_tokens[0].name))\n del lex_tokens[0:2]\n if len(lex_tokens) < 2:\n report_line_error('Syntax error in transition near %s' %\n (lex_tokens[0] if len(lex_tokens) > 0 else 'end of line'))\n continue\n # For transitions, an identifier with no arglist is still a constructor call.\n if lex_tokens[0].isIdentifier():\n if len(lex_tokens) >= 2 and not lex_tokens[1].isArglist():\n p1tokens.append(ConstructorCall(lex_tokens[0].name,'()'))\n del lex_tokens[0]\n continue\n # Default: just pass the item (arrowhead, newline) on to the next stage\n if lex_tokens[0].isNewline(): handle_newline()\n p1tokens.append(lex_tokens[0])\n del lex_tokens[0]\n return p1tokens\n\ndef parser2(p1tokens):\n \"\"\"Create a node definition with label, or a transition with label\n and constructor call; no sources/destinations yet.\"\"\"\n p2tokens = []\n while p1tokens:\n if p1tokens[0].isNewline():\n handle_newline()\n p2tokens.append(p1tokens.pop(0))\n continue\n # Must begin with a node reference or definition.\n if p1tokens[0].isLabelDef():\n label = p1tokens[0].label\n # labeled constructor call\n if p1tokens[1].isConstructorCall():\n call = p1tokens[1]\n p2tokens.append(NodeDefinition(label, call.name, call.arglist))\n del p1tokens[0:2]\n continue\n else:\n if p1tokens[1].isLabelRef() and p1tokens[1].label[0].isupper():\n hint = \"\\n\\tDid you mean '%s()' ?\" % p1tokens[1].label\n else:\n hint = \"\"\n report_line_error(\"Label '%s:' should be followed by a node definition, not %s.%s\"\n % (label, p1tokens[1], hint))\n del p1tokens[0]\n continue\n if p1tokens[0].isConstructorCall():\n # Unlabeled constructor call: label it.\n call = p1tokens[0]\n label = gen_name(call.name)\n p2tokens.append(NodeDefinition(label, call.name, call.arglist))\n del p1tokens[0]\n continue\n if p1tokens[0].isEqual(): # start of a transition\n del p1tokens[0]\n label = None\n trans = None\n # look for optional transition label\n if p1tokens[0].isLabelDef():\n label = p1tokens[0].label\n del p1tokens[0] # labeldef\n # look for transition constructor\n if p1tokens[0].isConstructorCall():\n trans_type = p1tokens[0].name\n trans_args = p1tokens[0].arglist\n else:\n report_line_error('Ill-formed transition: should not see %s here.' 
% p1tokens[0])\n del p1tokens[0]\n continue\n del p1tokens[0] # constructor\n if not p1tokens[0].isArrowHead():\n report_line_error(\"Error in transition: expected '=>' not %s.\" % p1tokens[0])\n del p1tokens[0] # arrowhead\n trans_class = transition_names.get(trans_type,trans_type)\n if not label:\n label = gen_name(trans_class)\n p2tokens.append(Transition(label,trans_class,trans_args))\n continue\n # Pass along an identifier list without modification\n if p1tokens[0].isIdentifierList() or p1tokens[0].isLabelRef():\n p2tokens.append(p1tokens[0])\n del p1tokens[0]\n continue\n else:\n report_line_error(\"A %s token is not legal in this context.\" % p1tokens[0])\n del p1tokens[0]\n continue\n return p2tokens\n\ntransition_names = dict(\n N = 'NullTrans',\n T = 'TimerTrans',\n C = 'CompletionTrans',\n S = 'SuccessTrans',\n F = 'FailureTrans',\n D = 'DataTrans',\n TM = 'TextMsgTrans',\n RND = 'RandomTrans',\n PILOT = 'PilotTrans',\n Tap = 'TapTrans',\n ObsMot = 'ObservedMotionTrans',\n UnexMov = 'UnexpectedMovementTrans',\n Aruco = 'ArucoTrans',\n Next = 'NextTrans',\n CNext = 'CNextTrans',\n SayData = 'SayDataTrans',\n Hear = 'HearTrans'\n )\n\ndef gen_name(base_name, name_counts=dict()):\n name = base_name.lower()\n if name.startswith('self.'):\n name = name[5:]\n count = name_counts.get(name,0) + 1\n name_counts[name] = count\n return name + repr(count)\n\ndef parser3(p2tokens):\n \"\"\"Chain nodes and transitions by filling in source/destination fields.\"\"\"\n current_node = None\n need_destination = False\n p3tokens = []\n must_transition = False\n while p2tokens:\n while p2tokens and p2tokens[0].isNewline():\n must_transition = False\n handle_newline()\n del p2tokens[0]\n if not p2tokens: break\n if p2tokens[0].isLabelRef(): \n must_transition = True\n current_node = [p2tokens[0].label]\n del p2tokens[0]\n elif p2tokens[0].isNodeDefinition():\n must_transition = True\n current_node = [p2tokens[0].label]\n p3tokens.append(p2tokens[0])\n del p2tokens[0]\n elif p2tokens[0].isIdentifierList():\n must_transition = True\n current_node = p2tokens[0].label_refs\n del p2tokens[0]\n elif not current_node:\n report_line_error('Node reference expected before this transition: %s' % p2tokens[0])\n # node definition could be followed by newlines\n while p2tokens and p2tokens[0].isNewline():\n must_transition = False\n handle_newline()\n del p2tokens[0]\n if not p2tokens: break\n # next item must be a transition\n if p2tokens[0].isTransition():\n # check for source\n if not current_node:\n report_line_error('Transition %s has no source nodes.' % p2tokens[0].label)\n p2tokens[0].sources = current_node\n need_destination = True\n p3tokens.append(p2tokens[0])\n del p2tokens[0]\n elif must_transition:\n report_line_error(\"Expected a transition after '%s', not %s.\" %\n (','.join(current_node), p2tokens[0]))\n del p2tokens[0]\n continue\n while p2tokens and p2tokens[0].isNewline():\n handle_newline()\n del p2tokens[0]\n if not p2tokens:\n report_line_error('Missing destination for transition %s.' 
% p3tokens[-1].label)\n continue\n # next item must be a destination for the transition\n if p2tokens[0].isLabelRef():\n current_node = [p2tokens[0].label]\n del p2tokens[0]\n if need_destination:\n if p3tokens[-1].isTransition():\n p3tokens[-1].destinations = current_node\n need_destination = False\n continue\n elif p2tokens[0].isNodeDefinition():\n current_node = [p2tokens[0].label]\n p3tokens[-1].destinations = current_node\n continue # process the node defintion on the next iteration\n elif p2tokens[0].isIdentifierList():\n current_node = p2tokens[0].label_refs\n del p2tokens[0]\n if need_destination:\n if p3tokens[-1].isTransition():\n p3tokens[-1].destinations = current_node\n need_destination = False\n else:\n raise Exception('parser3 is confused by %s.' % p2tokens)\n return p3tokens\n\ndef generate_machine(lines):\n global indent_level, current_line, found_error\n found_error = False\n current_line = starting_line\n tok = lexer(''.join(lines))\n if found_error: return\n current_line = starting_line\n p1tokens = parser1(tok)\n if found_error: return\n current_line = starting_line\n p2tokens = parser2(p1tokens)\n if found_error: return\n current_line = starting_line\n p3tokens = parser3(p2tokens)\n if found_error: return\n\n labels = {}\n for item in p3tokens:\n if item.label in labels:\n report_global_error(\"Label '%s:' is multiply defined.\" % item.label)\n elif item.isNodeDefinition() or item.isTransition():\n labels[item.label] = item\n else:\n raise Exception(\"Problem in generate_machine: %s\" % item)\n\n # Check for undefined references\n for item in p3tokens:\n if item.isTransition():\n for ref in item.sources + item.destinations:\n if ref not in labels:\n hint = (\" Should it be %s() ?\" % ref) if ref[0].isupper() else \"\"\n report_global_error(\"Label '%s' was referenced but never defined.%s\" %\n (ref,hint))\n labels[ref] = None\n\n # Write out the state machine source as a comment\n emit_line('def setup(self):')\n indent_level += 4\n indent = ' ' * indent_level + \"# \"\n # indent = \"#\"\n out_f.write(indent + indent.join(lines))\n emit_line('')\n emit_line('# Code generated by genfsm on %s:' % time.strftime('%c'))\n emit_line('')\n\n # Generate the nodes, then the transitions\n for item in p3tokens:\n if item.isNodeDefinition():\n emit_line('%s = %s%s .set_name(\"%s\") .set_parent(self)' %\n (item.label, item.node_type, item.arglist, item.label))\n for item in p3tokens:\n if item.isTransition():\n emit_line('')\n emit_line('%s = %s%s .set_name(\"%s\")' %\n (item.label, item.trans_type, item.arglist, item.label))\n emit_line('%s .add_sources(%s) .add_destinations(%s)' %\n (item.label, ','.join(item.sources), ','.join(item.destinations)))\n\n emit_line('')\n emit_line('return self')\n indent_level -= 4\n\nindent_level = 0\n\ndef emit_line(line):\n out_f.write((' '*indent_level) + line + '\\n')\n \ndef process_file():\n global line_cache, current_line, starting_line, indent_level\n line_cache = [None] # dummy line 0\n current_line = 0\n r_setup = re.compile('^\\s*\\$setup\\s*((\\\"\\\"\\\")|(\\'\\'\\')|\\{)\\s*((\\#.*)|)$')\n r_indent = re.compile('^\\s*')\n while True:\n line = in_f.readline()\n if not line: break\n line_cache.append(line)\n current_line += 1\n # Echo lines to the output file until we reach a $setup line.\n if line.find('$setup') == -1:\n out_f.write(line)\n continue\n setup_match = r_setup.match(line)\n if not setup_match:\n report_line_error(\"Incorrect $setup syntax: '%s'\" % line.strip())\n continue\n delim = setup_match.group(1)[0]\n if delim == 
'{':\n close_delim = '}'\n r_end = re.compile('\\s*\\}\\s*$')\n else:\n close_delim = delim * 3\n r_end = re.compile('^\\s*' + close_delim)\n\n # Collect the lines of the state machine.\n starting_line = current_line + 1\n indent_level = r_indent.match(line).span()[1]\n lines = []\n while True:\n line = in_f.readline()\n if not line:\n report_line_error(\"State machine at line %s ended without closing %s.\" %\n (starting_line-1, close_delim))\n return\n current_line += 1\n line_cache.append(line)\n if r_end.match(line): break\n lines.append(line)\n # Now parse the collected lines and generate code.\n generate_machine(lines)\n\nfound_error = False\n\ndef report_line_error(error_text):\n global found_error\n cprint(line_cache[current_line].rstrip(), color='red', file=sys.stderr)\n cprint('Line %d: %s\\n' % (current_line, error_text), color='red', file=sys.stderr)\n found_error = True\n\ndef report_global_error(error_text):\n global found_error\n cprint('Error: %s\\n' % error_text, color='red', file=sys.stderr)\n found_error = True\n\ndef error_fragment(string):\n s = string.strip()\n p = s.find('\\n')\n if p == -1:\n p = len(s)\n fragment = s[0:min(p,20)]\n if len(fragment) < p:\n fragment += \"...\"\n return fragment\n\nif __name__ == '__main__':\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print('Usage: genfsm [infile.fsm | -] [outfile.py | -]')\n sys.exit(0)\n\n infile_name = sys.argv[1]\n if len(sys.argv) == 3:\n outfile_name = sys.argv[2]\n elif infile_name == '-':\n outfile_name = '-'\n else:\n outfile_name = infile_name[0:infile_name.rfind('.')] + \".py\"\n if infile_name == outfile_name:\n print(\"Output file name can't be the same as input file.\\nDid you mean %s ?\" %\n (infile_name[0:infile_name.rfind('.')] + \".fsm\"))\n sys.exit(1)\n\n try:\n with (open(infile_name) if infile_name != '-' else sys.stdin) as in_f:\n try:\n with (open(outfile_name,'w') if outfile_name != '-' else sys.stdout) as out_f:\n process_file()\n if not found_error:\n cprint('Wrote generated code to %s.' %\n (outfile_name if outfile_name != '-' else 'standard output'),\n color='green')\n except Exception as e:\n print('Error opening output file: %s' % e)\n import traceback\n traceback.print_exc()\n sys.exit(1)\n except Exception as e:\n print('Error opening input file: %s' % e)\n sys.exit(1)\n sys.exit(0)\n"
},
{
"alpha_fraction": 0.5550264716148376,
"alphanum_fraction": 0.5613756775856018,
"avg_line_length": 31.30769157409668,
"blob_id": "4cfbcaecbb0a40de2a0464475b0774ccbe98a29d",
"content_id": "19dfc3743bf5565e962b28468a2502a503b79e60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3780,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 117,
"path": "/cozmo_fsm/kine.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import math\nimport numpy as np\n\nfrom . import geometry\nfrom . import rrt_shapes\n\nclass Joint():\n def __init__(self, name, parent=None, type='fixed', getter=(lambda:0),\n description='A kinematic joint',\n qmin=-math.inf, qmax=math.inf,\n d=0, theta=0, r=0, alpha=0,\n collision_model=None, ctransform=geometry.identity()):\n self.name = name\n self.parent = parent\n self.type = type\n if type == 'fixed':\n self.apply_q = self.fixed\n elif type == 'revolute':\n self.apply_q = self.revolute\n elif type == 'prismatic':\n self.apply_q = self.prismatic\n elif type == 'world':\n self.apply_q = self.world_joint\n else:\n raise ValueError(\"Type must be 'fixed', 'revolute', 'prismatic', or 'world'.\")\n self.getter = getter\n self.description = description\n self.children = []\n self.d = d\n self.theta = theta\n self.r = r\n self.alpha = alpha\n self.children = []\n self.collision_model = collision_model\n self.q = 0\n self.qmin = qmin\n self.qmax = qmax\n self.parent_link_to_this_joint = geometry.dh_matrix(-d,-theta,-r,-alpha)\n self.this_joint_to_parent_link = np.linalg.inv(self.parent_link_to_this_joint)\n\n self.solver = None\n\n def __repr__(self):\n if self.type == 'fixed':\n qval = 'fixed'\n elif isinstance(self.q, (int,float)):\n qval = \"q=%.2f deg.\" % (self.q*180/math.pi)\n else:\n qval = (\"q=%s\" % repr(self.q))\n return \"<Joint '%s' %s>\" % (self.name, qval)\n\n def this_joint_to_this_link(self):\n \"The link moves by q in the joint's reference frame.\"\n return self.apply_q()\n\n def this_link_to_this_joint(self):\n return np.linalg.inv(self.this_joint_to_this_link())\n\n def revolute(self):\n return geometry.aboutZ(-self.q)\n\n def prismatic(self):\n return geometry.translate(0.,0.,-self.q)\n\n def fixed(self):\n return geometry.identity()\n\n def world_joint(self):\n return geometry.translate(self.q[0],self.q[1]).dot(geometry.aboutZ(self.q[2]))\n\nclass Kinematics():\n def __init__(self,joint_list,robot):\n self.joints = dict()\n for j in joint_list:\n self.joints[j.name] = j\n if j.parent:\n j.parent.children.append(j)\n self.base = self.joints[joint_list[0].name]\n self.robot = robot\n robot.kine = self\n self.get_pose()\n\n def joint_to_base(self,joint):\n if isinstance(joint,str):\n joint = self.joints[joint]\n Tinv = geometry.identity()\n j = joint\n while j is not self.base and j.parent is not None:\n Tinv = j.parent.this_link_to_this_joint().dot(\n j.this_joint_to_parent_link.dot(Tinv)\n )\n j = j.parent\n if j:\n return Tinv\n else:\n raise Exception('Joint %s has no path to base frame' % joint)\n\n def base_to_joint(self,joint):\n return np.linalg.inv(self.joint_to_base(joint))\n\n def joint_to_joint(self,joint1,joint2):\n return self.base_to_joint(joint2).dot(self.joint_to_base(joint1))\n\n def link_to_base(self,joint):\n if isinstance(joint,str):\n joint = self.joints[joint]\n return self.joint_to_base(joint).dot(joint.this_link_to_this_joint())\n\n def base_to_link(self,joint):\n return np.linalg.inv(self.link_to_base(joint))\n\n def link_to_link(self,joint1,joint2):\n return self.base_to_link(joint2).dot(self.link_to_base(joint1))\n\n def get_pose(self):\n for j in self.joints.values():\n j.q = j.getter()\n"
},
{
"alpha_fraction": 0.5601823925971985,
"alphanum_fraction": 0.6091185212135315,
"avg_line_length": 32.57143020629883,
"blob_id": "95d8263e335989875fb685a3165a4f2a00983580",
"content_id": "cdc30276c59a1d162423a1d461bf4ad03e20f7b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3290,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 98,
"path": "/cozmo_fsm/custom_objs.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import cozmo\nfrom cozmo.objects import CustomObject, CustomObjectMarkers, CustomObjectTypes\n\ncustom_marker_types = []\ncustom_container_types = []\ncustom_cube_types = []\n\nasync def declare_objects(robot):\n\n \"\"\"\n await robot.world.define_custom_box(\n CustomObjectTypes.CustomType00,\n CustomObjectMarkers.Hexagons4, # front\n CustomObjectMarkers.Triangles5, # back\n CustomObjectMarkers.Circles2, # top\n CustomObjectMarkers.Diamonds3, # bottom\n CustomObjectMarkers.Circles4, # left\n CustomObjectMarkers.Diamonds5, # right\n 50, 20, 1, # depth, width, height\n 40, 40, # marker width and height\n True) # is_unique\n return\n \"\"\"\n\n global custom_marker_types, custom_cube_types\n \n decl_marker = robot.world.define_custom_wall\n custom_marker_types = [\n CustomObjectTypes.CustomType00,\n CustomObjectTypes.CustomType01,\n CustomObjectTypes.CustomType02,\n CustomObjectTypes.CustomType03\n ]\n\n await decl_marker(CustomObjectTypes.CustomType00,\n CustomObjectMarkers.Circles2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType01,\n CustomObjectMarkers.Triangles2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType02,\n CustomObjectMarkers.Diamonds2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType03,\n CustomObjectMarkers.Hexagons2,\n 40, 40, 40, 40, True)\n\n\n# Markers for containers\n custom_container_types = [\n CustomObjectTypes.CustomType04,\n CustomObjectTypes.CustomType05\n ]\n\n await decl_marker(CustomObjectTypes.CustomType04,\n CustomObjectMarkers.Circles3,\n 40, 40, 40, 40, False)\n\n await decl_marker(CustomObjectTypes.CustomType05,\n CustomObjectMarkers.Triangles3,\n 40, 40, 40, 40, False)\n\n\n\n# Markers for cubes\n\n decl_cube = robot.world.define_custom_cube\n\n custom_cube_types = [\n CustomObjectTypes.CustomType10,\n CustomObjectTypes.CustomType11,\n CustomObjectTypes.CustomType12,\n CustomObjectTypes.CustomType13,\n CustomObjectTypes.CustomType14,\n CustomObjectTypes.CustomType15\n ]\n\n await decl_cube(CustomObjectTypes.CustomType10,\n CustomObjectMarkers.Circles5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType11,\n CustomObjectMarkers.Diamonds5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType12,\n CustomObjectMarkers.Hexagons5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType13,\n CustomObjectMarkers.Triangles4,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType14,\n CustomObjectMarkers.Circles4,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType15,\n CustomObjectMarkers.Diamonds4,\n 50, 40, 40, True)\n"
},
{
"alpha_fraction": 0.5768401026725769,
"alphanum_fraction": 0.6000663638114929,
"avg_line_length": 46.838623046875,
"blob_id": "a542678c92edd93e656db9cf64d802c592a9f63b",
"content_id": "02383a2711dfc71525992df5cef582034e2e7c0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18083,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 378,
"path": "/cozmo_fsm/doorpass.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo.util import Pose\nfrom numpy import matrix, tan, arctan2\nfrom math import sin, cos, atan2, pi, sqrt\n\ntry: from cv2 import Rodrigues\nexcept: pass\n\nfrom .nodes import *\nfrom .transitions import *\nfrom .geometry import wrap_angle\nfrom .pilot0 import *\nfrom .worldmap import WallObj, DoorwayObj\nfrom time import sleep\n\nclass DoorPass(StateNode):\n \"\"\"Pass through a doorway. Assumes the doorway is nearby and unobstructed.\"\"\"\n\n OUTER_GATE_DISTANCE = 150 # mm\n INNER_GATE_DISTANCE = 70 # mm\n\n def __init__(self, door=None):\n self.door = door\n super().__init__()\n\n def start(self, event=None):\n door = self.door\n if isinstance(event,DataEvent):\n door = event.data\n if isinstance(door, int):\n door ='Doorway-%d' % door\n if isinstance(door, str):\n doorway = self.robot.world.world_map.objects.get(door)\n elif isinstance(door, DoorwayObj):\n doorway = door\n else:\n doorway = None\n if isinstance(doorway, DoorwayObj):\n self.object = doorway\n else:\n print(\"Error in DoorPass: no doorway named %s\" % repr(door))\n raise ValueError(door,doorway)\n super().start(event)\n\n\n @staticmethod\n def calculate_gate(start_point, door, offset):\n \"\"\"Returns closest gate point (gx, gy)\"\"\"\n (rx,ry) = start_point\n dx = door.x\n dy = door.y\n dtheta = door.theta\n pt1x = dx + offset * cos(dtheta)\n pt1y = dy + offset * sin(dtheta)\n pt2x = dx + offset * cos(dtheta+pi)\n pt2y = dy + offset * sin(dtheta+pi)\n dist1sq = (pt1x-rx)**2 + (pt1y-ry)**2\n dist2sq = (pt2x-rx)**2 + (pt2y-ry)**2\n if dist1sq < dist2sq:\n return (pt1x, pt1y, wrap_angle(dtheta+pi))\n else:\n return (pt2x, pt2y, dtheta)\n\n\n class AdjustLiftHeight(SetLiftHeight):\n # TODO: If lift is high, push it higher (we're carrying something).\n # If lift isn't high, drop it to zero\n def start(self, event=None):\n self.height = 0\n super().start(event)\n\n\n class AwayFromCollide(Forward):\n def start(self, event=None):\n # super().start(event)\n if isinstance(event,DataEvent):\n startNode = event.data[0]\n collideObj = event.data[1]\n (rx, ry, rtheta) = self.robot.world.particle_filter.pose_estimate()\n cx, cy = collideObj.center[0,0],collideObj.center[1,0]\n ctheta = atan2(cy-ry, cx-rx)\n delta_angle = wrap_angle(ctheta - rtheta)\n delta_angle = delta_angle/pi*180\n if -90 < delta_angle and delta_angle < 90:\n self.distance = distance_mm(-40)\n else:\n self.distance = distance_mm(40)\n self.speed = speed_mmps(50)\n super().start(event)\n else:\n raise ValueError('DataEvent to AwayFromCollide must be a StartCollides.args', event.data)\n self.post_failure()\n\n\n class TurnToGate(Turn):\n \"\"\"Turn to the approach gate, or post success if we're already there.\"\"\"\n def __init__(self,offset):\n self.offset = offset\n super().__init__(speed=Angle(radians=2.0))\n\n def start(self,event=None):\n (rx, ry, rtheta) = self.robot.world.particle_filter.pose_estimate()\n (gate_x, gate_y, _) = DoorPass.calculate_gate((rx,ry), self.parent.object, self.offset)\n bearing = atan2(gate_y-ry, gate_x-rx)\n turn = wrap_angle(bearing - rtheta)\n print('^^ TurnToGate: gate=(%.1f, %.1f) offset=%.1f rtheta=%.1f bearing=%.1f turn=%.1f' %\n (gate_x, gate_y, self.offset, rtheta*180/pi, bearing*180/pi, turn*180/pi))\n if False and abs(turn) < 0.1:\n self.angle = Angle(0)\n super().start(event)\n self.post_success()\n else:\n self.angle = Angle(radians=turn)\n super().start(event)\n\n\n class ForwardToGate(Forward):\n \"\"\"Travel forward to reach the approach gate.\"\"\"\n def __init__(self,offset):\n self.offset = offset\n 
super().__init__()\n\n def start(self,event=None):\n (rx, ry, rtheta) = self.robot.world.particle_filter.pose_estimate()\n (gate_x, gate_y, _) = DoorPass.calculate_gate((rx,ry), self.parent.object, self.offset)\n dist = sqrt((gate_x-rx)**2 + (gate_y-ry)**2)\n self.distance = distance_mm(dist)\n self.speed = speed_mmps(50)\n super().start(event)\n\n class TurnToMarker(Turn):\n \"\"\"Use camera image and native pose to center the door marker.\"\"\"\n def start(self,event=None):\n marker_ids = self.parent.object.marker_ids\n marker = self.robot.world.aruco.seen_marker_objects.get(marker_ids[0], None) or \\\n self.robot.world.aruco.seen_marker_objects.get(marker_ids[1], None)\n if not marker:\n self.angle = Angle(0)\n super().start(event)\n print(\"TurnToMarker failed to find marker %s or %s!\" % marker_ids)\n self.post_failure()\n return\n else:\n print('TurnToMarker saw marker', marker)\n sensor_dist = marker.camera_distance\n sensor_bearing = atan2(marker.camera_coords[0],\n marker.camera_coords[2])\n x = self.robot.pose.position.x\n y = self.robot.pose.position.y\n theta = self.robot.pose.rotation.angle_z.radians\n direction = theta + sensor_bearing\n dx = sensor_dist * cos(direction)\n dy = sensor_dist * sin(direction)\n turn = wrap_angle(atan2(dy,dx) - self.robot.pose.rotation.angle_z.radians)\n if abs(turn) < 0.5*pi/180:\n self.angle = Angle(0)\n else:\n self.angle = Angle(radians=turn)\n print(\"TurnToMarker %s turning by %.1f degrees\" % (self.name, self.angle.degrees))\n super().start(event)\n\n class CheckCarrying(SetLiftHeight):\n def __init__(self):\n super().__init__()\n\n def start(self,event=None):\n if self.robot.carrying:\n self.height = 0.4 if self.robot.lift_ratio > 0.5 else 1\n super().start(event)\n\n class DriveThrough(Forward):\n \"\"\"Travel forward to drive through the gate.\"\"\"\n def __init__(self):\n super().__init__()\n\n def start(self,event=None):\n (rx, ry, rtheta) = self.robot.world.particle_filter.pose_estimate()\n (gate_x, gate_y, gate_theta) = DoorPass.calculate_gate((rx,ry), self.parent.object, 5)\n dist = sqrt((gate_x-rx)**2 + (gate_y-ry)**2)\n offset = 120\n delta_theta = wrap_angle(rtheta-(gate_theta+pi/2))\n delta_dist = abs(offset/sin(delta_theta))\n dist += delta_dist\n self.distance = distance_mm(dist)\n self.speed = speed_mmps(50)\n super().start(event)\n\n def setup(self):\n # droplift: self.AdjustLiftHeight() =N=>\n # SetHeadAngle(0) =T(0.2)=> check_start # Time for vision to process\n # \n # check_start: PilotCheckStartDetail()\n # check_start =S=> turn_to_gate1\n # check_start =D=> away_from_collide\n # check_start =F=> Forward(-80) =C=> check_start2\n # \n # check_start2: PilotCheckStartDetail()\n # check_start2 =S=> turn_to_gate1\n # check_start2 =D=> away_from_collide2\n # check_start2 =F=> ParentFails()\n # \n # check_start3: PilotCheckStart()\n # check_start3 =S=> turn_to_gate1\n # check_start3 =F=> ParentFails()\n # \n # turn_to_gate1: self.TurnToGate(DoorPass.OUTER_GATE_DISTANCE) =C=>\n # StateNode() =T(0.2)=> forward_to_gate1\n # \n # away_from_collide: self.AwayFromCollide() =C=> StateNode() =T(0.2)=> check_start2\n # away_from_collide =F=> check_start2\n # \n # away_from_collide2: self.AwayFromCollide() =C=> StateNode() =T(0.2)=> check_start3\n # away_from_collide2 =F=> check_start3\n # \n # forward_to_gate1: self.ForwardToGate(DoorPass.OUTER_GATE_DISTANCE) =C=>\n # StateNode() =T(0.2)=> {look_up, turn_to_gate2}\n # \n # # If we're carrying a cube, we lower the lift so we can see\n # look_up: SetHeadAngle(35)\n # \n # turn_to_gate2: 
self.TurnToGate(DoorPass.INNER_GATE_DISTANCE) =C=>\n # StateNode() =T(0.2)=> self.CheckCarrying() =C=> turn_to_marker1\n # \n # turn_to_marker1: self.TurnToMarker()\n # turn_to_marker1 =C=> marker_forward1\n # turn_to_marker1 =F=> marker_forward1\n # \n # marker_forward1: self.ForwardToGate(DoorPass.INNER_GATE_DISTANCE) =C=>\n # SetHeadAngle(40) =C=> StateNode() =T(0.2)=> turn_to_marker2\n # \n # turn_to_marker2: self.TurnToMarker()\n # turn_to_marker2 =C=> marker_forward2\n # turn_to_marker2 =F=> marker_forward2\n # \n # marker_forward2: StateNode() =T(0.2)=> {lower_head, through_door}\n # \n # lower_head: SetHeadAngle(0)\n # \n # through_door: self.DriveThrough()\n # \n # {lower_head, through_door} =C=> self.CheckCarrying() =C=> ParentCompletes()\n # \n \n # Code generated by genfsm on Sat Feb 25 01:50:55 2023:\n \n droplift = self.AdjustLiftHeight() .set_name(\"droplift\") .set_parent(self)\n setheadangle1 = SetHeadAngle(0) .set_name(\"setheadangle1\") .set_parent(self)\n check_start = PilotCheckStartDetail() .set_name(\"check_start\") .set_parent(self)\n forward1 = Forward(-80) .set_name(\"forward1\") .set_parent(self)\n check_start2 = PilotCheckStartDetail() .set_name(\"check_start2\") .set_parent(self)\n parentfails1 = ParentFails() .set_name(\"parentfails1\") .set_parent(self)\n check_start3 = PilotCheckStart() .set_name(\"check_start3\") .set_parent(self)\n parentfails2 = ParentFails() .set_name(\"parentfails2\") .set_parent(self)\n turn_to_gate1 = self.TurnToGate(DoorPass.OUTER_GATE_DISTANCE) .set_name(\"turn_to_gate1\") .set_parent(self)\n statenode1 = StateNode() .set_name(\"statenode1\") .set_parent(self)\n away_from_collide = self.AwayFromCollide() .set_name(\"away_from_collide\") .set_parent(self)\n statenode2 = StateNode() .set_name(\"statenode2\") .set_parent(self)\n away_from_collide2 = self.AwayFromCollide() .set_name(\"away_from_collide2\") .set_parent(self)\n statenode3 = StateNode() .set_name(\"statenode3\") .set_parent(self)\n forward_to_gate1 = self.ForwardToGate(DoorPass.OUTER_GATE_DISTANCE) .set_name(\"forward_to_gate1\") .set_parent(self)\n statenode4 = StateNode() .set_name(\"statenode4\") .set_parent(self)\n look_up = SetHeadAngle(35) .set_name(\"look_up\") .set_parent(self)\n turn_to_gate2 = self.TurnToGate(DoorPass.INNER_GATE_DISTANCE) .set_name(\"turn_to_gate2\") .set_parent(self)\n statenode5 = StateNode() .set_name(\"statenode5\") .set_parent(self)\n checkcarrying1 = self.CheckCarrying() .set_name(\"checkcarrying1\") .set_parent(self)\n turn_to_marker1 = self.TurnToMarker() .set_name(\"turn_to_marker1\") .set_parent(self)\n marker_forward1 = self.ForwardToGate(DoorPass.INNER_GATE_DISTANCE) .set_name(\"marker_forward1\") .set_parent(self)\n setheadangle2 = SetHeadAngle(40) .set_name(\"setheadangle2\") .set_parent(self)\n statenode6 = StateNode() .set_name(\"statenode6\") .set_parent(self)\n turn_to_marker2 = self.TurnToMarker() .set_name(\"turn_to_marker2\") .set_parent(self)\n marker_forward2 = StateNode() .set_name(\"marker_forward2\") .set_parent(self)\n lower_head = SetHeadAngle(0) .set_name(\"lower_head\") .set_parent(self)\n through_door = self.DriveThrough() .set_name(\"through_door\") .set_parent(self)\n checkcarrying2 = self.CheckCarrying() .set_name(\"checkcarrying2\") .set_parent(self)\n parentcompletes1 = ParentCompletes() .set_name(\"parentcompletes1\") .set_parent(self)\n \n nulltrans1 = NullTrans() .set_name(\"nulltrans1\")\n nulltrans1 .add_sources(droplift) .add_destinations(setheadangle1)\n \n timertrans1 = TimerTrans(0.2) 
.set_name(\"timertrans1\")\n timertrans1 .add_sources(setheadangle1) .add_destinations(check_start)\n \n successtrans1 = SuccessTrans() .set_name(\"successtrans1\")\n successtrans1 .add_sources(check_start) .add_destinations(turn_to_gate1)\n \n datatrans1 = DataTrans() .set_name(\"datatrans1\")\n datatrans1 .add_sources(check_start) .add_destinations(away_from_collide)\n \n failuretrans1 = FailureTrans() .set_name(\"failuretrans1\")\n failuretrans1 .add_sources(check_start) .add_destinations(forward1)\n \n completiontrans1 = CompletionTrans() .set_name(\"completiontrans1\")\n completiontrans1 .add_sources(forward1) .add_destinations(check_start2)\n \n successtrans2 = SuccessTrans() .set_name(\"successtrans2\")\n successtrans2 .add_sources(check_start2) .add_destinations(turn_to_gate1)\n \n datatrans2 = DataTrans() .set_name(\"datatrans2\")\n datatrans2 .add_sources(check_start2) .add_destinations(away_from_collide2)\n \n failuretrans2 = FailureTrans() .set_name(\"failuretrans2\")\n failuretrans2 .add_sources(check_start2) .add_destinations(parentfails1)\n \n successtrans3 = SuccessTrans() .set_name(\"successtrans3\")\n successtrans3 .add_sources(check_start3) .add_destinations(turn_to_gate1)\n \n failuretrans3 = FailureTrans() .set_name(\"failuretrans3\")\n failuretrans3 .add_sources(check_start3) .add_destinations(parentfails2)\n \n completiontrans2 = CompletionTrans() .set_name(\"completiontrans2\")\n completiontrans2 .add_sources(turn_to_gate1) .add_destinations(statenode1)\n \n timertrans2 = TimerTrans(0.2) .set_name(\"timertrans2\")\n timertrans2 .add_sources(statenode1) .add_destinations(forward_to_gate1)\n \n completiontrans3 = CompletionTrans() .set_name(\"completiontrans3\")\n completiontrans3 .add_sources(away_from_collide) .add_destinations(statenode2)\n \n timertrans3 = TimerTrans(0.2) .set_name(\"timertrans3\")\n timertrans3 .add_sources(statenode2) .add_destinations(check_start2)\n \n failuretrans4 = FailureTrans() .set_name(\"failuretrans4\")\n failuretrans4 .add_sources(away_from_collide) .add_destinations(check_start2)\n \n completiontrans4 = CompletionTrans() .set_name(\"completiontrans4\")\n completiontrans4 .add_sources(away_from_collide2) .add_destinations(statenode3)\n \n timertrans4 = TimerTrans(0.2) .set_name(\"timertrans4\")\n timertrans4 .add_sources(statenode3) .add_destinations(check_start3)\n \n failuretrans5 = FailureTrans() .set_name(\"failuretrans5\")\n failuretrans5 .add_sources(away_from_collide2) .add_destinations(check_start3)\n \n completiontrans5 = CompletionTrans() .set_name(\"completiontrans5\")\n completiontrans5 .add_sources(forward_to_gate1) .add_destinations(statenode4)\n \n timertrans5 = TimerTrans(0.2) .set_name(\"timertrans5\")\n timertrans5 .add_sources(statenode4) .add_destinations(look_up,turn_to_gate2)\n \n completiontrans6 = CompletionTrans() .set_name(\"completiontrans6\")\n completiontrans6 .add_sources(turn_to_gate2) .add_destinations(statenode5)\n \n timertrans6 = TimerTrans(0.2) .set_name(\"timertrans6\")\n timertrans6 .add_sources(statenode5) .add_destinations(checkcarrying1)\n \n completiontrans7 = CompletionTrans() .set_name(\"completiontrans7\")\n completiontrans7 .add_sources(checkcarrying1) .add_destinations(turn_to_marker1)\n \n completiontrans8 = CompletionTrans() .set_name(\"completiontrans8\")\n completiontrans8 .add_sources(turn_to_marker1) .add_destinations(marker_forward1)\n \n failuretrans6 = FailureTrans() .set_name(\"failuretrans6\")\n failuretrans6 .add_sources(turn_to_marker1) .add_destinations(marker_forward1)\n \n 
completiontrans9 = CompletionTrans() .set_name(\"completiontrans9\")\n completiontrans9 .add_sources(marker_forward1) .add_destinations(setheadangle2)\n \n completiontrans10 = CompletionTrans() .set_name(\"completiontrans10\")\n completiontrans10 .add_sources(setheadangle2) .add_destinations(statenode6)\n \n timertrans7 = TimerTrans(0.2) .set_name(\"timertrans7\")\n timertrans7 .add_sources(statenode6) .add_destinations(turn_to_marker2)\n \n completiontrans11 = CompletionTrans() .set_name(\"completiontrans11\")\n completiontrans11 .add_sources(turn_to_marker2) .add_destinations(marker_forward2)\n \n failuretrans7 = FailureTrans() .set_name(\"failuretrans7\")\n failuretrans7 .add_sources(turn_to_marker2) .add_destinations(marker_forward2)\n \n timertrans8 = TimerTrans(0.2) .set_name(\"timertrans8\")\n timertrans8 .add_sources(marker_forward2) .add_destinations(lower_head,through_door)\n \n completiontrans12 = CompletionTrans() .set_name(\"completiontrans12\")\n completiontrans12 .add_sources(lower_head,through_door) .add_destinations(checkcarrying2)\n \n completiontrans13 = CompletionTrans() .set_name(\"completiontrans13\")\n completiontrans13 .add_sources(checkcarrying2) .add_destinations(parentcompletes1)\n \n return self\n"
},
{
"alpha_fraction": 0.47660723328590393,
"alphanum_fraction": 0.5203680992126465,
"avg_line_length": 33.378318786621094,
"blob_id": "c352b9fd4ad3daa3e73f8535cb2b84c081ba1fbb",
"content_id": "623c97bb792ec196cd114fce77097dc52728e11f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15541,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 452,
"path": "/cozmo_fsm/geometry.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nGeometry calculations, including transformation matrices for\nkinematics.\n\n\"\"\"\n\nimport numpy as np\nfrom math import sin, cos, tan, pi, atan2, asin, sqrt, floor, ceil\nfrom fractions import Fraction\nimport copy\n\ndef point(x=0,y=0,z=0):\n return np.array([ [x], [y], [z], [1.] ])\n\ndef norm(pt):\n return pt[0][0:3].norm()\n\ndef aboutX(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [ 1, 0, 0, 0],\n [ 0, c, -s, 0],\n [ 0, s, c, 0],\n [ 0, 0, 0, 1]])\n\ndef aboutY(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]])\n\ndef aboutZ(theta):\n c = cos(theta)\n s = sin(theta)\n return np.array([\n [ c, -s, 0, 0],\n [ s, c, 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1.]])\n\ndef translate(x,y,z=0):\n return np.array([\n [ 1, 0, 0, x],\n [ 0, 1, 0, y],\n [ 0, 0, 1, z],\n [ 0, 0, 0, 1.]])\n\ndef normalize(v):\n s = v[3,0]\n if s == 0:\n return v\n else:\n return v/s\n\ndef identity():\n return np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1.]])\n\ndef dh_matrix(d,theta,r,alpha):\n \"\"\"Denavit-Hartenberg transformation from joint i to joint i+1.\"\"\"\n return aboutX(alpha).dot(translate(r,0,d).dot(aboutZ(theta)))\n\ndef translation_part(t):\n return np.array([ [t[0,3]], [t[1,3]], [t[2,3]], [0] ]) / t[3,3]\n\ndef rotation_part(t):\n r = t.copy()\n r[0:3,3] = 0\n return r\n\ndef wrap_angle(angle_rads):\n \"\"\"Keep angle between -pi and pi.\"\"\"\n if isinstance(angle_rads, np.ndarray):\n raise ValueError(\"Argument not a scalar: %s\", angle_rads)\n while angle_rads <= -pi:\n angle_rads += 2*pi\n while angle_rads > pi:\n angle_rads -= 2*pi\n return angle_rads\n\ndef wrap_selected_angles(angle_rads, index):\n \"\"\"Keep angle between -pi and pi for column vector of angles\"\"\"\n for i in index:\n angle_rads[i,0] = wrap_angle(angle_rads[i,0])\n return angle_rads\n\ndef tprint(t):\n number_format = \"%7.3f\"\n def tprint_vector(t):\n for i in range(t.shape[0]):\n if i == 0:\n print('[ ',end='')\n else:\n print(' ',end='')\n print(number_format % t[i],end='')\n if i+1 == t.shape[0]:\n print(' ]')\n else:\n print()\n def tprint_matrix(t):\n for i in range(t.shape[0]):\n if i == 0:\n print('[ ',end='')\n else:\n print(' ',end='')\n for j in range(t.shape[1]):\n if j>0: print(' ',end='')\n print(number_format % t[i][j], end='')\n if i+1 == t.shape[0]:\n print(' ]')\n else:\n print()\n if isinstance(t, np.ndarray) and t.ndim == 1:\n tprint_vector(t)\n elif isinstance(t, np.ndarray) and t.ndim == 2:\n tprint_matrix(t)\n elif isinstance(t, (int,float)):\n print(number_format % t)\n else:\n print(t)\n\ndef rotate_point(point, center, angle):\n pointX, pointY = point\n centerX, centerY = center\n rotatedX = cos(angle) * (pointX - centerX) - sin(angle) * (pointY-centerY) + centerX\n rotatedY = sin(angle) * (pointX - centerX) + cos(angle) * (pointY - centerY) + centerY\n return rotatedX, rotatedY\n\n#---------------- Quaternions ----------------\n\ndef quat2rot(q0,q1,q2,q3):\n # formula from http://stackoverflow.com/questions/7938373/from-quaternions-to-opengl-rotations\n q0_sq = q0*q0; q1_sq = q1*q1; q2_sq = q2*q2; q3_sq = q3*q3\n t_q0q1 = 2. * q0 * q1\n t_q0q2 = 2. * q0 * q2\n t_q0q3 = 2. * q0 * q3\n t_q1q2 = 2. * q1 * q2\n t_q1q3 = 2. * q1 * q3\n t_q2q3 = 2. * q2 * q3\n return np.array([\n [ q0_sq+q1_sq-q2_sq-q3_sq, t_q1q2-t_q0q3, t_q1q3+t_q0q2, 0. ],\n [ t_q1q2+t_q0q3, q0_sq-q1_sq+q2_sq-q3_sq, t_q2q3-t_q0q1, 0. 
],\n [ t_q1q3-t_q0q2, t_q2q3+t_q0q1, q0_sq-q1_sq-q2_sq+q3_sq, 0. ],\n [ 0., 0., 0., 1. ]])\n\ndef quat2rot33(q0,q1,q2,q3):\n # formula from http://stackoverflow.com/questions/7938373/from-quaternions-to-opengl-rotations\n q0_sq = q0*q0; q1_sq = q1*q1; q2_sq = q2*q2; q3_sq = q3*q3\n t_q0q1 = 2. * q0 * q1\n t_q0q2 = 2. * q0 * q2\n t_q0q3 = 2. * q0 * q3\n t_q1q2 = 2. * q1 * q2\n t_q1q3 = 2. * q1 * q3\n t_q2q3 = 2. * q2 * q3\n return np.array([\n [ q0_sq+q1_sq-q2_sq-q3_sq, t_q1q2-t_q0q3, t_q1q3+t_q0q2, ],\n [ t_q1q2+t_q0q3, q0_sq-q1_sq+q2_sq-q3_sq, t_q2q3-t_q0q1, ],\n [ t_q1q3-t_q0q2, t_q2q3+t_q0q1, q0_sq-q1_sq-q2_sq+q3_sq]])\n\n\ndef quaternion_to_euler_angle(quaternion):\n # source: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n w, x, y, z = quaternion\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n X = atan2(t0, t1)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n Y = asin(t2)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n Z = atan2(t3, t4)\n\n return X, Y, Z\n\n\n#---------------- Orientation state from quaternion ----------------\n\nORIENTATION_UPRIGHT = 'upright'\nORIENTATION_INVERTED = 'inverted'\nORIENTATION_SIDEWAYS = 'sideways'\nORIENTATION_TILTED = 'tilted'\nORIENTATION_LEFT = 'left'\nORIENTATION_RIGHT = 'right'\n\ndef get_orientation_state(quaternion, isPlanar=False):\n \"\"\"Utility used by light cubes, charger, and custom markers.\"\"\"\n q0, q1, q2, q3 = quaternion\n mat_arr = quat2rot(q0, q1, q2, q3)\n z_vec = np.array([0, 0, 1, 1])\n z_dot = mat_arr.dot(z_vec)[:3]\n dot_product = np.round(z_dot.dot(np.array([0, 0, 1])), decimals=2)\n x, y, z = quaternion_to_euler_angle(quaternion)\n if isPlanar:\n perpendicular = True if -0.5 < y < 0.5 else False\n if not perpendicular:\n dot_product = np.round(z_dot.dot(np.array([1, 0, 0])), decimals=2)\n x, y, z = quaternion_to_euler_angle([q0, q2, q3, q1])\n x = -y if x>0 else y+pi\n x = x if x < pi else (x - 2*pi)\n if dot_product >= 0.9:\n orientation = ORIENTATION_UPRIGHT\n elif dot_product <= -0.9:\n orientation = ORIENTATION_INVERTED\n z -= pi\n elif -0.15 <= dot_product <= 0.15:\n if isPlanar:\n # Markers\n if 0 < x < pi:\n orientation = ORIENTATION_RIGHT\n else:\n orientation = ORIENTATION_LEFT\n else:\n # Cubes\n isSideways = abs(y) < 0.2 or abs(abs(y)-pi/2) < 0.2\n orientation = ORIENTATION_SIDEWAYS if isSideways else ORIENTATION_TILTED\n if round(y, 1) == 0:\n z = z-pi/2 if x>0 else z+pi/2\n else:\n #w, x, y, z = quaternion\n #x, y, z = quaternion_to_euler_angle([w, y, x, z])\n x, y, _ = quaternion_to_euler_angle([q0, q2, q1, q3])\n z = -y if x>0 else y+pi\n else:\n orientation = ORIENTATION_TILTED\n\n return orientation, x, y, z\n # return orientation, x, y, wrap_angle(z)\n\n\ndef same_orientation(old_object, new_object):\n q1 = old_object.pose.rotation.q0_q1_q2_q3\n q2 = new_object.pose.rotation.q0_q1_q2_q3\n old_orientation, _, _, _ = get_orientation_state(q1)\n new_orientation, _, _, _ = get_orientation_state(q2)\n if old_orientation != new_orientation:\n return False\n elif old_orientation == ORIENTATION_SIDEWAYS:\n old_pattern_number = get_pattern_number(old_object.pose.rotation.euler_angles)\n new_pattern_number = get_pattern_number(new_object.pose.rotation.euler_angles)\n if old_pattern_number == new_pattern_number:\n return True\n else:\n return False\n else:\n return True\n\ndef get_pattern_number(eulerAngles):\n x, y, z = eulerAngles\n pattern = -1\n z = min([pi/2, 0, -pi/2], key=lambda 
val:abs(val-z))\n if z == -pi/2:\n pattern = 1\n elif z == pi/2:\n pattern = 3\n else:\n if min([0, -pi, pi], key=lambda val:abs(val-x)) == 0:\n pattern = 2\n else:\n pattern = 4\n return pattern\n\n\n#---------------- General Geometric Calculations ----------------\n\ndef project_to_line(x0,y0,theta0,x1,y1):\n \"\"\"Returns the projection of the point (x1,y1) onto the\n line through (x0,y0) with orientation theta0.\"\"\"\n bigvalue = 1e6\n m0 = max(-bigvalue, min(bigvalue, tan(theta0)))\n if abs(m0) < 1/bigvalue:\n return (x1,y0)\n m1 = -1 / m0\n b0 = y0 - m0*x0\n b1 = y1 - m1*x1\n x2 = (b0-b1) / (m1-m0)\n y2 = m0 * x2 + b0\n return (x2,y2)\n\ndef line_equation(p1, p2):\n \"Returns the line equation used by line_intersection.\"\n A = (p1[1] - p2[1])\n B = (p2[0] - p1[0])\n C = (p1[0]*p2[1] - p2[0]*p1[1])\n return (A, B, -C)\n\ndef line_extrapolate(L, x):\n (A,B,C) = L\n s = +1 if B > 0 else -1\n return C if B == 0 else (-A/B)*x + C*s\n\ndef line_intersection(L1,L2):\n \"Intersection point of two lines defined by line equations\"\n D = L1[0] * L2[1] - L1[1] * L2[0]\n if D == 0: return False\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n x = Dx / D\n y = Dy / D\n return (x,y)\n\ndef segment_intersect_test(p1, p2, p3, p4):\n \"\"\"Returns True if the line segment from p1 to p2\n intersects the line segment from p3 to p4. Formula from\n http://www.cs.swan.ac.uk/~cssimon/line_intersection.html\"\"\"\n (x1,y1) = p1\n (x2,y2) = p2\n (x3,y3) = p3\n (x4,y4) = p4\n denom = (x4-x3)*(y1-y2) - (x1-x2)*(y4-y3)\n if abs(denom) < 0.0001:\n return False\n numa = (y3-y4)*(x1-x3) + (x4-x3)*(y1-y3)\n numb = (y1-y2)*(x1-x3) + (x2-x1)*(y1-y3)\n ta = numa / denom\n tb = numb / denom\n if (0 <= ta <= 1) and (0 <= tb <= 1):\n return True\n else:\n return False\n\n\ndef rotation_matrix_to_euler_angles(R):\n \"Input R is a 3x3 rotation matrix.\"\n sy = sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])\n singular = sy < 1e-6\n if not singular:\n x = atan2(R[2,1] , R[2,2])\n y = atan2(-R[2,0], sy)\n z = atan2(R[1,0], R[0,0])\n else:\n x = atan2(-R[1,2], R[1,1])\n y = atan2(-R[2,0], sy)\n z = 0\n\n return np.array([x, y, z])\n\n\ndef polygon_fill(polygon, offset):\n \"\"\"\n Implement the scanline polygon fill algorithm\n Input a polygon (rrt shape) and return points inside the polygon\n \"\"\"\n class Edge:\n def __init__(self, ymax, x, sign, dx, dy, sum):\n self.ymax = ymax\n self.xval = x\n self.sign = sign\n self.dx = dx\n self.dy = dy\n self.sum = sum\n def __repr__(self):\n return '<Edge (ymax= %s, xval= %s, sign= %s, dx= %s, dy= %s, sum= %s )>' % \\\n (self.ymax, self.xval, self.sign, self.dx, self.dy, self.sum)\n\n [xCenter, yCenter, _, _] = polygon.vertices.mean(1)\n edges = polygon.edges\n ((xmin,ymin), (xmax,ymax)) = polygon.get_bounding_box()\n xmin, ymin, xmax, ymax = floor(xmin), floor(ymin), ceil(xmax), ceil(ymax)\n xdelta = abs(xmin) if xmin < 0 else 0\n xmin += xdelta\n xmax += xdelta\n xCenter += xdelta\n ydelta = abs(ymin) if ymin < 0 else 0\n ymin += ydelta\n ymax += ydelta\n yCenter += ydelta\n edge_table = [[] for i in range(ymax+1)]\n active_list, points = [], []\n\n for edge in edges:\n ([[p1x], [p1y], _, _], [[p2x], [p2y], _, _]) = edge\n if (p1y-p2y) != 0: # Don't need to consider horizontal edges\n p1x, p1y, p2x, p2y = p1x+xdelta, p1y+ydelta, p2x+xdelta, p2y+ydelta\n end_points = [[p1x, p1y], [p2x, p2y]]\n end_points = sorted(end_points, key = lambda pt: pt[1]) # Sort on y value\n _xval, _ymin, _ymax = int(round(end_points[0][0])), int(round(end_points[0][1])), 
int(round(end_points[1][1]))\n slope = Fraction((p1x-p2x)/(p1y-p2y)).limit_denominator(10)\n _dx = slope.numerator\n _dy = slope.denominator\n _sign = 1 if (_dx > 0) == (_dy > 0) else -1\n _edge = Edge(_ymax, _xval, _sign, abs(_dx), abs(_dy), 0)\n edge_table[_ymin].append(_edge)\n\n for scanline in range(ymin, ymax+1):\n # Add match (ymin==scanline) edges to the active_list\n if len(edge_table[scanline]) > 0:\n for edge in edge_table[scanline]:\n active_list.append(edge)\n if len(active_list) > 0:\n y_lower_bound = (ymin - offset) if (offset < 0) else (yCenter - offset)\n y_upper_bound = (ymax + offset) if (offset < 0) else (yCenter + offset)\n if y_lower_bound < scanline < y_upper_bound:\n # Sort active_list on x value; if same x value, sort on slope (1/m)\n active_list = sorted(active_list, key = lambda x: (x.xval, x.sign*x.dx/x.dy))\n for _x in range(active_list[0].xval, active_list[1].xval):\n x_lower_bound = (active_list[0].xval - offset) if (offset < 0) else (xCenter - offset)\n x_upper_bound = (active_list[1].xval + offset) if (offset < 0) else (xCenter + offset)\n if x_lower_bound < _x < x_upper_bound:\n points.append([_x-xdelta, scanline-ydelta])\n if len(active_list) > 3:\n y_lower_bound = (ymin - offset) if (offset < 0) else (yCenter - offset)\n y_upper_bound = (ymax + offset) if (offset < 0) else (yCenter + offset)\n if y_lower_bound < scanline < y_upper_bound:\n for _x in range(active_list[2].xval, active_list[3].xval):\n x_lower_bound = (active_list[2].xval - offset) if (offset < 0) else (xCenter - offset)\n x_upper_bound = (active_list[3].xval + offset) if (offset < 0) else (xCenter + offset)\n if x_lower_bound < _x < x_upper_bound:\n points.append([_x-xdelta, scanline-ydelta])\n # Remove form active_list if edge.ymax = scanline\n active_list = [edge for edge in active_list if scanline < edge.ymax]\n # Increase x-value\n for edge in active_list:\n # Add dx to sum\n edge.sum += edge.dx\n # While sum ≥ dy, adjust x, subtract dy from sum\n while edge.sum >= edge.dy:\n edge.xval += edge.sign\n edge.sum -= edge.dy\n\n return points\n\ndef check_concave(polygon):\n \"\"\"\n Input a polygon (rrt shape)\n Return a boolean(is concave or not) and\n a list of triangle vertices divided from the concave polygon\n \"\"\"\n # Currently only works for quadrilateral\n vertices = np.transpose(polygon.vertices).tolist()\n edges = [[p1x-p2x, p1y-p2y] for [[p1x], [p1y], _, _], [[p2x], [p2y], _, _] in polygon.edges]\n crossProducts = [np.cross(edges[i], edges[i-1]) > 0 for i in range(len(edges))]\n if all(crossProducts) or not any(crossProducts):\n return False, None\n else:\n trues = [i for i in range(len(crossProducts)) if crossProducts[i] == True]\n falses = [i for i in range(len(crossProducts)) if crossProducts[i] == False]\n idx = trues[0] if len(trues) < len(falses) else falses[0]\n vertices += vertices\n tri1 = vertices[:][idx:idx+3]\n tri2 = vertices[:][idx+2:idx+5]\n return True, [np.transpose(tri1), np.transpose(tri2)]\n"
},
{
"alpha_fraction": 0.7847919464111328,
"alphanum_fraction": 0.7847919464111328,
"avg_line_length": 23.89285659790039,
"blob_id": "a77b4a5b9113f08b3e079c1d371cfa9777365830",
"content_id": "4601347a7e38de89bada166d4c9db17bd34bbfd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 697,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 28,
"path": "/cozmo_fsm/__init__.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "from cozmo.util import radians, degrees, Pose, Rotation\n\nfrom . import base\nfrom . import program\nbase.program = program\n\nfrom .nodes import *\nfrom .transitions import *\nfrom .program import *\nfrom .trace import tracefsm\nfrom .particle import *\nfrom .particle_viewer import ParticleViewer\nfrom .path_planner import PathPlanner\nfrom .cozmo_kin import *\nfrom .rrt import *\nfrom .path_viewer import PathViewer\nfrom .speech import *\nfrom .worldmap import WorldMap\nfrom .worldmap_viewer import WorldMapViewer\nfrom .cam_viewer import CamViewer\nfrom .pilot import *\nfrom .pickup import *\nfrom .doorpass import *\nfrom . import wall_defs\nfrom . import custom_objs\nfrom .sim_robot import SimRobot\n\ndel base\n"
},
{
"alpha_fraction": 0.5845018029212952,
"alphanum_fraction": 0.5930913090705872,
"avg_line_length": 32.186439514160156,
"blob_id": "b595edfd78b19f0b6d86ff9db2f716f9b5ddffcf",
"content_id": "d9a83cfb572c3a1275cd36e76a2f23be65a1ec00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 21538,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 649,
"path": "/simple_cli",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3 -i\n\"\"\"\n\nSimple Cozmo CLI\n================\nA simple (and crude) Command Line Interface for Cozmo.\n\nAvailable commands:\n\n (python code)\n Executable statements are directly passed to python's interpreter.\n The magic global variables ans holds the results of evaluating the\n last expression.\n\n exit\n Quit the CLI; return to Python.\n\n monitor(robot[, Event])\n Monitor all event types in the dispatch table, or a specified type of Event\n\n unmonitor(robot[, Event])\n Turn off monitoring\n\n runfsm(module_name)\n Imports or reloads a state machine module and runs the\n state machine.\n\n tracefsm(trace_level)\n Sets the FSM tracing level (0-9). With no argument, returns\n the current level.\n\n tm message\n Sends 'message' as a text message to the currently running\n state machine.\n\n show active\n Shows active state nodes and transitions.\n\n show cam_viewer | path_viewer | particle_viewer | worldmap_viewer\n Displays OpenCV viewer of the specified type.\n\n !cmd\n Runs 'cmd' in a shell and prints the result.\n*********\n\nNOTE: On Windows you must install the pyreadline package in\norder to get readline.\n\nAuthor: David S. Touretzky, Carnegie Mellon University\n=======\n\n\"\"\"\n\n# All that stuff we really need in order to get going.\nimport atexit\nimport code\nimport datetime\nimport logging\nimport os\nimport platform\nimport re\nimport readline\nimport rlcompleter\nimport subprocess\nimport sys\nimport time\nimport traceback\nfrom importlib import __import__, reload\n\ntry:\n from termcolor import cprint\nexcept:\n def cprint(string,color=None):\n print(string)\n\ntry:\n import matplotlib\n import matplotlib.pyplot as plt\nexcept:\n pass\n\n\nimport cozmo\nfrom cozmo.util import *\n\nfrom event_monitor import monitor, unmonitor\n\nimport cozmo_fsm\nfrom cozmo_fsm import *\nfrom cozmo_fsm.worldmap import ArucoMarkerObj, CustomMarkerObj, WallObj\n\n# tab completion\nreadline.parse_and_bind('tab: complete')\n# history file\nif 'HOME' in os.environ: # Linux\n histfile = os.path.join(os.environ['HOME'], '.pythonhistory')\nelif 'USERPROFILE' in os.environ: # Windows\n histfile = os.path.join(os.environ['USERPROFILE'], '.pythonhistory')\nelse:\n histfile = '.pythonhistory'\n\ntry:\n readline.read_history_file(histfile)\nexcept IOError:\n pass\natexit.register(readline.write_history_file, histfile)\ndel rlcompleter\n\nos_version = platform.system()\ndel platform\n\n# Put current directory on search path.\nif '.' not in sys.path:\n sys.path.append('.')\n\nres = 0\nans = None\n\nRUNNING = True\n\n\ndef simple_cli_callback(_wcube1, _wcube2, _wcube3, _wcharger):\n global wcube1, wcube2, wcube3, wcharger\n wcube1 = _wcube1\n wcube2 = _wcube2\n wcube3 = _wcube3\n wcharger = _wcharger\n\ndef runfsm(module_name, running_modules=dict()):\n \"\"\"runfsm('modname') reloads that module and expects it to contain\n a class of the same name. It calls that class's constructor and then\n calls the instance's start() method.\"\"\"\n\n global running_fsm\n running_fsm = cozmo_fsm.program.running_fsm\n if running_fsm:\n robot.stop_all_motors()\n running_fsm.stop()\n\n r_py = re.compile('.*\\.py$')\n if r_py.match(module_name):\n print(\"\\n'%s' is not a module name. 
Trying '%s' instead.\\n\" %\n (module_name, module_name[0:-3]))\n module_name = module_name[0:-3]\n\n found = False\n try:\n reload(running_modules[module_name])\n found = True\n except KeyError: pass\n except: raise\n if not found:\n try:\n running_modules[module_name] = __import__(module_name)\n except ImportError as e:\n print(\"Error loading %s: %s. Check your search path.\\n\" %\n (module_name,e))\n return\n except Exception as e:\n print('\\n===> Error loading %s:' % module_name)\n raise\n\n py_filepath = running_modules[module_name].__file__\n fsm_filepath = py_filepath[0:-2] + 'fsm'\n try:\n py_time = datetime.datetime.fromtimestamp(os.path.getmtime(py_filepath))\n fsm_time = datetime.datetime.fromtimestamp(os.path.getmtime(fsm_filepath))\n if py_time < fsm_time:\n cprint('Warning: %s.py is older than %s.fsm. Should you run genfsm?' %\n (module_name,module_name), color=\"yellow\")\n except: pass\n\n # The parent node class's constructor must match the module name.\n the_module = running_modules[module_name]\n the_class = the_module.__getattribute__(module_name) \\\n if module_name in dir(the_module) else None\n if isinstance(the_class,type) and issubclass(the_class,StateNode) and not issubclass(the_class,StateMachineProgram):\n cprint(\"%s is not an instance of StateMachineProgram.\\n\" % module_name, color=\"red\")\n return\n if not isinstance(the_class,type) or not issubclass(the_class,StateMachineProgram):\n cprint(\"Module %s does not contain a StateMachineProgram named %s.\\n\" %\n (module_name, module_name), color=\"red\")\n return\n the_module.robot = robot\n the_module.world = robot.world\n the_module.charger = robot.world.charger\n the_module.cube1 = robot.world.light_cubes[cozmo.objects.LightCube1Id]\n the_module.cube2 = robot.world.light_cubes[cozmo.objects.LightCube2Id]\n the_module.cube3 = robot.world.light_cubes[cozmo.objects.LightCube3Id]\n the_module.wcharger = wcharger\n the_module.wcube1 = wcube1\n the_module.wcube2 = wcube2\n the_module.wcube3 = wcube3\n # Class's __init__ method will call setup, which can reference the above variables.\n running_fsm = cozmo_fsm.program.running_fsm = the_class()\n running_fsm.simple_cli_callback = simple_cli_callback\n cli_globals = globals()\n cli_globals['running_fsm'] = running_fsm\n robot.loop.call_soon(running_fsm.start)\n return running_fsm\n\n\ndef text_message(msg):\n running_fsm = cozmo_fsm.program.running_fsm\n if not running_fsm or not running_fsm.running:\n print('No state machine running. 
Use runfsm(module_name) to start a state machine.')\n return\n try:\n running_fsm.robot.erouter.post(TextMsgEvent(msg))\n except KeyboardInterrupt: raise\n except Exception as e:\n traceback.print_exc()\n print()\n\n\ndef start_stuff(args):\n spec = args[0] if len(args) > 0 else \"\"\n if spec == 'perched':\n try:\n cams = [int(x) for x in args[1:]]\n except:\n print('Usage: start perched [camera_number...]')\n return\n robot.world.perched.start_perched_camera_thread(cams)\n elif spec == 'server':\n robot.world.server.start_server_thread()\n elif spec == 'client':\n if len(args) != 2:\n print('Usage: start client IP_address')\n return\n robot.world.client.start_client_thread(args[1])\n elif spec == 'shared_map':\n robot.world.client.use_shared_map()\n print('Now using shared map.')\n else:\n print(\"\"\"Usage:\n start perched\n start server\n start client [IP_Address]\n start shared_map\n\"\"\")\n\ndef show_stuff(args):\n global running_fsm\n running_fsm = cozmo_fsm.program.running_fsm\n spec = args[0] if len(args) > 0 else \"\"\n if spec == 'active':\n if not running_fsm:\n print('No state machine present.')\n elif not running_fsm.running:\n print(\"State machine '%s' is not running.\" % running_fsm.name)\n else:\n show_active(running_fsm,0)\n elif spec == \"kine\":\n show_kine(args[1:])\n elif spec == 'cam_viewer' or spec=='viewer':\n if running_fsm:\n running_fsm.cam_viewer = True\n if not robot.world.cam_viewer:\n robot.world.cam_viewer = CamViewer(robot)\n robot.world.cam_viewer.start()\n elif spec == \"crosshairs\":\n if running_fsm:\n running_fsm.viewer_crosshairs = not running_fsm.viewer_crosshairs\n elif spec == \"particle_viewer\":\n if not robot.world.particle_viewer:\n robot.world.particle_viewer = ParticleViewer(robot)\n robot.world.particle_viewer.start()\n elif spec == \"path_viewer\":\n if not robot.world.path_viewer:\n robot.world.path_viewer = PathViewer(robot,world.rrt)\n robot.world.path_viewer.start()\n elif spec == \"worldmap_viewer\":\n if not robot.world.worldmap_viewer:\n robot.world.worldmap_viewer = WorldMapViewer(robot)\n robot.world.worldmap_viewer.start()\n elif (spec == \"all\") or (spec == \"all_viewers\"):\n if running_fsm:\n running_fsm.cam_viewer = True\n robot.world.cam_viewer = CamViewer(robot)\n robot.world.cam_viewer.start()\n robot.world.particle_viewer = ParticleViewer(robot)\n robot.world.particle_viewer.start()\n robot.world.path_viewer = PathViewer(robot,world.rrt)\n robot.world.path_viewer.start()\n robot.world.worldmap_viewer = WorldMapViewer(robot)\n robot.world.worldmap_viewer.start()\n elif spec == \"pose\":\n robot.world.world_map.show_pose()\n elif spec == \"landmarks\":\n robot.world.particle_filter.show_landmarks()\n elif spec == \"objects\":\n robot.world.world_map.show_objects()\n elif spec == \"particle\":\n robot.world.particle_filter.show_particle(args[1:])\n elif spec == \"camera\":\n show_camera(args[1:])\n else:\n print(\"\"\"Invalid option. 
Try one of:\n show viewer | cam_viewer\n show crosshairs\n show worldmap_viewer\n show particle_viewer\n show path_viewer\n show all | all_viewers\n show active\n show kine [joint]\n show pose\n show landmarks\n show objects\n show particle [n]\n show camera n\n \"\"\")\n\n\ndef show_active(node,depth):\n if node.running: print(' '*depth, node)\n for child in node.children.values():\n show_active(child, depth+1)\n for trans in node.transitions:\n if trans.running: print(' '*(depth+1), trans)\n\ndef show_kine(args):\n if len(args) == 0:\n show_kine_tree(0, robot.kine.joints['base'])\n print()\n elif len(args) == 1:\n show_kine_joint(args[0])\n else:\n print('Usage: show kine [joint]')\n\ndef show_kine_tree(level, joint):\n qstring = ''\n if joint.type != 'fixed':\n if isinstance(joint.q, (float,int)):\n qval = ('%9.5g' % joint.q).strip()\n if joint.type == 'revolute':\n qval = qval + (' (%.1f deg.)' % (joint.q*180/pi))\n else:\n qval = '(' + (', '.join([('%9.5g' % v).strip() for v in joint.q])) + ')'\n qstring = ' q=' + qval\n print(' '*level, joint.name, ': ', joint.type, qstring, sep='')\n for child in joint.children:\n show_kine_tree(level+1, child)\n\ndef show_kine_joint(name):\n if name not in robot.kine.joints:\n print(\"'\"+repr(name)+\"' is not the name of a joint. Try 'show kine'.\")\n return\n joint = robot.kine.joints[name]\n fmt = '%10s'\n\n def formatq(type,val):\n if type == 'revolute':\n if val == inf:\n return 'inf'\n elif val == -inf:\n return '-inf'\n jrad = ('%9.5g' % val).strip() + ' radians'\n jdeg = '(' + ('%9.5g' % (val * 180/pi)).strip() + ' degrees)' if val != 0 else ''\n return jrad + ' ' + jdeg\n elif type == 'prismatic':\n return ('%9.5g' % val).strip() + ' mm'\n elif type == 'fixed':\n return ''\n elif type == 'world':\n if val is None:\n return ''\n else:\n return '(' + (', '.join(['%9.5g' % x for x in val])) + ')'\n else:\n raise ValueError(type)\n\n print(fmt % 'Name:', name)\n print(fmt % 'Type:', joint.type)\n print(fmt % 'Parent:', joint.parent.name if joint.parent else '')\n print(fmt % 'Descr.:', joint.description)\n print(fmt % 'q:', formatq(joint.type, joint.q))\n print(fmt % 'qmin:', formatq(joint.type, joint.qmin))\n print(fmt % 'qmax:', formatq(joint.type, joint.qmax))\n print(fmt % 'DH d:', formatq('prismatic',joint.d))\n print(fmt % 'DH theta:', formatq('revolute',joint.theta))\n print(fmt % 'DH alpha:', formatq('revolute',joint.alpha))\n print(fmt % 'DH r:', formatq('prismatic',joint.r))\n print(fmt % 'Link in base frame:')\n tprint(robot.kine.link_to_base(name))\n print()\n\ndef show_landmarks():\n landmarks = robot.world.particle_filter.sensor_model.landmarks\n print('The particle filter has %d landmark%s:' %\n (len(landmarks), '' if (len(landmarks) == 1) else 's'))\n show_landmarks_helper(landmarks)\n\ndef show_landmarks_helper(landmarks):\n sorted_keys = sort_wmobject_ids(landmarks)\n for key in sorted_keys:\n value = landmarks[key]\n if isinstance(value, Pose):\n x = value.position.x\n y = value.position.y\n theta = value.rotation.angle_z.degrees\n sigma_x = 0\n sigma_y = 0\n sigma_theta = 0\n else:\n x = value[0][0,0]\n y = value[0][1,0]\n theta = value[1] * 180/pi\n sigma_x = sqrt(value[2][0,0])\n sigma_y = sqrt(value[2][1,1])\n sigma_theta = sqrt(value[2][2,2])*180/pi\n if key.startswith('Aruco-'):\n print(' Aruco marker %s' % key[6:], end='')\n elif key.startswith('Wall-'):\n print(' Wall %s' % key[5:], end='')\n elif key.startswith('Cube-'):\n print(' Cube %s' % key[5:], end='')\n else:\n print(' %r' % key, end='')\n print(' at (%6.1f, 
%6.1f) @ %4.1f deg +/- (%4.1f,%4.1f) +/- %3.1f deg' %\n (x, y, theta, sigma_x, sigma_y, sigma_theta))\n print()\n\ndef show_particle(args):\n if len(args) == 0:\n particle = robot.world.particle_filter.best_particle\n particle_number = '(best=%d)' % particle.index\n elif len(args) > 1:\n print('Usage: show particle [number]')\n return\n else:\n try:\n particle_number = int(args[0])\n particle = robot.world.particle_filter.particles[particle_number]\n except ValueError:\n print('Usage: show particle [number]')\n return\n except IndexError:\n print('Particle number must be between 0 and',\n len(robot.world.particle_filter.particles)-1)\n return\n print ('Particle %s: x=%6.1f y=%6.1f theta=%6.1f deg log wt=%f [%.25f]' %\n (particle_number, particle.x, particle.y, particle.theta*180/pi,\n particle.log_weight, particle.weight))\n if len(particle.landmarks) > 0:\n print('Landmarks:')\n show_landmarks_helper(particle.landmarks)\n else:\n print()\n\ndef show_camera(args):\n if len(args) != 1:\n print('Usage: show camera n, where n is a camera number, typically 0 or 1.')\n return\n try:\n cam = int(args[0])\n except ValueError:\n show_camera()\n robot.world.perched.check_camera(cam)\n\ndef do_reload(module_name):\n the_module = None\n try:\n the_module = reload(sys.modules[module_name])\n except KeyError:\n print(\"Module '%s' isn't loaded.\" % module_name)\n except: raise\n if the_module:\n print(the_module)\n print()\n\ndef do_shell_command(cmd):\n try:\n subprocess.call(cmd, shell=True)\n except Exception as e:\n print(e)\n\n\ndef run(sdk_conn):\n global robot\n robot = sdk_conn.wait_for_robot()\n if \"TK_VIEWER\" in sys.argv:\n time.sleep(1.5) # allow time for Tk to set up the viewer window\n try:\n cozmo_fsm.evbase.robot_for_loading = robot\n robot.erouter = cozmo_fsm.evbase.EventRouter()\n robot.erouter.robot = robot\n robot.erouter.start()\n except: pass\n cli_loop(robot)\n\n\ndef cli_loop(robot):\n global ans, RUNNING\n\n cli_globals = globals()\n cli_globals['world'] = robot.world\n cli_globals['light_cubes'] = world.light_cubes\n cli_globals['cube1'] = light_cubes[cozmo.objects.LightCube1Id]\n cli_globals['cube2'] = light_cubes[cozmo.objects.LightCube2Id]\n cli_globals['cube3'] = light_cubes[cozmo.objects.LightCube3Id]\n cli_globals['charger'] = robot.world.charger\n cli_globals['ans'] = None\n\n running_fsm = cozmo_fsm.program.running_fsm = \\\n StateMachineProgram(cam_viewer=False, simple_cli_callback=simple_cli_callback)\n cli_globals['running_fsm'] = running_fsm\n running_fsm.start()\n\n cli_loop._console = code.InteractiveConsole()\n while True:\n if RUNNING == False:\n return\n cli_loop._line = ''\n while cli_loop._line == '':\n readline.write_history_file(histfile)\n try:\n cli_loop._line = cli_loop._console.raw_input('C> ').strip()\n except KeyboardInterrupt:\n process_interrupt()\n continue\n except EOFError:\n print(\"EOF.\\nType 'exit' to exit.\\n\")\n continue\n try:\n robot.kine.get_pose()\n except: pass\n if cli_loop._line[0] == '!':\n do_shell_command(cli_loop._line[1:])\n continue\n elif cli_loop._line[0:3] == 'tm ' or cli_loop._line == 'tm':\n text_message(cli_loop._line[3:])\n continue\n elif cli_loop._line[0:5] == 'show ' or cli_loop._line == 'show':\n show_args = cli_loop._line[5:].split(' ')\n show_stuff(show_args)\n continue\n elif cli_loop._line[0:7] == 'reload ':\n do_reload(cli_loop._line[7:])\n continue\n elif cli_loop._line[0:6] == 'start ' or cli_loop._line == 'start':\n start_args = cli_loop._line[6:].split(' ')\n start_stuff(start_args)\n continue\n 
cli_loop._do_await = False\n if cli_loop._line[0:7] == 'import ' or cli_loop._line[0:5] == 'from ' or \\\n cli_loop._line[0:7] == 'global ' or cli_loop._line[0:4] == 'del ' or \\\n cli_loop._line[0:4] == 'for ' or cli_loop._line[0:6] == 'while ' or \\\n cli_loop._line[0:4] == 'def ' or cli_loop._line[0:6] == 'async ' :\n # Can't use assignment to capture a return value, so None.\n ans = None\n elif cli_loop._line[0:6] == 'await ':\n cli_loop._do_await = True\n cli_loop._line = 'ans=' + cli_loop._line[6:]\n elif cli_loop._line[0:5] == 'exit':\n # Clean up\n try:\n world_viewer.exited = True\n except: pass\n if running_fsm:\n running_fsm.stop()\n RUNNING=False\n else:\n cli_loop._line = 'ans=' + cli_loop._line\n try:\n cli_globals['charger'] = robot.world.charger # charger may have appeared\n exec(cli_loop._line, cli_globals)\n if cli_loop._do_await:\n print(\"Can't use await outside of an async def.\")\n ans = None # ans = await ans\n if not ans is None:\n print(ans,end='\\n\\n')\n except KeyboardInterrupt:\n print('Keyboard interrupt!')\n except SystemExit:\n print('Type exit() again to exit Python.')\n RUNNING = False\n except Exception:\n traceback.print_exc()\n print()\n\nVERBOSE = False # True if we want all the log messages\n\n\ndef suppress_filter(log_record):\n message = log_record.msg if isinstance(log_record.msg, str) else repr(log_record.msg)\n if VERBOSE:\n return True\n if log_record.levelno == logging.ERROR and \\\n message.startswith(\"Received a custom object type:\"):\n return False\n if message.startswith(\"Defined: \") and \\\n len(log_record.args) > 0 and \\\n isinstance(log_record.args[0], cozmo.objects.CustomObject):\n return False\n if message.startswith(\"Invalidating pose for\") or \\\n message.startswith(\"Robot delocalized\") or \\\n message.startswith(\"Object connected\") or \\\n message.startswith(\"Object disconnected\"):\n return False\n if message.find(\"event received for unknown object ID\") >= 0:\n return False\n return True\n\nlogging_is_setup = False\n\n\ndef start_connection():\n global logging_is_setup, VERBOSE\n if not logging_is_setup:\n cozmo.setup_basic_logging()\n logging_is_setup = True\n if \"VERBOSE\" in sys.argv:\n VERBOSE = True\n cozmo.logger.addFilter(suppress_filter)\n cozmo.robot.Robot.drive_off_charger_on_connect = False\n connection_error = None\n\n try:\n if len(sys.argv) >= 2:\n if sys.argv[1] == \"TK_VIEWER\":\n cozmo.connect_with_tkviewer(run)\n else:\n print(\"\\nUnrecognized argument '%s'. Use TK_VIEWER instead.\\n\" % sys.argv[1])\n cozmo.connect(run)\n else:\n cozmo.connect(run)\n except cozmo.exceptions.SDKVersionMismatch as e:\n print('\\n\\n***** SDK version mismatch:',e,'\\n\\n')\n connection_error = e\n except cozmo.ConnectionError as e:\n connection_error = e\n if connection_error is not None:\n sys.exit(\"A connection error occurred: %s\" % connection_error)\n\n\ndef process_interrupt():\n robot.stop_all_motors()\n running_fsm = cozmo_fsm.program.running_fsm\n if running_fsm and running_fsm.running:\n print('\\nKeyboardInterrupt: stopping', running_fsm.name)\n running_fsm.stop()\n else:\n print(\"\\nKeyboardInterrupt. Type 'exit' to exit.\")\n\n\nif __name__ == '__main__':\n start_connection()\n"
},
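The `cli_loop` in the entry above implements its expression echo by rewriting each input line as `ans=<line>` and `exec`-ing it in the module globals. A minimal standalone sketch of that one trick (my toy reduction, not the cozmo-tools code; the real loop also special-cases statements such as `import` and `for`, handles `await`, and keeps readline history):

```python
# Toy sketch of the "ans=" capture trick used by cli_loop above.
def tiny_repl():
    env = {"ans": None}
    while True:
        line = input("C> ").strip()
        if line == "exit":
            break
        try:
            # Rewriting "2+2" into "ans=2+2" lets a plain exec() capture the value.
            exec("ans=" + line, env)
            if env["ans"] is not None:
                print(env["ans"])
        except Exception:
            import traceback
            traceback.print_exc()

if __name__ == "__main__":
    tiny_repl()
```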
{
"alpha_fraction": 0.6007326245307922,
"alphanum_fraction": 0.6318681240081787,
"avg_line_length": 35.36666488647461,
"blob_id": "5cba46141689f21caa2cb34934cfa07e7233e528",
"content_id": "5426236593501ad3439212443bd57dc1a971f47d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 30,
"path": "/cozmo_fsm/examples/PF_Aruco.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPF_Aruco demonstrates a particle filter using ArUco markers.\nThere are three sensor models provided:\n ArucoDistanceSensorModel -- distances only\n ArucoBearingSensorModel -- bearings only\n ArucoCombinedSensorModel -- combined distances + bearings\n\nIn the particle viewer window:\n the WASD keys move the robot\n 'e' forces an evaluation step\n 'r' forces a resampling\n 'v' displays the weight statistics\n 'z' re-randomizes the particles.\n\"\"\"\n\nfrom cozmo_fsm import *\nfrom cozmo.util import degrees, Pose\n\nclass PF_Aruco(StateMachineProgram):\n def __init__(self):\n landmarks = {\n 'Aruco-0' : Pose(-55, 160, 0, angle_z=degrees(90)),\n 'Aruco-1' : Pose( 55, 160, 0, angle_z=degrees(90)),\n 'Aruco-2' : Pose(160, 55, 0, angle_z=degrees( 0)),\n 'Aurco-3' : Pose(160, -55, 0, angle_z=degrees( 0))\n }\n pf = ParticleFilter(robot,\n landmarks = landmarks,\n sensor_model = ArucoCombinedSensorModel(robot))\n super().__init__(particle_filter=pf, particle_viewer=True)\n\n"
},
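PF_Aruco only wires known marker poses into a `ParticleFilter`; the actual weighting happens in the sensor models. As a rough illustration of what a distance-only model does (a sketch under my own simplifications, not the `cozmo_fsm` `ArucoDistanceSensorModel`): each particle is scored by how well the measured marker distance matches the distance predicted from that particle's position.

```python
import math, random

LANDMARK = (160.0, 55.0)   # a known marker position in mm, like 'Aruco-2' above
SIGMA = 25.0               # assumed range-sensor noise in mm (made-up value)

def weight(particle_xy, measured_dist):
    """Gaussian likelihood of the range measurement given a particle's position."""
    px, py = particle_xy
    expected = math.hypot(LANDMARK[0] - px, LANDMARK[1] - py)
    err = measured_dist - expected
    return math.exp(-err * err / (2 * SIGMA * SIGMA))

particles = [(random.uniform(0, 300), random.uniform(0, 300)) for _ in range(5)]
for p in particles:
    print(p, "->", round(weight(p, 120.0), 4))   # pretend the marker was seen 120 mm away
```

Particles whose predicted distance is near 120 mm get weights close to 1; the rest decay toward 0 and are pruned at the next resampling step.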
{
"alpha_fraction": 0.6072245240211487,
"alphanum_fraction": 0.6758115887641907,
"avg_line_length": 33.71428680419922,
"blob_id": "1b9cf1345bbb217624a07a25db6d146953e5b5a3",
"content_id": "5e101ee6f9ada5e3e6509b1f5ffd7204398e434d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2187,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 63,
"path": "/aruco/generatetags.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2,os,sys,time\nimport cv2.aruco as aruco\n\ndef make_folder(name):\n try:\n os.system(\"mkdir \"+name)\n except:\n pass\n\n\ndef getBottomLeftWhite(im):\n (width,height) = im.shape\n for y in range(height-1,-1,-1):\n for x in range(width):\n if(im[y][x] == 255):\n return (x,y)\n return None\n\ndef getTag(num,aruco_dict=aruco.Dictionary_get(aruco.DICT_4X4_100),size=500):\n return aruco.drawMarker(aruco_dict,num,size)\n\ndef save_tags(aruco_dict,name,num,size=500,flip=False,label=False):\n for i in range(num):\n if i%20==0: print(\"tag %d generated.\" % (i))\n im = getTag(i,aruco_dict,size)\n if flip:\n im = cv2.flip(im,1)\n pos = getBottomLeftWhite(im)\n pos = (pos[0]+5,pos[1]-5) #shift up a little\n final=cv2.putText(im,str(i),pos,cv2.FONT_HERSHEY_COMPLEX_SMALL,1,128) #write num in gray\n\n if label:\n #add label\n final = np.concatenate((final,255*np.ones((int(size/10),size))))\n msg = \"Tag %d\" % (i)\n pos = (int(size/2)-20*int(len(msg)/2),size+int(size/20))\n final = cv2.putText(final,\"Tag %d\" % (i),pos,cv2.FONT_HERSHEY_COMPLEX_SMALL,1,0)\n\n cv2.imwrite(name+str(i)+\".jpg\",final)\n\n\ndef generate_tags(dict_name,outfilename,quantity=100,flip=False,label=False):\n aruco_dict = aruco.Dictionary_get(dict_name)\n save_tags(aruco_dict,outfilename,quantity,flip=flip,label=label)\n\n\nif(__name__ == \"__main__\"):\n print(\"you are running this file as a standalone program.\")\n label = len(sys.argv)>1\n if(label):\n print(\"You have chosen to label images of all tags.\")\n print(\"tags being outputed will be saved to autogenerated folders in your current directory. Press enter to continue?\")\n input() #wait for user to press enter\n make_folder(\"aruco_4x4_100\")\n #make_folder(\"aruco_4x4_1000\")\n #make_folder(\"aruco_5x5_100\")\n #make_folder(\"aruco_5x5_1000\"),\n generate_tags(aruco.DICT_4X4_100,\"aruco_4x4_100/aruco4x4_100_\",flip=False)\n #generate_tags(aruco.DICT_4X4_1000,\"aruco_4x4_1000/aruco4x4_1000_\",1000,flip=flip)\n #generate_tags(aruco.DICT_5X5_100,\"aruco_5x5_100/aruco5x5_100_\",flip=flip)\n #generate_tags(aruco.DICT_5X5_1000,\"aruco_5x5_1000/aruco5x5_1000_\",1000,flip=flip)\n print(\"complete!\")\n"
},
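generatetags.py only writes tags to disk; a quick sanity check is to feed a freshly drawn tag straight back into the detector. A sketch using the same legacy `cv2.aruco` API as the entry above (this assumes an `opencv-contrib-python` build older than 4.7; newer releases renamed these functions):

```python
import cv2
import cv2.aruco as aruco

dictionary = aruco.Dictionary_get(aruco.DICT_4X4_100)
tag = aruco.drawMarker(dictionary, 7, 500)          # tag id 7, 500 px, single channel

# Pad with a white quiet zone; the detector needs contrast around the black border.
tag = cv2.copyMakeBorder(tag, 50, 50, 50, 50, cv2.BORDER_CONSTANT, value=255)

corners, ids, rejected = aruco.detectMarkers(tag, dictionary)
print("detected ids:", None if ids is None else ids.ravel().tolist())   # expect [7]
```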
{
"alpha_fraction": 0.5014960765838623,
"alphanum_fraction": 0.5255118012428284,
"avg_line_length": 41.192691802978516,
"blob_id": "fdbe569f207cc784f58dc406c69b5a821778d50c",
"content_id": "b124a70ea89657909b20cc2fb03d79d97747a3ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12700,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 301,
"path": "/cozmo_fsm/wavefront.py",
"repo_name": "touretzkyds/cozmo-tools",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWavefront path planning algorithm.\n\"\"\"\n\nimport numpy as np\nimport heapq\nfrom math import floor, ceil, cos, sin\n\nfrom .geometry import wrap_angle, rotate_point, polygon_fill, check_concave\nfrom .rrt import StartCollides\nfrom .rrt_shapes import *\nfrom .worldmap import LightCubeObj, ChargerObj, CustomMarkerObj, MapFaceObj\n\nclass WaveFront():\n goal_marker = 2**31 - 1\n\n def __init__(self, square_size=5, bbox=None, grid_shape=(150,150), inflate_size=50):\n self.square_size = square_size # in mm\n self.bbox = bbox # in mm\n self.inflate_size = inflate_size # in mm\n self.grid_shape = grid_shape # array shape\n self.initialize_grid(bbox=bbox)\n self.obstacles = dict()\n\n def initialize_grid(self,bbox=None):\n if bbox:\n self.bbox = bbox\n self.grid_shape = (ceil((bbox[1][0] - bbox[0][0] + 4*self.inflate_size)/self.square_size),\n ceil((bbox[1][1] - bbox[0][1] + 4*self.inflate_size)/self.square_size))\n self.grid = np.zeros(self.grid_shape, dtype=np.int32)\n self.maxdist = 1\n\n def coords_to_grid(self,xcoord,ycoord):\n \"Convert world map coordinates to grid subscripts.\"\n x = int(round((xcoord-self.bbox[0][0]+2*self.inflate_size)/self.square_size))\n y = int(round((ycoord-self.bbox[0][1]+2*self.inflate_size)/self.square_size))\n if x >= 0 and x < self.grid_shape[0] and \\\n y >= 0 and y < self.grid_shape[1]:\n return (x,y)\n else:\n return (None,None)\n\n def grid_to_coords(self,gridx,gridy):\n xmin = self.bbox[0][0]\n ymin = self.bbox[0][1]\n x = gridx*self.square_size + xmin - 2*self.inflate_size\n y = gridy*self.square_size + ymin - 2*self.inflate_size\n return (x,y)\n\n def set_obstacle_cell(self, xcoord, ycoord, obstacle_id):\n (x,y) = self.coords_to_grid(xcoord,ycoord)\n if x is not None:\n self.grid[x,y] = obstacle_id\n\n def add_obstacle(self, obstacle):\n obstacle_id = -(1 + len(self.obstacles))\n self.obstacles[obstacle_id] = obstacle\n if isinstance(obstacle, Rectangle):\n centerX, centerY = obstacle.center[0,0], obstacle.center[1,0]\n width, height = obstacle.dimensions[0], obstacle.dimensions[1]\n theta = wrap_angle(obstacle.orient)\n for x in range(floor(centerX-width/2),\n ceil(centerX+width/2),\n int(self.square_size/2)):\n for y in range(floor(centerY-height/2),\n ceil(centerY+height/2),\n int(self.square_size/2)):\n new_x = ((x - centerX) * cos(theta) - (y - centerY) * sin(theta)) + centerX\n new_y = ((x - centerX) * sin(theta) + (y - centerY) * cos(theta)) + centerY\n self.set_obstacle_cell(new_x, new_y, obstacle_id)\n elif isinstance(obstacle, Polygon):\n raise NotImplemented(obstacle)\n elif isinstance(obstacle, Circle):\n raise NotImplemented(obstacle)\n elif isinstance(obstacle, Compound):\n raise NotImplemented(obstacle)\n else:\n raise Exception(\"%s has no add_obstacle() method defined for %s.\" % (self, obstacle))\n\n def set_goal_cell(self,xcoord,ycoord):\n self.set_cell_contents(xcoord,ycoord,self.goal_marker)\n\n def set_empty_cell(self,xcoord,ycoord):\n self.set_cell_contents(xcoord, ycoord, 0)\n\n def set_cell_contents(self,xcoord,ycoord,contents):\n (x,y) = self.coords_to_grid(xcoord,ycoord)\n if x is not None:\n self.grid[x,y] = contents\n else:\n print('**** bbox=', self.bbox, ' grid_shape=', self.grid_shape,\n ' x,y=', (x,y), ' xcoord,ycoord=', (xcoord,ycoord))\n print(ValueError('Coordinates (%s, %s) are outside the wavefront grid' % ((xcoord,ycoord))))\n\n def set_goal_shape(self, shape, default_offset=None, obstacle_inflation=0):\n goal_points = []\n if shape.obstacle_id.startswith('Room'):\n empty_points, goal_points = 
self.generate_room_goal_points(shape, default_offset)\n else: # cubes, charger, markers, mapFace\n empty_points, goal_points = self.generate_cube_goal_points(shape, obstacle_inflation)\n for point in empty_points:\n self.set_empty_cell(*rotate_point(point, shape.center[0:2,0], shape.orient))\n for point in goal_points:\n self.set_goal_cell(*rotate_point(point, shape.center[0:2,0], shape.orient))\n\n def generate_room_goal_points(self, shape, default_offset):\n offset = -1 if default_offset is None else default_offset\n if offset > 0:\n isConcave, vertices_lst = check_concave(shape)\n else:\n isConcave, vertices_lst = False, []\n if isConcave:\n for vertices in vertices_lst:\n goal_points += polygon_fill(Polygon(vertices), offset)\n else:\n goal_points = polygon_fill(shape, offset)\n empty_points = []\n return (empty_points, goal_points)\n\n def generate_cube_goal_points(self,shape,obstacle_inflation):\n # Generate points for cubes, charger, markers, mapFace in\n # standard orientation. Will rotate these later.\n if shape.obstacle_id.startswith('Cube'):\n (xsize,ysize,_) = LightCubeObj.light_cube_size\n goal_offset = 25 # distance from edge in mm\n elif shape.obstacle_id.startswith('Charger'):\n (xsize,ysize,_) = ChargerObj.charger_size\n goal_offset = 15 # distance from edge in mm\n elif shape.obstacle_id.startswith('CustomMarkerObj'):\n (xsize,ysize,_) = CustomMarkerObj.custom_marker_size\n goal_offset = 15 # distance from edge in mm\n elif shape.obstacle_id == 'MapFace':\n (xsize,ysize) = MapFaceObj.mapFace_size\n goal_offset = 15 # distance from edge in mm\n else:\n raise ValueError('Unrecognized goal shape', shape)\n ((xmin,ymin), (xmax,ymax)) = shape.get_bounding_box()\n goal_points, empty_points = [], []\n for offset in range(floor(xsize/2), ceil(xsize/2)+obstacle_inflation+2):\n empty_points.append([shape.center[0,0]-offset, shape.center[1,0]])\n empty_points.append([shape.center[0,0]+offset, shape.center[1,0]])\n for offset in range(floor(ysize/2), ceil(ysize/2)+obstacle_inflation+2):\n empty_points.append([shape.center[0,0], shape.center[1,0]-offset])\n empty_points.append([shape.center[0,0], shape.center[1,0]+offset])\n goal_points.append([shape.center[0,0]-xsize/2-goal_offset, shape.center[1,0]])\n goal_points.append([shape.center[0,0]+xsize/2+goal_offset, shape.center[1,0]])\n goal_points.append([shape.center[0,0], shape.center[1,0]-ysize/2-goal_offset])\n goal_points.append([shape.center[0,0], shape.center[1,0]+ysize/2+goal_offset])\n return (empty_points, goal_points)\n\n def check_start_collides(self,xstart,ystart):\n (x,y) = self.coords_to_grid(xstart,ystart)\n contents = self.grid[x,y]\n if contents == 0 or contents == self.goal_marker:\n return False\n else:\n collider = self.obstacles[contents]\n print('start collides:', (xstart,ystart), (x,y), collider)\n return collider\n\n def propagate(self,xstart,ystart):\n \"\"\"\n Propagate the wavefront in eight directions from the starting coordinates\n until a goal cell is reached or we fill up the grid.\n \"\"\"\n if self.check_start_collides(xstart,ystart):\n raise StartCollides()\n\n grid = self.grid\n (x,y) = self.coords_to_grid(xstart,ystart)\n goal_marker = self.goal_marker\n if grid[x,y] == goal_marker:\n return (x,y)\n fringe = [(1,(x,y))]\n heapq.heapify(fringe)\n xmax = self.grid_shape[0] - 1\n ymax = self.grid_shape[1] - 1\n while fringe:\n dist,(x,y) = heapq.heappop(fringe)\n if grid[x,y] == 0:\n grid[x,y] = dist\n else:\n continue\n dist10 = dist + 10\n dist14 = dist + 14\n self.maxdist = dist14\n if x > 0:\n cell 
= grid[x-1,y]\n if cell == goal_marker: return (x-1,y)\n elif cell == 0:\n heapq.heappush(fringe, (dist10,(x-1,y)))\n if y > 0:\n cell = grid[x-1,y-1]\n if cell == goal_marker: return (x-1,y-1)\n elif cell == 0:\n heapq.heappush(fringe, (dist14,(x-1,y-1)))\n if y < ymax:\n cell = grid[x-1,y+1]\n if cell == goal_marker: return (x-1,y+1)\n elif cell == 0:\n heapq.heappush(fringe, (dist14,(x-1,y+1)))\n if x < xmax:\n cell = grid[x+1,y]\n if cell == goal_marker: return (x+1,y)\n elif cell == 0:\n heapq.heappush(fringe, (dist10,(x+1,y)))\n if y > 0:\n cell = grid[x+1,y-1]\n if cell == goal_marker: return (x+1,y-1)\n elif cell == 0:\n heapq.heappush(fringe, (dist14,(x+1,y-1)))\n if y < ymax:\n cell = grid[x+1,y+1]\n if cell == goal_marker: return (x+1,y+1)\n elif cell == 0:\n heapq.heappush(fringe, (dist14,(x+1,y+1)))\n if y > 0:\n cell = grid[x,y-1]\n if cell == goal_marker: return (x,y-1)\n elif cell == 0:\n heapq.heappush(fringe, (dist10,(x,y-1)))\n if y < ymax:\n cell = grid[x,y+1]\n if cell == goal_marker: return (x,y+1)\n elif cell == 0:\n heapq.heappush(fringe, (dist10,(x,y+1)))\n return None\n\n def extract(self, search_result, wf_start):\n \"Extract the path once the goal is found, and convert back to worldmap coordinates.\"\n start_coords = self.coords_to_grid(*wf_start)\n if search_result == start_coords:\n return [self.grid_to_coords(*search_result)]\n (x,y) = search_result\n maxdist = self.goal_marker + 1\n grid = self.grid\n xmax = self.grid_shape[0] - 1\n ymax = self.grid_shape[1] - 1\n path = []\n while maxdist > 1:\n path.append((x,y))\n if x > 0:\n if 0 < grid[x-1,y] < maxdist:\n maxdist = grid[x-1,y]\n (newx,newy) = (x-1,y)\n if y > 0:\n if 0 < grid[x-1,y-1] < maxdist:\n maxdist = grid[x-1,y-1]\n (newx,newy) = (x-1,y-1)\n if y < ymax:\n if 0 < grid[x-1,y+1] < maxdist:\n maxdist = grid[x-1,y+1]\n (newx,newy) = (x-1,y+1)\n if x < xmax:\n if 0 < grid[x+1,y] < maxdist:\n maxdist = grid[x+1,y]\n (newx,newy) = (x+1,y)\n if y > 0:\n if 0 < grid[x+1,y-1] < maxdist:\n maxdist = grid[x+1,y-1]\n (newx,newy) = (x+1,y-1)\n if y < ymax:\n if 0 < grid[x+1,y+1] < maxdist:\n maxdist = grid[x+1,y+1]\n (newx,newy) = (x+1,y+1)\n if y > 0:\n if 0 < grid[x,y-1] < maxdist:\n maxdist = grid[x,y-1]\n (newx,newy) = (x,y-1)\n if y < ymax:\n if 0 < grid[x,y+1] < maxdist:\n maxdist = grid[x,y+1]\n (newx,newy) = (x,y+1)\n (x,y) = (newx,newy)\n path.append((x,y))\n path.reverse()\n square_size = self.square_size\n xmin = self.bbox[0][0]\n ymin = self.bbox[0][1]\n path_coords = [self.grid_to_coords(x,y) for (x,y) in path]\n return path_coords\n\ndef wf_test():\n start = (261,263)\n goal = (402,454)\n #\n wf = WaveFront()\n wf.grid[:,:] = 0\n wf.set_goal_cell(*goal)\n wf.set_obstacle_cell(280,280)\n wf.set_obstacle_cell(280,290)\n wf.set_obstacle_cell(290,280)\n wf.set_obstacle_cell(290,290)\n result1 = wf.propagate(*start)\n result2 = wf.extract(result1)\n print('path length =', len(result2))\n print(result2)\n print(wf.grid[75:85, 75:85])\n return result2\n\n# wf_test()\n"
}
] | 59 |
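`WaveFront.propagate` in the last entry above is a best-first flood fill over an occupancy grid, with costs 10 straight and 14 diagonal (the usual integer stand-in for a 1:√2 ratio). The core mechanism fits in a few lines on a toy grid (a simplified sketch of the idea only: 4-connected, unit cost, not the class above):

```python
import heapq

def wavefront(grid, start, goal):
    """Expand distances outward from start; grid cells: 0 = free, 1 = obstacle."""
    rows, cols = len(grid), len(grid[0])
    dist = {start: 0}
    fringe = [(0, start)]
    while fringe:
        d, (x, y) = heapq.heappop(fringe)
        if (x, y) == goal:
            return d
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < rows and 0 <= ny < cols \
                    and grid[nx][ny] == 0 and (nx, ny) not in dist:
                dist[(nx, ny)] = d + 1
                heapq.heappush(fringe, (d + 1, (nx, ny)))
    return None   # goal unreachable

grid = [[0, 0, 0, 0],
        [1, 1, 1, 0],
        [0, 0, 0, 0]]
print(wavefront(grid, (0, 0), (2, 0)))   # 8 steps around the wall
```

The real class then walks back downhill through the filled-in distances (its `extract` method) to recover the actual path, which is why it keeps the whole grid rather than just the goal distance.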
cobrce/distance_RPi | https://github.com/cobrce/distance_RPi | 9b0f13cd944a0a0dc36b0242c559f4d8beccf0d0 | 17b8d2e1b6e08e7741334f90711958a2737e822e | 7a5029e9293043a7a8b945e15e8a1f2a589fe231 | refs/heads/master | 2020-03-19T00:42:32.000817 | 2018-06-02T17:46:00 | 2018-06-02T17:46:00 | 135,497,672 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7488913536071777,
"alphanum_fraction": 0.7660753726959229,
"avg_line_length": 32.407405853271484,
"blob_id": "047034659da3e047274b87313692ada19ddb2b8e",
"content_id": "364b9e8edb088d9ceb0607c42e64056e0e4d9528",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1804,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 54,
"path": "/README.md",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "# distance_RPi\nA Python project to use Raspberry PI 3 + HC-SR04 (ultrasonic sensor) to measure distance (based on [this](https://tutorials-raspberrypi.com/raspberry-pi-ultrasonic-sensor-hc-sr04/) tutorial)\n\n\n\n### Needed packages\n* guizero\n* RPi.GPIO\n\n### Setup\n* Wire the trigger and echo pins of the sensor to pin 21 and 16 (Broadcom) resp. of the RPi3\n* Connect sensor Vcc to +5 and Gnd to 0\n\n### Use\n* Run the script\n* Move the slider to change the refresh rate (default is 100ms)\n\n# Web app\nThis is a Django web app to display the measured distance in web page instead of console/gui.\nThe app was developped in VisualStudio Community edition and so the templates are just modifications to the original ones.\n\n\n\n### Neede packages\n* Django==1.11.13\n\n### Setup\nI'm using a virtual environment for the app and followed these steps:\n```\ngit clone https://github.com/cobrce/distance_RPi.git\ncd distance_RPi/DistanceRPiDjango\nmkdir venv\npython3 -m venv venv\nsource venv/bin/activate\npip3 install Django==1.11.13\npython3 manage.py makemigrations\npython3 manage.py migrate\npytohn3 manage.py createsuperuse\n```\nat this point you will be asked to create a super user (provide name, email and password of your choice)\n\n### Use\nif you don't see \"(venv)\" before the bash prompt use the following line (we suppose your current directory is distance_RPi/DistanceRPiDjango)\n```\nsource venv/bin/activate\n```\nTo run the server\n```\npython3 manage.py runserver YOUR_LOCAL_IP:PORT\n```\nOpen a browser and navigate to YOUR_LOCAL_IP:PORT\n\n### Login\nby login with the super use you see a new button that allows you to Suspend/Resume the process\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 10,
"blob_id": "f1f906434c36a7d464013925e0ce459965f37e62",
"content_id": "3b333054b29573d83d9d3e9cb78c8a2b7b904508",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 3,
"path": "/DistanceRPiDjango/DistanceRpi/__init__.py",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPackage for DistanceRpi.\n\"\"\"\n"
},
{
"alpha_fraction": 0.6040654182434082,
"alphanum_fraction": 0.612019419670105,
"avg_line_length": 22.5625,
"blob_id": "49083b937cccf76dbd5bb361fc32a4aa7a50ea3c",
"content_id": "a0f73ccfdc8236d833874d4de2f3518034efa925",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2263,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 96,
"path": "/DistanceRPiDjango/app/views.py",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDefinition of views.\n\"\"\"\nfrom django.shortcuts import render\nfrom django.http import HttpRequest\nfrom django.template import RequestContext\nfrom datetime import datetime\nfrom django.http import HttpResponse\nfrom subprocess import Popen,PIPE,STDOUT\nfrom random import randint\nimport time\nimport os\nimport sys\nimport json\nfrom django.http import Http404\nimport signal\n\np = None\ndistance = 0\nrunning = True\n\ndef ExitHandler(*args):\n from threading import get_ident\n \n global p\n if p != None:\n p.kill()\n print(\"(%X) Reader process killed\" % get_ident())\n else:\n print(\"(%x) Reader process not running\" % get_ident())\n print(\"(%x) Server shutdown\" % get_ident())\n sys.exit(0)\n\ndef Measure():\n global distance\n global running\n \n if running:\n dist = float(p.stdout.readline())\n #print(dist)\n if dist > 250:\n distance = \"Too far\"\n elif dist < 2:\n distance = \"Too near\"\n else:\n distance = \"%0.2f\" % dist\n return distance\n \ndef CheckMethod(method):\n if method == \"GET\":\n raise Http404(\"Url not existing\")\n \ndef resumesuspend(request):\n global running\n \n CheckMethod(request.method)\n \n if request.user.is_authenticated and 'stop' in request.POST :\n try:\n running = not bool(int(request.POST['stop']))\n except:\n pass\n return HttpResponse(\"\")\n\ndef read(request):\n global running\n \n CheckMethod(request.method)\n CheckReader()\n \n if running:\n status = \"Running\"\n else:\n status = \"Suspended\"\n return HttpResponse(json.dumps({\"dist\" : \"Distance (cm) : {}\".format(Measure()),\"status\" : status}))\n\ndef CheckReader():\n global p\n if p == None:\n p = Popen([\"python3\",\"app/distance_RPi_min.py\"],stdin=PIPE,stdout=PIPE,stderr=STDOUT)\n\ndef home(request):\n \"\"\"Renders the home page.\"\"\"\n assert isinstance(request, HttpRequest)\n \n CheckReader()\n return render(request,\n 'app/index.html',\n {\n 'title':'Ultrasonic sensor distance measure',\n 'year':datetime.now().year,\n 'distance' : 300,\n })\n\ndef csrf_failure(request, reason=\"\"):\n return HttpResponse(\"csrf failure\")\n\t"
},
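views.py above keeps one long-lived reader child (`CheckReader`) and pulls a single line per request with `p.stdout.readline()`. The same producer/consumer pattern in isolation (a hypothetical inline child stands in for `distance_RPi_min.py`, so this runs anywhere):

```python
import sys
from subprocess import Popen, PIPE, STDOUT

# Hypothetical stand-in for the GPIO reader: prints one float per line.
child_code = "import time\nfor i in range(3):\n    print(0.5 * i, flush=True)\n    time.sleep(0.1)\n"

p = Popen([sys.executable, "-c", child_code], stdout=PIPE, stderr=STDOUT, text=True)
for _ in range(3):
    value = float(p.stdout.readline())   # blocks until the child emits a line
    print("got", value)
p.wait()
```

Because `readline()` blocks, each web request in the Django app waits for exactly one fresh sensor sample rather than polling the GPIO itself.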
{
"alpha_fraction": 0.5935422778129578,
"alphanum_fraction": 0.6163342595100403,
"avg_line_length": 16.86440658569336,
"blob_id": "5c974489eca3ef40b79d4c9bae37bab8f6acc856",
"content_id": "87c03c1aa375f0c806121a8278dde099ceea5f60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1053,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 59,
"path": "/DistanceRPiDjango/app/distance_RPi_min.py",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "# based on https://tutorials-raspberrypi.com/raspberry-pi-ultrasonic-sensor-hc-sr04/\nimport os\nimport sys\nimport time\nimport RPi.GPIO as g\n\ntrigger = 21\necho = 16\n\nglobal initialized\ninitialized = False\n\ndef init():\n g.setmode(g.BCM)\n g.setup(echo,g.IN)\n g.setup(trigger,g.OUT)\n global initialized\n initialized = True\n\ndef Trigger():\n g.output(trigger,True)\n time.sleep(0.00001)\n g.output(trigger,False)\n\ndef WaitForEdge(value):\n while g.input(echo) != value:\n pass\n\ndef measure():\n global initialized\n if not initialized:\n init()\n \n Trigger()\n\n WaitForEdge(1)\n startime = time.time()\n\n WaitForEdge(0)\n endtime = time.time()\n\n delta = endtime - startime\n distance = delta * 34300 / 2\n return distance\n\ndef myprint(var):\n if not isinstance(var,str):\n var = str(var)\n sys.stdout.writelines(var + \"\\n\")\n sys.stdout.flush()\n\ndef main():\n init()\n while True:\n myprint(\"%0.2f\" % measure())\n time.sleep(0.1)\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5622406601905823,
"alphanum_fraction": 0.5947441458702087,
"avg_line_length": 19.671428680419922,
"blob_id": "c01ae75adbb978e56b3cd7163bfcd17dd1fda468",
"content_id": "1a2a4191ca3e97fca03f14f48422eac9306b52af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1446,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 70,
"path": "/distance_RPi.py",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "# based on https://tutorials-raspberrypi.com/raspberry-pi-ultrasonic-sensor-hc-sr04/\nimport os\nimport time\nimport RPi.GPIO as g\nfrom guizero import App, Text,Slider\n\ntrigger = 21\necho = 16\n\ndef init():\n g.setmode(g.BCM)\n g.setup(echo,g.IN)\n g.setup(trigger,g.OUT)\n\ndef Trigger():\n g.output(trigger,True)\n time.sleep(0.00001)\n g.output(trigger,False)\n\ndef WaitForEdge(value):\n while g.input(echo) != value:\n pass\n\ndef RepeatMeasure(time):\n txt.cancel(measure)\n txt.repeat(time,measure)\n \ndef GUI():\n app = App(title = \"Distance\",width = 230,height= 70,layout=\"grid\")\n \n Text(app,text = \"Rate (ms) :\",grid=[0,0],align = \"left\")\n slider = Slider(app,command=RepeatMeasure,start = 50,end = 2000,grid = [1,0],align = \"left\")\n slider.value = \"100\"\n \n \n Text(app,text = \"Distance (cm) : \", grid = [0,1],align=\"left\") \n global txt\n txt = Text(app,text=\"Dist\",grid = [1,1],align=\"left\")\n RepeatMeasure(100)\n \n app.display()\n \n \ndef Display(distance):\n #print(distance)\n txt.value = \"%0.2f\" % distance\n \ndef measure():\n \n Trigger()\n\n WaitForEdge(1)\n startime = time.time()\n\n WaitForEdge(0)\n endtime = time.time()\n\n delta = endtime - startime\n distance = delta * 34300 / 2\n Display(distance)\n \ndef main():\n init()\n GUI()\n\t\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n g.cleanup()"
},
{
"alpha_fraction": 0.787401556968689,
"alphanum_fraction": 0.787401556968689,
"avg_line_length": 17.285715103149414,
"blob_id": "c513a48bc01c994ff1d7b5c48e52e2adfe9b1c4b",
"content_id": "9fa95c1d05d2183ff1482f71aa8879a7a234417a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 7,
"path": "/DistanceRPiDjango/app/__init__.py",
"repo_name": "cobrce/distance_RPi",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPackage for the application.\n\"\"\"\n\nimport signal\nfrom app.views import ExitHandler\nsignal.signal(signal.SIGINT, ExitHandler)"
}
] | 6 |
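app/__init__.py in the repo above registers `ExitHandler` on SIGINT so the reader child dies with the Django process. The general shape of that cleanup trick, standalone (the sleeping child here is a placeholder for the sensor reader):

```python
import signal
import sys
import time
from subprocess import Popen

p = Popen([sys.executable, "-c", "import time; time.sleep(3600)"])   # placeholder child

def exit_handler(signum, frame):
    p.kill()   # make sure the child does not outlive the server
    print("reader process killed; shutting down")
    sys.exit(0)

signal.signal(signal.SIGINT, exit_handler)
print("child pid:", p.pid, "- press Ctrl+C to stop")
while True:
    time.sleep(1)
```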
k139215130/SocialBot | https://github.com/k139215130/SocialBot | fe09710b6091483b18974aba778636ec422fae53 | 242bdad6f893b01faba801df4f5e1a3238559deb | 0e74a5ddf7fe74ee21f957a527bba0e043452707 | refs/heads/master | 2023-03-05T23:46:06.343883 | 2019-11-27T15:29:45 | 2019-11-27T15:29:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5800589323043823,
"alphanum_fraction": 0.5952848792076111,
"avg_line_length": 24.13580322265625,
"blob_id": "155178e4a02ee3e6fa924f6de99919d625bc9d90",
"content_id": "6810d7cb7970e68f5277de02b21cd1da56ad34c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2178,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 81,
"path": "/homework1/app/utils.py",
"repo_name": "k139215130/SocialBot",
"src_encoding": "UTF-8",
"text": "\"\"\"\n查詢雙語詞彙庫 http://www.fcu.edu.tw/wSite/lp?ctNode=16185&mp=1\n請幫我查XX的英文\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\n\ndef get_keyword_vocabulary(keyword=None):\n\n \"\"\"\n 找尋頁數\n \"\"\"\n url = 'http://www.fcu.edu.tw/wSite/lp?ctNode=16185&mp=1'\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text,\"html.parser\")\n\n pagesize = soup.find(\"section\",{'class':'page'}).find('span').find('em').text\n print(pagesize)\n\n url = 'http://www.fcu.edu.tw/wSite/lp?ctNode=16185&mp=1&idPath=&nowPage=1&pagesize=' + pagesize\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text,\"html.parser\")\n\n table = soup.find(\"table\",{'class':'tb'})\n\n row_title = [] #欄位標題\n serial_number = [] #序號\n main_category = [] #主類別\n sub_category = [] #次類別\n chinese = [] #中文\n english = [] #英文\n\n # 抓取表格標題欄位\n for title in table.find(\"tr\").find_all(\"th\"):\n row_title.append(title.text.strip())\n\n # 抓取表格其他欄位\n for row in table.find_all(\"tr\")[1:]:\n fields = row.find_all(\"td\")\n serial_number.append(fields[0].text.strip())\n main_category.append(fields[1].text.strip())\n sub_category.append(fields[2].text.strip())\n chinese.append(fields[3].text.strip())\n english.append(fields[4].text.strip())\n\n \"\"\"\n print(row_title)\n print(serial_number)\n print(main_category)\n print(sub_category)\n print(chinese)\n print(english)\n \"\"\"\n\n vocabulary_df = pd.DataFrame({row_title[0]:serial_number,row_title[1]:main_category,row_title[2]:sub_category,row_title[3]:chinese,row_title[4]:english})\n\n \"\"\"\n print(vocabulary_df)\n \"\"\"\n\n msg = \"\"\n\n if keyword != None:\n vocabulary_df = vocabulary_df[vocabulary_df[\"中文\"].str.lower().str.contains(keyword.lower())]\n\n for index, row in vocabulary_df.iterrows():\n msg += \"「\" + row[\"中文\"] + '」 的英文是 「' + row[\"英文\"] + '」\\n'\n\n if msg == \"\":\n msg = \"查無 「\" + keyword + \"」 的英文\"\n\n return msg\n\n\nif __name__ == '__main__':\n print(get_keyword_vocabulary(\" \"))\n"
},
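The lookup at the end of utils.py is a case-insensitive substring filter over the scraped table. That one pandas idiom, isolated on a toy DataFrame (the column names here mirror the headers scraped from the FCU page):

```python
import pandas as pd

df = pd.DataFrame({"中文": ["資訊工程學系", "圖書館"],
                   "英文": ["Department of Information Engineering", "Library"]})

keyword = "圖書"
hits = df[df["中文"].str.lower().str.contains(keyword.lower())]
for _, row in hits.iterrows():
    print("「%s」 的英文是 「%s」" % (row["中文"], row["英文"]))
```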
{
"alpha_fraction": 0.6111740469932556,
"alphanum_fraction": 0.6258965730667114,
"avg_line_length": 30.927711486816406,
"blob_id": "e2620981c133362277cd6c54e80486191f840b0d",
"content_id": "142734591f805766dd7cbd25def1dc419f3f2cf7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2695,
"license_type": "no_license",
"max_line_length": 208,
"num_lines": 83,
"path": "/homework1/app/app.py",
"repo_name": "k139215130/SocialBot",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, make_response, jsonify, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\ndb = SQLAlchemy(app)\napp.config['SECRET_KEY'] = 'SECRET_KEY'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb.init_app(app)\n\nclass Vocabulary(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n serial_number = db.Column(db.Integer)\n main_category = db.Column(db.String(80))\n sub_category = db.Column(db.String(80))\n chinese = db.Column(db.String(250))\n english = db.Column(db.String(250))\n\n def __repr__(self):\n return '<Vocabulary %r>' % self.chinese\n\ndef get_all_vocabulary():\n print('------------開始查詢------------')\n url = 'http://www.fcu.edu.tw/wSite/lp?ctNode=16185&mp=1'\n r = requests.get(url)\n soup = BeautifulSoup(r.text,\"html.parser\")\n pagesize = soup.find(\"section\",{'class':'page'}).find('span').find('em').text\n\n url = 'http://www.fcu.edu.tw/wSite/lp?ctNode=16185&mp=1&idPath=&nowPage=1&pagesize=' + pagesize\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text,\"html.parser\")\n\n table = soup.find(\"table\",{'class':'tb'})\n\n for row in table.find_all(\"tr\")[1:]:\n fields = row.find_all(\"td\")\n vocabulary = Vocabulary(serial_number=fields[0].text.strip(), main_category=fields[1].text.strip(), sub_category=fields[2].text.strip(), chinese=fields[3].text.strip(), english=fields[4].text.strip())\n db.session.add(vocabulary)\n db.session.commit()\n print('------------查詢結束------------')\n\ndef get_keyword_vocabulary(keyword):\n result = Vocabulary.query.filter(Vocabulary.chinese.contains(keyword)).all()\n msg = ''\n if result:\n for i in result:\n msg += \"「\" + i.chinese + '」 的英文是 「' + i.english + '」\\n'\n else:\n msg += \"查無 「\" + keyword + \"」 的英文\"\n return msg\n\[email protected](\"/\", methods=['GET'])\ndef index():\n return render_template('index.html')\n\[email protected](\"/update\", methods=['GET'])\ndef update():\n get_all_vocabulary()\n return \"Success!\"\n\[email protected]('/webhook', methods=['POST'])\ndef webhook():\n req = request.get_json(silent=True, force=True)\n print(req)\n\n if req['queryResult']['parameters']['any'] != '':\n keyword = req['queryResult']['parameters']['any']\n print(keyword)\n \n res_message = {\"fulfillmentText\": get_keyword_vocabulary(keyword)}\n print(res_message)\n \n return make_response(jsonify(res_message))\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host='0.0.0.0', port=5000)"
}
] | 2 |
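The `/webhook` route in app.py above is a Dialogflow v2 fulfillment endpoint: read `queryResult.parameters` from the POSTed JSON, reply with a body whose `fulfillmentText` becomes the bot's answer. Reduced to the bare contract (a sketch; the vocabulary lookup is replaced by an echo):

```python
from flask import Flask, request, jsonify, make_response

app = Flask(__name__)

@app.route("/webhook", methods=["POST"])
def webhook():
    req = request.get_json(silent=True, force=True)
    keyword = req["queryResult"]["parameters"].get("any", "")
    reply = "you asked about: %s" % keyword   # the real app queries the Vocabulary table
    return make_response(jsonify({"fulfillmentText": reply}))

if __name__ == "__main__":
    app.run(port=5000)
```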
christofferwd/test | https://github.com/christofferwd/test | d5b5f5c753363bef659a70e3de189c882bbf4029 | a4ee3f3ba726b49fd1cd1873809568a5bd337918 | e4b4e97c4611ff8e9b848bed345b1f6bb11a94aa | refs/heads/master | 2021-07-07T19:50:42.664079 | 2017-09-30T20:07:47 | 2017-09-30T20:07:47 | 105,395,874 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6296296119689941,
"alphanum_fraction": 0.6851851940155029,
"avg_line_length": 10,
"blob_id": "1fcfeed10ca256db77e2eb8efdd93a213234755b",
"content_id": "f0c2bd72f8d3d757ce5626c6fdbf872400e997d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 5,
"path": "/hei.py",
"repo_name": "christofferwd/test",
"src_encoding": "UTF-8",
"text": "import numpy as np \n\narr = np.arange(1,10)\n\nprint(arr)"
},
{
"alpha_fraction": 0.703125,
"alphanum_fraction": 0.703125,
"avg_line_length": 9.666666984558105,
"blob_id": "90bb9bf0d3ee7c90957cda080f24df467f25c187",
"content_id": "c240d354222066449c15ac1aef7a044c147d61b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 6,
"path": "/README.md",
"repo_name": "christofferwd/test",
"src_encoding": "UTF-8",
"text": "# test\nfirst project\n\n## Subtitle\nhei, hva skjer??\ndette skjer!\n"
},
{
"alpha_fraction": 0.5806451439857483,
"alphanum_fraction": 0.5806451439857483,
"avg_line_length": 14.5,
"blob_id": "3c459264509044ee26daae5197e3ddebc3391e16",
"content_id": "6a6583b8a439ce77cdb6917e0ef2944fc9c9e857",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/nummer_2.py",
"repo_name": "christofferwd/test",
"src_encoding": "UTF-8",
"text": "\ndef addme(a,b):\n return a+b"
}
] | 3 |
isshin0330/pra_of_pickle | https://github.com/isshin0330/pra_of_pickle | 831eeae8acf26780a8fc766191ca62ab26ecf8bb | 09c20d8271d0b3d1a00400b66d76a3686e49518b | 3f926adb9f9332680c7141bcc198f8a4825f5033 | refs/heads/main | 2023-02-19T04:39:33.722311 | 2021-01-21T10:59:12 | 2021-01-21T10:59:12 | 331,200,928 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 12.5,
"blob_id": "6460fc50f08fd27ab58b15068c683f9fe63d1488",
"content_id": "fe1eebc95d28a34a90f5e01789dc579e3b32cb77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "isshin0330/pra_of_pickle",
"src_encoding": "UTF-8",
"text": "# pra_of_pickle\npickle化の練習\n\n"
},
{
"alpha_fraction": 0.7109488844871521,
"alphanum_fraction": 0.7226277589797974,
"avg_line_length": 31.619047164916992,
"blob_id": "e7e169e7cd704f908b50ae56c30aa336c2149846",
"content_id": "2503715e8b19715e1e2d76932801c69a55459e3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 21,
"path": "/pra_knn_pickle.py",
"repo_name": "isshin0330/pra_of_pickle",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.datasets import load_iris\niris = load_iris()\n\nimport pandas as pd\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\ndf[\"target\"] = iris.target\ndf.loc[df[\"target\"]==0, \"target\"] = \"setosa\"\ndf.loc[df[\"target\"]==1, \"target\"] = \"versicolor\"\ndf.loc[df[\"target\"]==2, \"target\"] = \"verginica\"\n\nfrom sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test = train_test_split(iris[\"data\"],iris[\"target\"],test_size=0.25,random_state=0)\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=1)\n\nknn.fit(x_train,y_train)\n\nimport pickle\nwith open(\"knn.pickle\", mode=\"wb\") as f:\n pickle.dump(knn, f)\n"
}
] | 2 |
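pra_knn_pickle.py above dumps the fitted classifier but never reads it back. The other half of the round trip looks like this (it assumes the `knn.pickle` file produced by the entry above is in the working directory):

```python
import pickle
from sklearn.datasets import load_iris

with open("knn.pickle", "rb") as f:
    knn = pickle.load(f)   # restores the fitted KNeighborsClassifier as-is

iris = load_iris()
print(knn.predict(iris["data"][:5]))                # predictions from the unpickled model
print(knn.score(iris["data"], iris["target"]))      # accuracy on the full dataset
```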
jsharples/usgs_nwis | https://github.com/jsharples/usgs_nwis | aea71c420c666eab891a9d0e7d0bfb4d0c19e1bd | b0441f13be29a1f270535d04d6111c212136bd04 | 64752e020719ead324a8fc8dc449ee7e995548af | refs/heads/master | 2021-01-20T02:42:11.884218 | 2017-04-28T06:24:16 | 2017-04-28T06:24:16 | 89,441,821 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.700440526008606,
"alphanum_fraction": 0.7147576808929443,
"avg_line_length": 36.328765869140625,
"blob_id": "04ea8b9b8b6a18fe4a50462e8718d03ccc60617f",
"content_id": "abad00e49fb2b1af2c9837f5dd21e5facca3fcad",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2724,
"license_type": "permissive",
"max_line_length": 255,
"num_lines": 73,
"path": "/README.md",
"repo_name": "jsharples/usgs_nwis",
"src_encoding": "UTF-8",
"text": "# USGS NWIS\n\nA lightweight extensible package to interact with the US Geological Survey's [National Water Information System.](https://waterservices.usgs.gov/) This package should be used by those familiar with the service, or willing to read it's documentation.\n\n\n## Features\n\n* Written in Python 3 \n* Uses gzip for all requests\n* Requires only the standard library \n\n\n## Usage\n\nLet's suppose we want stream flow data for Montague County, Texas (yeehaw Smokey) for the last 30 days. The relevant county code is `48337` as we can see from the USGS [state and county codes](https://help.waterdata.usgs.gov/code/county_query?fmt=html). \n\n```python\n>>> import usgs_nwis as us\n>>> my_sites = us.SitesQuery(major_filter = {'countyCd':'48337'})\n>>> my_sites.get_site_ids(**{'period':'P30D', 'siteType':'ST'})\n['07315525']\n```\n\nThere is only one site with stream flow data in the past 30 days.\nLet's get the the data:\n\n```python\n>>> my_data = us.DataBySites(sites=['07315525'],**{'period':'P30D', 'siteType':'ST'})\n>>> my_data.make_core_data()\n```\n\nThe full data, including all explanatory information and metadata, can be accessed as a nested dict via `my_data.data`. A greatly simplified selection of the data can be accessed via `my_data.core_data`\n\n\nFor more information on available filters refer to the useful links below, particularly the testing tools.\n\n### Notes on plotting and pandas\n\n#### matplotlib\nSince I have stuck with the standard library for this project, there is no plotting functionality included. However, for those wishing to plot the data using `matplotlib` the following function will give basic plots from a `core_data` dict.\n\n```python\nimport matplotlib.pyplot as plt\ndef plot_core_data(core_data):\n\n for ts in core_data:\n plt.figure(figsize=(15,7))\n plt.plot([us.BaseQuery._date_parse(x['dateTime']) for x in ts['data']],[x['value'] for x in ts['data']])\n plt.title(ts['site']+': '+ts['name'])\n plt.ylabel(ts['description'])\n #include plot customisation here\n plt.show()\n```\n\n#### pandas\nFor `pandas` users, each time series in the `core_data` dict can be converted to a `DataFrame` like so:\n\n```python\nimport pandas as pd\npd.DataFrame(my_data.core_data[0]['data'])\n```\n\n## Useful links\n\n* [Instantaneous value testing tool](https://waterservices.usgs.gov/rest/IV-Test-Tool.html)\n* [Daily value testing tool](https://waterservices.usgs.gov/rest/DV-Test-Tool.html)\n* [Parameter listing: Physical](https://help.waterdata.usgs.gov/code/parameter_cd_query?group_cd=PHY)\n* [Hydrological Unit Codes (HUC)](https://water.usgs.gov/GIS/huc_name.html)\n* [state and County codes](https://help.waterdata.usgs.gov/code/county_query?fmt=html)\n \n\n## Licence\nMIT"
},
{
"alpha_fraction": 0.5217733979225159,
"alphanum_fraction": 0.5250365734100342,
"avg_line_length": 30.62633514404297,
"blob_id": "87ce32873e13fd3765ef315ee315a119ff3b25e3",
"content_id": "1f06f012cc32ee82c0dfc1478b846d68e24bfe7f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8887,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 281,
"path": "/usgs_nwis/usgs_nwis.py",
"repo_name": "jsharples/usgs_nwis",
"src_encoding": "UTF-8",
"text": "\n__all__ = ['SitesQuery', 'BaseQuery', 'DataBySites']\n\nfrom urllib import parse, request\nimport json\nfrom datetime import datetime, timedelta\nimport gzip\nimport io\n\n\nclass pyUSGSError(Exception):\n pass\n\n\nclass BaseQuery(object):\n \"\"\"\n The basic query class to access the USGS water data service\n \n Parameters\n ----------\n major_filter: dict \n Single key value pair, values can be lists, keys must be one of:\n * sites - for a list of specifc site ids\n * stateCd - for a state abbreviaiton e.g. \"ny\"\n * huc - for a list of Hydrologic Unit Codes\n * bBox - specifiying a lat long bounding box\n * countyCd - for a list of county numbers \n Each query to the USGS NWIS must include one, and only one, of these filters.\n service: str\n The service to query, 'dv' for daily values, 'iv' for instantaneous\n data_format: str\n The format in which to get the data. Defult is `json`, if changed the `get_data` funciton will not work.\n \n \n \"\"\"\n \n \n def __init__(self, major_filter, service='dv', data_format = 'json'):\n\n self._format = {'format': data_format}\n self.allowed_filters = [\"sites\",\"stateCd\",\"huc\",\"bBox\",\"countyCd\"]\n \n if list(major_filter.keys())[0] in self.allowed_filters: \n self.major_filter = major_filter\n else:\n raise ValueError(\"major_filter must be one of: {}\".format(', '.join(self.allowed_filters)))\n \n self.base_url = \"https://waterservices.usgs.gov/nwis/{}/?\".format(service)\n self.data = None\n self.raw_data = None\n \n \n def get_data(self, **kwargs):\n \"\"\"\n Get data form the USGS webservice and parse to a python dictionary.\n \n Parameters\n ----------\n **kwargs : dict\n A dictionary specifying the search and filter items for this query.\n \n Returns\n ----------\n dict\n A dictionary of the requested data\n \"\"\"\n \n if not self.raw_data:\n self._get_raw_data(**kwargs)\n \n self.data = json.loads(self.raw_data)\n \n return self.data\n \n def _make_request_url(self, **kwargs):\n \"\"\"\n Make the request URL from kwargs\n \"\"\"\n kwargs.update(self.major_filter)\n kwargs.update(self._format)\n \n for arg in kwargs.keys():\n \n try:\n assert not isinstance(kwargs[arg], str)\n #multiple values must be seperated by a comma\n kwargs[arg] = ','.join(map(str, kwargs[arg]))\n \n except:\n pass\n \n return self.base_url + parse.urlencode(kwargs, doseq=True)\n \n \n def _get_raw_data(self, **kwargs):\n \"\"\"\n Get the raw data response \n \"\"\"\n \n self.request_url = self._make_request_url(**kwargs)\n \n #the USGS requests that users use gzip where possible\n data_request = request.Request(\n self.request_url,\n headers={\"Accept-Encoding\": \"gzip\"})\n data_response = request.urlopen(data_request)\n \n if data_response.info().get('Content-Encoding') == 'gzip':\n result = gzip.decompress(data_response.read())\n else:\n result = data_response.read()\n \n self.raw_data = result.decode(data_response.info().get_content_charset('utf-8'))\n \n return self.raw_data\n \n @staticmethod\n def _date_parse(str_date):\n \"\"\"\n Function for parsing dates.\n \n Note that the USGS use ISO_8601 for date formats, including a ':' in the timezone. 
\n There does not appear to be a simple way to parse this to a datetime object.\n \"\"\"\n if len(str_date) == 29 and str_date[-3]==':':\n str_date = str_date[:-3]+str_date[-2:]\n return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S.%f%z')\n \n if len(str_date) == 23:\n return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S.%f')\n \n if len(str_date) == 19:\n return datetime.strptime(str_date,'%Y-%m-%dT%H:%M:%S')\n \n if len(str_date) == 16:\n return datetime.strptime(str_date,'%Y-%m-%dT%H:%M')\n \n if len(str_date) == 10:\n return datetime.strptime(str_date,'%Y-%m-%d')\n \n\nclass SitesQuery(BaseQuery):\n \"\"\"\n Class to access the Site Service\n \n Parameters\n ----------\n major_filter: dict \n Single key value pair, values can be lists, keys must be one of:\n * sites - for a list of specifc site ids\n * stateCd - for a state abbreviaiton e.g. \"ny\"\n * huc - for a list of Hydrologic Unit Codes\n * bBox - specifiying a lat long bounding box\n * countyCd - for a list of county numbers \n Each query to the USGS NWIS must include one, and only one, of these filters.\n service: str\n The service to query, 'dv' for daily values, 'iv' for instantaneous\n data_format: str\n The format in which to get the data. Defult is `json`, if changed the `get_data` funciton will not work.\n \n \n \"\"\"\n\n \n def __init__(self, major_filter):\n \n super().__init__(major_filter = major_filter, service = 'site', data_format = 'rdb')\n self.sites = None\n \n #we cannot use BaseQuery.get_data because the Site Service does not offer JSON \n def get_data(self, **kwargs): \n \"\"\"\n Get data form the USGS Site Service and parse to a python dictionary.\n \n Parameters\n ----------\n **kwargs : dict\n A dictionary specifying the search and filter items for this query.\n \n Returns\n ----------\n dict\n A dictionary of the requested site data\n \"\"\"\n\n \n if not self.raw_data:\n self.raw_data = self._get_raw_data(**kwargs)\n \n info = ''\n header = []\n data = []\n n=0\n\n for l in self.raw_data.split('\\n'):\n if len(l) > 0:\n if l[0] == '#':\n info += l + '\\n'\n else:\n if n <3:\n header.append(l.split('\\t'))\n n += 1\n else:\n data.append(l.split('\\t'))\n\n data = [{header[0][x]: y[x] for x in range(len(header[0]))} for y in data]\n self.data = {'data':data, 'info':info}\n \n return self.data\n \n def get_site_ids(self, **kwargs):\n \"\"\"\n Create a list of the sites found by this query.\n \n Parameters\n ----------\n **kwargs : dict\n A dictionary specifying the search and filter items for this query.\n \n Returns\n ----------\n list\n A list of site IDs matching the search and filters for this query.\n \"\"\"\n if not self.data:\n self.get_data(**kwargs)\n self.sites = [s['site_no'] for s in self.data['data']]\n return self.sites\n \n \n \nclass DataBySites(BaseQuery):\n \"\"\"\n Class to access data bases on a list of sites. \n \n Parameters\n ----------\n sites: list \n A list of sites IDs to query for data\n service: str\n The service to query, 'dv' for daily values, 'iv' for instantaneous\n **kwargs : dict\n A dictionary specifying the search and filter items for this query. 
\n \n \"\"\"\n def __init__(self, sites, service='dv', **kwargs):\n \n super().__init__(major_filter = {\"sites\":sites}, service=service)\n\n self.data = self.get_data(**kwargs)\n self.core_data = None\n \n def make_core_data(self):\n \"\"\"\n Make a simplified version of the data containing only 'core' data fields.\n \n Parameters\n ----------\n none\n \n Returns\n ----------\n dict\n A simplified dictionary of the requested site data\n \"\"\"\n core_data = []\n for ts in self.data['value']['timeSeries']:\n \n core_data.append(dict(\n location = ts['sourceInfo']['geoLocation']['geogLocation'],\n name = ts['sourceInfo']['siteName'],\n site = ts['sourceInfo']['siteCode'][0]['value'],\n unit = ts['variable']['unit']['unitCode'],\n description = ts['variable']['variableDescription'],\n qual_codes = {x['qualifierCode']: x['qualifierDescription'] for x in ts['values'][0]['qualifier']},\n data = ts['values'][0]['value'],\n time_zone = ts['sourceInfo']['timeZoneInfo']['defaultTimeZone']['zoneOffset']\n ))\n \n self.core_data = core_data\n \n return self.core_data"
},
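The `_date_parse` helper in usgs_nwis.py above exists because NWIS emits offsets like `-05:00`, which older `strptime` `%z` implementations reject; the workaround deletes the colon first. Both the workaround and the modern alternative, in isolation (on Python 3.7+ the colon form is also accepted natively):

```python
from datetime import datetime

stamp = "2017-04-27T08:15:00.000-05:00"   # NWIS-style timestamp

# Workaround used above: strip the colon inside the UTC offset, then strptime.
fixed = stamp[:-3] + stamp[-2:]
print(datetime.strptime(fixed, "%Y-%m-%dT%H:%M:%S.%f%z"))

# Python 3.7+ handles the original form directly:
print(datetime.fromisoformat(stamp))
```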
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 24,
"blob_id": "ec7b5c140519022be279054620b6651a1b9077f5",
"content_id": "05c24531867e082b5f9a793fae1eceaf32799d5d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 1,
"path": "/usgs_nwis/__init__.py",
"repo_name": "jsharples/usgs_nwis",
"src_encoding": "UTF-8",
"text": "\nfrom .usgs_nwis import *\n"
}
] | 3 |
HilaryChang/ML-assignment | https://github.com/HilaryChang/ML-assignment | a9804fca8ea0fea76ee8a25a472838a446513e8a | b9c64362cd24846cac3e98810876c895136e8237 | 58ee29db3f0cd622f85ce0d22f9e1a7c858ea768 | refs/heads/master | 2020-03-15T20:06:55.233909 | 2018-05-06T08:57:10 | 2018-05-06T08:57:10 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6323034763336182,
"avg_line_length": 41.03947448730469,
"blob_id": "9aefbcd480d09f33c0895f5b783fdc818ef4fb8e",
"content_id": "ea679e860a91fa87072a4e82c5c0be8c993b12a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3269,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 76,
"path": "/assignment2/vgg16.py",
"repo_name": "HilaryChang/ML-assignment",
"src_encoding": "UTF-8",
"text": "from keras.models import Sequential\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense, Activation, Flatten, Conv2D, MaxPooling2D, AveragePooling2D\r\nfrom keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, BatchNormalization\r\nfrom keras.optimizers import SGD\r\nfrom skimage import io\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nimport tensorflow as tf\r\nimport keras.backend.tensorflow_backend as KTF\r\nimport keras\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef VGG_16(classify_num, weights_path=None, input_shape=[224, 224, 3]):\r\n img_input = Input(shape=input_shape)\r\n\r\n # Block 1\r\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\r\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\r\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\r\n\r\n # Block 2\r\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\r\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\r\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\r\n\r\n # Block 3\r\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\r\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\r\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\r\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\r\n\r\n # Block 4\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\r\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\r\n\r\n # Block 5\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\r\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\r\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\r\n\r\n\r\n # Classification block\r\n x = Flatten(name='flatten')(x)\r\n x = Dense(4096, activation='relu', name='fca')(x)\r\n x = Dense(4096, activation='relu', name='fcb')(x)\r\n x = Dense(classify_num, activation='softmax', name='Classification')(x)\r\n\r\n\r\n inputs = img_input\r\n # Create model.\r\n model = Model(inputs=inputs, outputs=x, name='vgg16')\r\n\r\n if weights_path:\r\n model.load_weights(weights_path)\r\n\r\n return model\r\n\r\nif __name__ == \"__main__\":\r\n img1 = cv2.resize(cv2.imread('1.pgm'), (224, 224)).astype(np.float32).tolist()\r\n img2 = cv2.resize(cv2.imread('2.pgm'), (224, 224)).astype(np.float32).tolist()\r\n data = np.array([img1, img2])\r\n\r\n encoder = OneHotEncoder()\r\n labels = encoder.fit_transform([[1], [2]]).toarray()\r\n\r\n model = VGG_16(classify_num=labels.shape[0])\r\n model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=1e-4), metrics=['accuracy'])\r\n model.fit(x=data, y=labels, epochs=1)\r\n \r\n out = model.predict(np.array([img1, img2]))\r\n print(out)\r\n print(np.argmax(out, axis=1))"
}
] | 1 |
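The __main__ block above derives classify_num from the one-hot label matrix. A small sketch (no Keras required) showing that sklearn's OneHotEncoder returns an array of shape (n_samples, n_classes), so the class count is the second axis, not the first:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

encoder = OneHotEncoder()
labels = encoder.fit_transform([[1], [2], [2]]).toarray()
print(labels.shape)               # (3, 2): three samples, two classes
print(labels.shape[1])            # 2 is the number of softmax units the model needs
print(np.argmax(labels, axis=1))  # recover the class index per sample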
wikty/Spiders | https://github.com/wikty/Spiders | 53045384e7d79028ef7f5edfd68070a4bf240a05 | 2d07513c6398450c3f57059a1a8c5a3c3124ad5d | 203411f5b39430be79d479eed5aec77b2489c2c1 | refs/heads/master | 2020-03-28T15:29:22.531313 | 2018-09-13T08:13:33 | 2018-09-13T08:13:33 | 148,599,200 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6272401213645935,
"alphanum_fraction": 0.6630824208259583,
"avg_line_length": 17.66666603088379,
"blob_id": "99bb88114e990bc3ffab6d373a9f7daee30c6584",
"content_id": "f447e2e09ce584289de36f0de286943b9051066e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 15,
"path": "/scrapy_tor/test/check_privoxy_http_port.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import requests\n\n# privoxy proxy port\nproxy_port = 8118\n\n# generate http session\ns = requests.Session()\ns.proxies = {\n\t\"http\": \"http://127.0.0.1:%d\" % proxy_port\n}\n\n# make http request\n#r = s.get(\"http://www.google.com\")\nr = s.get(\"https://www.atagar.com/echo.php\")\nprint(r.text)"
},
{
"alpha_fraction": 0.6578366160392761,
"alphanum_fraction": 0.6583884954452515,
"avg_line_length": 34.18446731567383,
"blob_id": "7b0003e878c91284e7106347e7f80b9a07fa13c4",
"content_id": "e36f08b244613946f8e3efbfd724c6f1fc49c680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3626,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 103,
"path": "/ejob/ejob/middlewares.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nimport os\nfrom scrapy import signals\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\n\n\nclass EjobSpiderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Response, dict\n # or Item objects.\n pass\n\n def process_start_requests(start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass BaseJsRequestMiddleware(object):\n \n def __init__(self, driver_path, extra_script_file=None):\n if not os.path.isfile(driver_path):\n raise Exception('driver path [%s] not exists' % driver_path)\n if extra_script_file and not os.path.isfile(extra_script_file):\n raise Exception('extra script file [%s] not exists' % extra_script_file)\n \n self.driver = None\n self.script = None\n if extra_script_file:\n with open(extra_script_file, 'r', encoding='utf8') as f:\n self.script = f.read()\n\n def get_dirver(self, **kwargs):\n if not self.driver:\n self.driver = webdriver.PhantomJS(executable_path=self.driver_path, **kwargs)\n return self.driver\n\n def process_request(self, request, spider):\n driver = self.get_dirver()\n driver.get(request.url)\n url = driver.current_url\n encoding = request.encoding\n if self.script:\n driver.execute_script(self.script)\n body = driver.page_source.encode(encoding)\n response = HtmlResponse(url=url, body=body, encoding=encoding)\n return response # end any process_request methods\n\n\nclass PhantomjsRequestMiddleware(BaseJsRequestMiddleware):\n \n def __init__(self, phantomjs_path=None, extra_script_file=None):\n super(PhantomjsRequestMiddleware, self).__init__(phantomjs_path, extra_script_file)\n\n @classmethod\n def from_crawler(cls, crawler):\n phantomjs_path = crawler.settings.get('PHANTOMJS_PATH')\n extra_script_file = crawler.settings.get('EXTRA_SCRIPT_FILE')\n\n return cls(phantomjs_path, extra_script_file)\n"
},
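A minimal sketch of how a downloader middleware like PhantomjsRequestMiddleware above would be wired up, assuming the ejob project's settings.py (not shown in this dump) uses the standard Scrapy keys; the priority number and both paths are placeholders, not values taken from the repository.

# Hypothetical settings.py entries for the middleware above.
DOWNLOADER_MIDDLEWARES = {
    'ejob.middlewares.PhantomjsRequestMiddleware': 543,  # arbitrary priority
}
PHANTOMJS_PATH = 'bin/phantomjs.exe'     # assumption: path to a real PhantomJS binary
EXTRA_SCRIPT_FILE = 'scripts/scroll.js'  # assumption: JS executed after page load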
{
"alpha_fraction": 0.7045055627822876,
"alphanum_fraction": 0.7126975059509277,
"avg_line_length": 28.482759475708008,
"blob_id": "891846d447654f7b2b0b803b45236c2147d91f60",
"content_id": "eab8cb7388f6d5a06caf35a8872cfda63b581aa0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1709,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 58,
"path": "/scrapy_tor/test/check_socks_to_tor.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import socks # SocksiPy module\nimport socket\nimport urllib.request\nimport stem.process\nfrom stem.util import term\n\nTOR_SOCKS_PORT = 9150\n\ndef set_socks_proxy():\n\ttry:\n\t\t# Set socks proxy\n\t\tsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, '127.0.0.1', TOR_SOCKS_PORT)\n\t\tsocket.socket = socks.socksocket\n\n\t\t# Perform DNS resolution through the socket \n\t\tdef getaddrinfo(*args):\n\t\t\treturn [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]\n\t\tsocket.getaddrinfo = getaddrinfo\n\texcept:\n\t\traise Exception(\"Socks proxy is incorrect\")\n\n# Uses urllib to fetch a site using SocksiPy for Tor over the TOR_SOCKS_PORT\ndef query(url):\n\ttry:\n\t\treturn urllib.request.urlopen(url).read()\n\texcept:\n\t\treturn \"Unable to reach %s \" % url\n\n# Start an instance of Tor configured to only exit through Russia. This prints\n# Tor's bootstrap information as it starts. Note that this likely will not\n# work if you have another Tor instance running.\ndef launch_tor():\n\tprint(term.format(\"Starting Tor:\\n\", term.Attr.BOLD))\n\tdef print_bootstrap_lines(line):\n\t\tif \"Bootstrapped \" in line:\n\t\t\tprint(term.format(line, term.Color.BLUE))\n\treturn stem.process.launch_tor_with_config(\n\t\tconfig={\n\t\t\t'SocksPort': str(TOR_SOCKS_PORT),\n\t\t\t'ExitNodes': '{ru}', # tor proxy exit node in the country Russia\n\t\t},\n\t\tinit_msg_handler = print_bootstrap_lines,\n\t)\n\ndef check_endpoint():\n\tprint(term.format(\"\\nChecking our endpoint:\\n\", term.Attr.BOLD))\n\tprint(term.format(query(\"https://www.atagar.com/echo.php\"), term.Color.BLUE))\n\ndef kill_tor(p):\n\tp.kill()\n\nif __name__ == '__main__':\n\tset_socks_proxy()\n\t#process = launch_tor()\n\t#check_endpoint()\n\t#kill_tor(process)\n\t# https://www.atagar.com/echo.php\n\tprint(query(\"http://icanhazip.com/\"))"
},
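An alternative to monkey-patching socket as the script above does: assuming the PySocks extra is installed (pip install requests[socks]), requests can route through the same Tor SOCKS port directly. The socks5h scheme keeps DNS resolution inside the proxy, which matches the custom getaddrinfo trick above.

import requests

TOR_SOCKS_PORT = 9150

session = requests.Session()
session.proxies = {
    'http': 'socks5h://127.0.0.1:%d' % TOR_SOCKS_PORT,
    'https': 'socks5h://127.0.0.1:%d' % TOR_SOCKS_PORT,
}
# The echo endpoint reports the address it sees, i.e. the Tor exit node.
print(session.get('https://www.atagar.com/echo.php').text)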
{
"alpha_fraction": 0.47826087474823,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 14.666666984558105,
"blob_id": "be57e2cf9eb835282613d1e64d958e55ec815df8",
"content_id": "0be61b73b264ffa459edce468da410ab39484c05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 46,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 3,
"path": "/netmusic/requirements.txt",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "requests==2.13.0\nScrapy==1.3.3\nselenium==3.4.3"
},
{
"alpha_fraction": 0.6691505312919617,
"alphanum_fraction": 0.6766020655632019,
"avg_line_length": 23.870370864868164,
"blob_id": "08c94c89ee56e530c1ffc6d1f03dfd9301d8adac",
"content_id": "e106307cbf0c519b0481fca7d6c210baee020e25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1342,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 54,
"path": "/china_stars/stars/stars/db.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import sqlite3, sys\nimport settings\n\nsqlite_file = settings.SQLITE_FILE\nsqlite_table = settings.SQLITE_TABLE\nsqlite_table_desc = settings.SQLITE_TABLE_DESC\n\ndef create_table():\n\tclient = sqlite3.connect(sqlite_file)\n\tcursor = client.cursor()\n\n\tsql = 'CREATE TABLE IF NOT EXISTS {table} ({fields})'\n\tfields = ['id INTEGER PRIMARY KEY AUTOINCREMENT']\n\tfor field, dtype in sqlite_table_desc.items():\n\t\tif dtype == 'i':\n\t\t\tfield = '%s INTEGER' % field\n\t\telif dtype == 'S':\n\t\t\tfield = '%s TEXT' % field\n\t\telse:\n\t\t\tfield = '%s VARCHAR(255)' % field\n\t\tfields.append(field)\n\tsql = sql.format(table=sqlite_table, fields=', '.join(fields))\n\tprint('SQL:', sql)\n\tcursor.execute(sql)\n\tclient.commit()\n\tclient.close()\n\ndef select_table():\n\tclient = sqlite3.connect(sqlite_file)\n\tcursor = client.cursor()\n\tsql = 'SELECT * FROM {table}'.format(table=sqlite_table)\n\tresults = cursor.execute(sql)\n\tprint('SQL:', sql)\n\tfor row in results:\n\t\tprint(row)\n\tclient.close()\n\ndef delete_table():\n\tclient = sqlite3.connect(sqlite_file)\n\tcursor = client.cursor()\n\tsql = 'DROP TABLE IF EXISTS {table}'.format(table=sqlite_table)\n\tcursor.execute(sql)\n\tprint('SQL:', sql)\n\tclient.close()\n\nif __name__ == '__main__':\n\tif len(sys.argv)<2:\n\t\tselect_table()\n\telif sys.argv[1] == 'create':\n\t\tcreate_table()\n\telif sys.argv[1] == 'delete':\n\t\tdelete_table()\n\telse:\n\t\tselect_table()"
},
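create_table and select_table above interpolate table and field names with format; values, by contrast, belong in sqlite3's ? placeholders so the driver escapes them. A short sketch of inserting one row safely, assuming create_table() has already built the stars table in dump.db (the column names come from SQLITE_TABLE_DESC):

import sqlite3

client = sqlite3.connect('dump.db')
cursor = client.cursor()
# Identifiers (table/column names) cannot be parameterized, but values can:
cursor.execute(
    'INSERT INTO stars (name, gender) VALUES (?, ?)',
    ('某明星', 'female'),  # escaped by the driver, not by string formatting
)
client.commit()
print(cursor.lastrowid)
client.close()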
{
"alpha_fraction": 0.7434456944465637,
"alphanum_fraction": 0.7659175992012024,
"avg_line_length": 34.63333511352539,
"blob_id": "37e8b76da3690721a71c9ab880beb6d9d46e0921",
"content_id": "9072d388b3d71f2a3cd394d711fc953236388959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1084,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 30,
"path": "/netmusic/test/login.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.select import Select\n\nimport config\n\nusername = '15620161152193'\npassword = '160318'\n\ndriver = webdriver.Firefox(executable_path=os.path.abspath(config.GECKODRIVER_PATH))\n# WebDriver will wait until the page has fully loaded (that is, the “onload” event has fired)\ndriver.get(\"http://event.wisesoe.com/\")\n# find element by its name attribute\nuser_elem = driver.find_element_by_name(\"UserName\")\nuser_elem.clear()\nuser_elem.send_keys(username)\npass_elem = driver.find_element_by_name('Password')\npass_elem.clear()\npass_elem.send_keys(password)\ndriver.find_element_by_class_name('click-logon').send_keys(Keys.RETURN) \n\n# control_elem = driver.find_element_by_id('default-menu-control')\n# control_elem = control_elem.find_element_by_link_text('My reservations').click()\n\n# select_elem = driver.find_element_by_id('ctl00_MainContent_termddl')\n# select_elem = Select(select_elem)\n# select_elem.select_by_value('2016-2017学年秋季学期')\n# driver.close()"
},
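The login flow above submits the form and then immediately looks up page elements. A hedged sketch of the same pattern with an explicit wait, using Selenium's WebDriverWait/expected_conditions so the reservation link is only queried once it is clickable; the link text is taken from the commented-out lines above, and `driver` is assumed to be the logged-in Firefox instance.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def open_my_reservations(driver, timeout=10):
    # Poll up to `timeout` seconds instead of failing on a not-yet-rendered page.
    wait = WebDriverWait(driver, timeout)
    link = wait.until(
        EC.element_to_be_clickable((By.LINK_TEXT, 'My reservations'))
    )
    link.click()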
{
"alpha_fraction": 0.6356666088104248,
"alphanum_fraction": 0.6438288688659668,
"avg_line_length": 45.482757568359375,
"blob_id": "f0958aca179c701711a87a5958bb2f26cfd584e6",
"content_id": "82266053287fa4f5bb240792cbb164ba95410325",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4101,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 87,
"path": "/ejob/ejob/spiders/lagou_job_spider_test.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os, json\nfrom urllib.parse import quote\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom ejob.items import JobItem\nfrom ejob.item_loaders import LagouJobItemLoader\n\nclass LagouJobSpiderSpider(scrapy.spiders.CrawlSpider):\n\tname = \"lagou_job_spider_test\"\n\tallowed_domains = [\"lagou.com\"]\n\turls = [\n\t\t('理财顾问', 'https://www.lagou.com/zhaopin/licaiguwen/', '123'),\n\t]\n\tstart_urls = ['https://www.lagou.com/']\n\trules = [\n\t\tRule(LinkExtractor(allow=('/zhaopin/[^/]+/\\d+/$', ), restrict_xpaths=('//*[@class=\"pager_container\"]', )), process_request='preprocess_request', follow=True),\n\t\tRule(LinkExtractor(allow=('/jobs/\\d+\\.html$', ), restrict_xpaths=('//*[@id=\"s_position_list\"]')), callback='parse_job')\n\t]\n\t# urls = [('', 'https://www.lagou.com/jobs/2123649.html', ''), ('', 'https://www.lagou.com/jobs/3248331.html', '')]\n\tsite = '拉勾网(https://www.lagou.com/)'\n\tquery_str = '&'.join(['{}'.format(quote('city=全国'))])\n\n\tdef start_requests(self):\n\t\tfor category_name, category_url, category_id in self.urls:\n\t\t\tcategory_url = '?'.join([category_url, self.query_str])\n\t\t\trequest = scrapy.Request(category_url, dont_filter=False)\n\t\t\trequest.meta['dont_redirect'] = True\n\t\t\trequest.meta['category_name'] = category_name\n\t\t\trequest.meta['category_id'] = category_id\n\t\t\tyield request\n\n\tdef preprocess_request(self, request):\n\t\t# request.replace(cookies={'index_location_city': '%E4%B8%8A%E6%B5%B7'})\n\t\t# request.replace(url='?'.join(request.url, self.query_str))\n\t\treturn request\n\n\tdef parse_job(self, response):\n\t\titem = JobItem()\n\t\tl = LagouJobItemLoader(item=JobItem(), response=response)\n\t\tl.add_value('url', response.url)\n\t\tl.add_value('site', self.site)\n\t\tl.add_value('requirements', '')\n\t\tl.add_value('description', '')\n\t\t\n\t\txpath = '//*[contains(@class, \"position-content\")]/*[contains(@class, \"position-content-l\")]'\n\t\tcl = response.xpath(xpath)\n\t\tjn = cl.xpath('*[@class=\"job-name\"]')\n\t\tl.add_value('position', jn.xpath('*[@class=\"name\"]/text()').extract_first())\n\t\tl.add_value('department', jn.xpath('*[@class=\"company\"]/text()').extract_first())\n\t\tjr =cl.xpath('*[@class=\"job_request\"]')\n\t\tt = jr.xpath('p/span/text()').extract()\n\t\tl.add_value('salary', t[0])\n\t\tl.add_value('city', t[1])\n\t\tl.add_value('exprience', t[2])\n\t\tl.add_value('education', t[3])\n\t\tl.add_value('jobtype', t[4])\n\t\tl.add_value('tags', jr.xpath('ul[contains(@class, \"position-label\")]/li/text()').extract())\n\t\tl.add_value('postdate', jr.xpath('*[@class=\"publish_time\"]/text()').re_first(r'(\\d{4}-\\d{2}-\\d{2})'))\n\t\tjd = response.xpath('//*[@id=\"job_detail\"]')\n\t\tl.add_value('temptation', jd.xpath('*[contains(@class, \"job-advantage\")]/p/text()').extract())\n\t\tl.add_value('rawpost', jd.xpath('*[contains(@class, \"job_bt\")]//p/text()').extract())\n\t\tja = jd.xpath('*[contains(@class, \"job-address\")]')\n\t\taddress = ja.xpath('*[contains(@class, \"work_addr\")]/a[contains(@href, \"https://www.lagou.com/\")]/text()').extract()\n\t\taddress += ja.xpath('*[contains(@class, \"work_addr\")]/text()').extract()\n\t\tl.add_value('address', address)\n\t\tlongitude = ja.xpath('*[@name=\"positionLng\"]/@value').extract_first(default='')\n\t\tlatitude = ja.xpath('*[@name=\"positionLat\"]/@value').extract_first(default='')\n\t\tl.add_value('location', ','.join([longitude, 
latitude]))\n\n\t\txpath = '//*[@id=\"job_company\"]'\n\t\tjc = response.xpath(xpath)\n\t\tl.add_value('company_name', jc.xpath('.//h2/text()').extract_first())\n\t\tfor li in jc.xpath('.//ul[contains(@class, \"c_feature\")]/li'):\n\t\t\tfeature = li.xpath('*[contains(@class, \"hovertips\")]/text()').extract_first()\n\t\t\tvalue = ''.join([s.strip() for s in li.xpath('text()').extract() if s.strip()])\n\t\t\tif '领域' in feature:\n\t\t\t\tl.add_value('company_brief', '领域: {}'.format(value))\n\t\t\telif '发展阶段' in feature:\n\t\t\t\tl.add_value('company_brief', '发展阶段: {}'.format(value))\n\t\t\telif '规模' in feature:\n\t\t\t\tl.add_value('company_brief', '规模: {}'.format(value))\n\t\t\telif '公司主页' in feature:\n\t\t\t\tl.add_value('company_url', li.xpath('a/@href').extract_first())\n\t\tyield l.load_item()"
},
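The parse_job callback above splits the job_request spans by position. A standalone sketch of that extraction with scrapy.Selector on a trimmed-down HTML fragment; the fragment is illustrative, not the real Lagou markup.

from scrapy.selector import Selector

html = '''
<dd class="job_request">
  <p><span>15k-25k /</span><span>上海 /</span><span>经验3-5年 /</span>
     <span>本科 /</span><span>全职</span></p>
</dd>
'''
sel = Selector(text=html)
t = sel.xpath('//*[@class="job_request"]/p/span/text()').extract()
# Strip the trailing " /" separators just like the loader's str_strip(' /') processor
salary, city, exprience, education, jobtype = [s.strip(' /') for s in t]
print(salary, city, exprience, education, jobtype)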
{
"alpha_fraction": 0.6138165593147278,
"alphanum_fraction": 0.6157040596008301,
"avg_line_length": 22.977375030517578,
"blob_id": "d4fc7e692a5492fdc767ccb9e81a4bb003cb0117",
"content_id": "f3bd77809385a392d6d2a2c0ba1ecffa6c270de0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5298,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 221,
"path": "/ejob/db/sqlite_db.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import sqlite3\n\nfrom .base_db import BaseDb\n\nclass SqliteDb(BaseDb):\n\tdef __init__(self, sqlite_file=None, sqlite_mode=None):\n\t\tif sqlite_file:\n\t\t\tself.sqlite_file = sqlite_file\n\t\telse:\n\t\t\tself.sqlite_file = 'dump.db'\n\t\t\n\t\tif sqlite_mode:\n\t\t\tself.debug = sqlite_mode\n\t\telse:\n\t\t\tself.debug = True\n\n\t\tself.client = sqlite3.connect(self.sqlite_file)\n\n\tdef __del__(self):\n\t\tself.client.close()\n\n\tdef close(self):\n\t\tself.client.close()\n\n\tdef sql(self, q):\n\t\tcursor = self.client.cursor()\n\t\tresults = cursor.execute(q)\n\t\tself.client.commit()\n\t\treturn results\n\n\tdef create_table(self, tbl_name, fields, extra=''):\n\t\t'''\n\t\t\tfields = {'field_name': 'field_type'}\n\t\t'''\n\t\tcursor = self.client.cursor()\n\n\t\tsql = 'CREATE TABLE IF NOT EXISTS {table} ({fields})'\n\t\tfds = ['id INTEGER PRIMARY KEY AUTOINCREMENT']\n\t\tfor field_name, field_type in fields.items():\n\t\t\tif field_type == 'i':\n\t\t\t\tfield_name = '`%s` INTEGER' % field_name\n\t\t\telif field_type == 'f':\n\t\t\t\tfield_name = '`%s` FLOAT' % field_name\n\t\t\telif field_type == 's':\n\t\t\t\tfield_name = '`%s` TEXT' % field_name\n\t\t\telif field_type == 'b':\n\t\t\t\tfield_name = '`%s` BLOB' % field_name\n\t\t\telif field_type == 'n':\n\t\t\t\tfield_name = '`%s` NULL' % field_name\n\t\t\telse:\n\t\t\t\tfield_name = '`%s` TEXT' % field_name\n\t\t\tfds.append(field_name)\n\t\tif extra:\n\t\t\tfds.append(extra)\n\t\tsql = sql.format(table=tbl_name, fields=', '.join(fds))\n\t\t\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\t\t\n\t\tcursor.execute(sql)\n\t\tself.client.commit()\n\n\tdef select_table(self, tbl_name, fields=[], where_condition=''):\n\t\t'''\n\t\t\tfields = [name1, name2,...]\n\t\t'''\n\t\tcursor = self.client.cursor()\n\t\tif fields:\n\t\t\tfields = ['`{}`'.format(field) for field in fields]\n\t\t\tfields = ', '.join(fields)\n\t\telse:\n\t\t\tfields = '*'\n\t\tsql = 'SELECT {fields} FROM {table}'.format(table=tbl_name, fields=fields)\n\n\t\tif where_condition:\n\t\t\tsql += ' WHERE {condition}'.format(condition=where_condition)\n\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tresults = cursor.execute(sql)\n\t\treturn [row for row in results]\n\n\tdef count_table(self, tbl_name, where={}):\n\t\t'''\n\t\t\twhere = {'field_name': '>30'}\n\t\t'''\n\t\tcursor = self.client.cursor()\n\t\tif where:\n\t\t\twhere = ' '.join([field_name+where[field_name] for field_name in where])\n\t\telse:\n\t\t\twhere = ''\n\n\t\tsql = 'SELECT COUNT(*) FROM {table}'.format(table=tbl_name)\n\t\tif where:\n\t\t\tsql += ' WHERE {where}'.format(where=where)\n\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tcursor.execute(sql)\n\t\tresult = cursor.fetchone()\n\t\treturn result[0] if result else None\n\n\tdef insert_table(self, tbl_name, fields={}):\n\t\t'''\n\t\t\tfields = {'field_name': 'field_value'}\n\t\t'''\n\t\tcursor = self.client.cursor()\n\n\t\tsql = 'INSERT INTO {table} ({keys}) VALUES ({values})'\n\t\tkeys = ['`{}`'.format(key) for key in fields.keys()]\n\t\tsql = sql.format(\n\t\t\ttable=tbl_name,\n\t\t\tkeys= ', '.join(keys),\n\t\t\tvalues=', '.join(['?']*len(keys))\n\t\t)\n\t\t\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tcursor.execute(sql, list(fields.values()))\n\t\tself.client.commit()\n\t\treturn cursor.lastrowid\n\n\tdef insert_many_table(self, tbl_name, keys, values):\n\t\t'''\n\t\tvalues is generator or iterator\n\t\t'''\n\t\tcursor = self.client.cursor()\n\n\t\tsql = 'INSERT INTO {table} ({keys}) VALUES 
({values})'.format(\n\t\t\ttable=tbl_name,\n\t\t\tkeys= ', '.join(['`{}`'.format(key) for key in keys]),\n\t\t\tvalues=', '.join([':{}'.format(key) for key in keys])\n\t\t)\n\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tcursor.executemany(sql, values)\n\t\tself.client.commit()\n\t\treturn cursor.rowcount\n\n\tdef delete_table(self, tbl_name):\n\t\tcursor = self.client.cursor()\n\n\t\tsql = 'DROP TABLE IF EXISTS {table}'.format(table=tbl_name)\n\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tcursor.execute(sql)\n\t\tself.client.commit()\n\n\tdef update_table(self, tbl_name, fields, where_condition=''):\n\t\t'''\n\t\t\tfields = {\n\t\t\t\t'field_name': 'field_value', # assign to the field\n\t\t\t\t'field_name': '++field_value' # add into the field\n\t\t\t}\n\n\t\t\twhere_condition = 'username=\"wikty\"'\n\t\t'''\n\t\tcursor = self.client.cursor()\n\n\t\tfds = []\n\t\tfor field_name, field_value in fields.items():\n\t\t\tfield_name = str(field_name)\n\t\t\tfield_value = str(field_value)\n\t\t\tif field_value.startswith('++'):\n\t\t\t\tfds.append(field_name+'='+field_name+'+'+field_value[2:])\n\t\t\telse:\n\t\t\t\tfds.append(field_name+'='+field_value)\n\t\tsql = 'UPDATE {table} SET {fields}'.format(\n\t\t\ttable=tbl_name,\n\t\t\tfields=' '.join(fds)\n\t\t)\n\t\t\n\t\tif where_condition:\n\t\t\tsql += ' WHERE {condition}'.format(condition=where_condition)\n\t\t\n\t\tif self.debug:\n\t\t\tprint('SQL:', sql)\n\n\t\tcursor.execute(sql)\n\t\tself.client.commit()\n\t\treturn cursor.rowcount\n\n\t# def increment_table(self, tbl_name, field_name, , n=1):\n\t# \t'''\n\t# \t\tfield_name = 'age'\n\t# \t\tequal_condition = ['another_field_name', 'value']\n\t# \t'''\n\t# \tcursor = self.client.cursor()\n\n\t# \tsql = 'SELECT {field} FROM {table} WHERE {condition}'.format(\n\t# \t\ttable=tbl_name,\n\t# \t\tfield=field_name,\n\t# \t\tcondition=where_condition\n\t# \t)\n\n\t# \tcursor.execute(sql)\n\t# \tresult = cursor.fetchone()\n\t# \tcount = result[0] if result else None\n\n\t# \tif count is None:\n\t# \t\tfields = {}\n\t# \t\tfields[field_name] = n\n\t# \t\tself.insert_table(tbl_name, fields)\n\t# \telse:\n\t# \t\tn = count+n\n\t# \t\tsql = 'UPDATE {table} SET {field} = {n} WHERE {field}{condition}'.format(\n\t# \t\t\ttable=tbl_name,\n\t# \t\t\tfield=field_name,\n\t# \t\t\tcondition=where_condition,\n\t# \t\t\tn=n\n\t# \t\t)\n\t# \t\tcursor.execute(sql)\n\t# \t\tself.client.commit()\n\t# \treturn n"
},
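A short usage sketch for the SqliteDb class above, exercising create_table, insert_table, count_table and select_table; it assumes it runs from the ejob project root (where db/__init__.py exports the class as SQLiteDB), and the file name is a placeholder.

from db import SQLiteDB

db = SQLiteDB(sqlite_file='test.db', sqlite_mode=True)
# 's' maps to TEXT and 'i' to INTEGER per create_table's type codes
db.create_table('jobs', {'position': 's', 'salary': 's', 'views': 'i'})
rowid = db.insert_table('jobs', {'position': '理财顾问', 'salary': '15k-25k', 'views': 0})
print('inserted row', rowid)
print('rows so far:', db.count_table('jobs'))
for row in db.select_table('jobs', fields=['position', 'salary']):
    print(row)
db.close()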
{
"alpha_fraction": 0.6757425665855408,
"alphanum_fraction": 0.6944444179534912,
"avg_line_length": 32.675926208496094,
"blob_id": "ff028e63de4bc69e4c61deae2addee77eae7e43a",
"content_id": "24ea76e16858d61b71ac376191c9d7c4a38f279f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3644,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 108,
"path": "/notification/notification/spiders/event_wisesoe_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport time, os, json\nimport requests\nimport scrapy\nfrom scrapy.mail import MailSender\n\ndef get_email_body(title, speaker, address, showtime, reservetime):\n\treturn \"\"\"\nTitle: {title}\nSpeaker: {speaker}\nAddress: {address}\nShowTime: {showtime}\nReserveTime: {reservetime}\n\n\"\"\".format(title=title, speaker=speaker, address=address, showtime=showtime, reservetime=reservetime)\n\ndef send_simple_message(title, body, receivers):\n\treturn requests.post(\n\t\t\"https://api.mailgun.net/v3/sandboxdd4b279d71df4a03ad2388f4af5c81d8.mailgun.org/messages\",\n\t\tauth=(\"api\", \"key-fd8e348bdec8df0586d1a4801aada0e4\"),\n\t\tdata={\n\t\t\t\"from\": \"[email protected]\",\n\t\t\t\"to\": receivers,\n\t\t\t\"subject\": title,\n\t\t\t\"text\": body}\n\t)\n\nclass EventWisesoeSpiderSpider(scrapy.Spider):\n\tname = \"event_wisesoe_spider\"\n\tlogin_url = 'http://account.wisesoe.com/WcfServices/SSOService.svc/Account/Logon?callback=jQuery180047063062154941493_1492137595375&UserName={username}&Password={password}&_={timestamp}'\n\thome_url = 'http://event.wisesoe.com/Authenticate.aspx?returnUrl=Default.aspx'\n\t\n\tdef start_requests(self):\n\t\tconfig_file = self.settings.get('EVENT_WISESOE_COM_CONFIG')\n\t\tif not os.path.exists(config_file):\n\t\t\tself.logger.error('wisesoe config file not exists')\n\t\t\treturn None\n\t\t\n\t\tself.config = {}\n\t\twith open(config_file, 'r', encoding='utf8') as f:\n\t\t\tself.config = json.loads(f.read())\n\t\tif not self.config:\n\t\t\tself.logger.error('wisesoe config file is emtpy')\n\t\t\treturn None\n\t\t\n\t\tself.username = self.config['username']\n\t\tif not self.username:\n\t\t\tself.logger.error('wisesoe username is emtpy')\n\t\t\treturn None\n\n\t\tself.password = self.config['password']\n\t\tif not self.password:\n\t\t\tself.logger.error('wisesoe password is emtpy')\n\t\t\treturn None\n\n\t\tself.receivers = self.config['receivers']\n\t\tif not self.receivers:\n\t\t\tself.logger.error('wisesoe receciver is emtpy')\n\t\t\treturn None\n\n\t\tyield scrapy.Request(\n\t\t\tself.login_url.format(\n\t\t\t\tusername=self.username, \n\t\t\t\tpassword=self.password, \n\t\t\t\ttimestamp=int(time.time())),\n\t\t\tcallback=self.parse,\n\t\t\tmeta={'cookiejar': 1}\n\t\t)\n\n\tdef parse(self, response):\n\t\t# self.logger.error(response.body.decode('utf-8'))\n\t\tyield scrapy.Request(\n\t\t\tself.home_url,\n\t\t\tcallback=self.home_parse,\n\t\t\tmeta={'cookiejar': response.meta['cookiejar']}\n\t\t)\n\n\tdef home_parse(self, response):\n\t\t# self.logger.error(response.body.decode('utf-8'))\n\t\txpath = '//*[@id=\"default-menu-control\"]//a[contains(text(), \"My reservations\")]'\n\t\turl = response.xpath(xpath).xpath('@href').extract_first()\n\t\turl = response.urljoin(url)\n\t\tyield scrapy.Request(\n\t\t\turl,\n\t\t\tcallback=self.parse_my_reservations,\n\t\t\tmeta={'cookiejar': response.meta['cookiejar']}\n\t\t)\n\n\tdef parse_my_reservations(self, response):\n\t\t# self.logger.error(response.body.decode('utf-8'))\n\t\txpath = '//table[@id=\"ctl00_MainContent_GridView1\"]/tbody/tr[position() > 1]'\n\t\tmsg = []\n\t\tmax_timestamp = self.config['timestamp']\n\t\tfor tr in response.xpath(xpath):\n\t\t\ttitle = tr.xpath('td[2]/text()').extract_first(default='').strip()\n\t\t\tspeaker = tr.xpath('td[3]/text()').extract_first(default='').strip()\n\t\t\taddress = tr.xpath('td[4]/text()').extract_first(default='').strip()\n\t\t\tshowtime = tr.xpath('td[5]/text()').extract_first(default='').strip()\n\t\t\treservation_time = 
tr.xpath('td[6]/text()').extract_first(default='').strip()\n\t\t\tif reservation_time:\n\t\t\t\ttimestamp = int(time.mktime(time.strptime(reservation_time, '%m/%d/%Y %I:%M:%S %p')))\n\t\t\t\tif timestamp > max_timestamp:\n\t\t\t\t\tmax_timestamp = timestamp\n\t\t\t\tif timestamp > self.config['timestamp']:\n\t\t\t\t\tmsg.append(get_email_body(title, speaker, address, showtime, reservation_time))\n\n\t\tif msg:\n\t\t\tsend_simple_message('讲座通知', '\\n'.join(msg), self.receivers)"
},
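The spider above converts the site's US-style reservation times to epoch seconds before comparing them against the stored watermark. A standalone sketch of that conversion; the sample value and the watermark are illustrative.

import time

reservation_time = '04/14/2017 09:30:00 PM'  # sample value in the site's format
timestamp = int(time.mktime(time.strptime(reservation_time, '%m/%d/%Y %I:%M:%S %p')))
last_seen = 0  # stands in for the 'timestamp' watermark kept in the JSON config
if timestamp > last_seen:
    print('new reservation at', reservation_time, '->', timestamp)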
{
"alpha_fraction": 0.7120000123977661,
"alphanum_fraction": 0.7160000205039978,
"avg_line_length": 27.320755004882812,
"blob_id": "fd5bdd6e3a6c7099b7045fc4309324324a24883c",
"content_id": "a6767595384d754ed61d662e54ef101f858637da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1500,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 53,
"path": "/scrapy_tor/test/check_pycurl_to_tor.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import io\nimport pycurl\nimport stem.process\nfrom stem.util import term\n\nTOR_SOCKS_PORT = 9150\n\n# Uses pycurl to fetch a site using the proxy on the SOCKS_PORT\ndef query(url):\n\toutput = io.BytesIO()\n\n\tquery = pycurl.Curl()\n\tquery.setopt(pycurl.URL, url)\n\tquery.setopt(pycurl.PROXY, 'localhost')\n\tquery.setopt(pycurl.PROXYPORT, TOR_SOCKS_PORT)\n\tquery.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)\n\tquery.setopt(pycurl.WRITEFUNCTION, output.write)\n\n\ttry:\n\t\tquery.perform()\n\t\treturn output.getvalue().decode('utf8')\n\texcept pycurl.error as e:\n\t\treturn \"Unable to reach %s (%s)\" % (url, e)\n\n# Start an instance of Tor configured to only exit through Russia. This prints\n# Tor's bootstrap information as it starts. Note that this likely will not\n# work if you have another Tor instance running.\ndef launch_tor():\n\tprint(term.format(\"Starting Tor:\\n\", term.Attr.BOLD))\n\tdef print_bootstrap_lines(line):\n\t\tif \"Bootstrapped \" in line:\n\t\t\tprint(term.format(line, term.Color.BLUE))\n\treturn stem.process.launch_tor_with_config(\n\t\tconfig={\n\t\t\t'SocksPort': str(TOR_SOCKS_PORT),\n\t\t\t'ExitNodes': '{ru}', # tor proxy exit node in the country Russia\n\t\t},\n\t\tinit_msg_handler = print_bootstrap_lines,\n\t)\n\ndef kill_tor(p):\n\tp.kill()\n\ndef check_endpoint():\n\tprint(term.format(\"\\nChecking our endpoint:\\n\", term.Attr.BOLD))\n\t#https://www.atagar.com/echo.php\n\tprint(term.format(query(\"http://ip.cn/\"), term.Color.BLUE))\n\n\nif __name__ == '__main__':\n\t#process = launch_tor()\n\tcheck_endpoint()\n\t#kill_tor(process)"
},
{
"alpha_fraction": 0.6959064602851868,
"alphanum_fraction": 0.7017543911933899,
"avg_line_length": 23.571428298950195,
"blob_id": "d7dff505be7a7f3c1c5c11b0c0c48a2d4a2c8f79",
"content_id": "75d40ab92e11822a4d6a9aa5c4fb9e403d9e8c9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 171,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 7,
"path": "/ejob/parser/parse_jobs_description.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "from scrapy.selector import Selector\n\ndef parse(filename):\n\tcontent = ''\n\twith open(filename, 'r', encoding='utf8') as f:\n\t\tcontent = f.read()\n\tsl = Selector(text=content)"
},
{
"alpha_fraction": 0.7845118045806885,
"alphanum_fraction": 0.7946127653121948,
"avg_line_length": 41.57143020629883,
"blob_id": "e3b46b08d5da4c627e229421535ed714dee293dc",
"content_id": "804a09e115d6de4c7b2974065f05738fbf038494",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 7,
"path": "/scrapy_tor/test/check_selenium.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# Python language bindings for Selenium WebDriver\n# https://pypi.python.org/pypi/selenium\nfrom selenium.webdriver.phantomjs.webdriver import WebDriver\n\ndirver = WebDriver(executable_path=\"../example/phantomjs-2.1.1-windows/bin/phantomjs.exe\")\ndirver.get('http://www.baidu.com')\nprint(dirver.title)"
},
{
"alpha_fraction": 0.8372092843055725,
"alphanum_fraction": 0.8372092843055725,
"avg_line_length": 43,
"blob_id": "3a2632c3dec69535665b5f3ad2e4bfb2a3cd60e6",
"content_id": "a647073c4b9a60fab31eb67871eb4339388d25cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 43,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 1,
"path": "/ejob/db/__init__.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "from .sqlite_db import SqliteDb as SQLiteDB"
},
{
"alpha_fraction": 0.6935123205184937,
"alphanum_fraction": 0.7024608254432678,
"avg_line_length": 26.9375,
"blob_id": "4abbd2c0e8ae492a2c76923b9949c82d319d222a",
"content_id": "eb66996f9928cf7963ae12af199a495f626ba330",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 447,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 16,
"path": "/scrapy_tor/example/example/spiders/showip.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\nclass ShowIpSpider(scrapy.Spider):\n\tname = \"showip\"\n\t#start_urls = ['http://icanhazip.com']\n\turl = 'http://icanhazip.com'\n\n\tdef start_requests(self):\n\t\t# for i in range(200):\n\t\t# \tyield scrapy.Request(self.url, callback=self.parse, dont_filter=True)\n\t\tyield scrapy.Request(self.url, callback=self.parse, dont_filter=True)\n\n\tdef parse(self, response):\n\t\tself.logger.info(response.body)\n\t\tprint(response.body)\n"
},
{
"alpha_fraction": 0.4893267750740051,
"alphanum_fraction": 0.6962233185768127,
"avg_line_length": 15.45945930480957,
"blob_id": "9df2a00f007b96a5e1630ba21b6006dd1d7fc45a",
"content_id": "4b5ca1729914b855334af23b556f462a57b1b498",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 609,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 37,
"path": "/notification/requirements.txt",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "attrs==15.2.0\ncffi==1.9.1\nclick==6.7\nconstantly==15.1.0\ncryptography==1.7.1\ncssselect==1.0.0\ndocker-py==1.10.6\ndocker-pycreds==0.2.1\nidna==2.2\nincremental==16.10.1\nlxml==3.7.2\nparsel==1.1.0\npyasn1==0.1.9\npyasn1-modules==0.0.8\npycparser==2.17\npycurl==7.43.0\nPyDispatcher==2.0.5\npyOpenSSL==16.2.0\npywin32==220\nPyYAML==3.12\nqueuelib==1.4.2\nrequests==2.13.0\nretrying==1.3.3\nscrapinghub==1.9.0\nscrapoxy==1.7\nScrapy==1.3.3\nscrapy-crawlera==1.2.2\nselenium==3.0.2\nservice-identity==16.0.0\nshub==2.5.1\nsix==1.10.0\nstem==1.5.4\ntornado==4.4.2\nTwisted==16.6.0\nw3lib==1.16.0\nwebsocket-client==0.40.0\nzope.interface==4.3.3\n"
},
{
"alpha_fraction": 0.6921269297599792,
"alphanum_fraction": 0.6933019757270813,
"avg_line_length": 22.61111068725586,
"blob_id": "54716736af8070d3c8a05ab5c6ef62d7d77d5452",
"content_id": "2ec6cffad0eec75d931f99ae96c0941ea7e69b33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 851,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 36,
"path": "/china_stars/stars/stars/items.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass StarsItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\nclass StarInfoItem(scrapy.Item):\n\tstarid = scrapy.Field()\n\turl = scrapy.Field()\n\tcapital = scrapy.Field()\n\tname = scrapy.Field()\n\tanother_name = scrapy.Field()\n\tenglish_name = scrapy.Field()\n\tgender = scrapy.Field()\n\tbirthyear = scrapy.Field()\n\tbirthday = scrapy.Field()\n\tconstellation = scrapy.Field()\n\tnationality = scrapy.Field()\n\tarea = scrapy.Field()\n\tprofession = scrapy.Field()\n\theight = scrapy.Field()\n\tbloodtype = scrapy.Field()\n\tbrief = scrapy.Field()\n\tavatar = scrapy.Field()\n\talbum = scrapy.Field()\n\timage_urls = scrapy.Field()\n\timages = scrapy.Field()\n\n"
},
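The Item subclasses above declare scrapy.Field() attributes; a minimal sketch of how such an item behaves like a constrained dict (assigning an undeclared key raises KeyError), using StarInfoItem from the file above:

from stars.items import StarInfoItem  # assumes the stars project is importable

item = StarInfoItem()
item['name'] = '某明星'
item['gender'] = 'female'
print(dict(item))          # {'name': '某明星', 'gender': 'female'}
# item['unknown'] = 1      # would raise KeyError: only declared fields are allowed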
{
"alpha_fraction": 0.6899695992469788,
"alphanum_fraction": 0.6909827589988708,
"avg_line_length": 20.933332443237305,
"blob_id": "6fa73c5d9b5b01220f256514d752399c67e5b476",
"content_id": "8fb4941b5dde3bbe3dee7bb6e8a14a2a49f317c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 987,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 45,
"path": "/ejob/ejob/items.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass EjobItem(scrapy.Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n pass\n\nclass CatalogItem(scrapy.Item):\n\tid = scrapy.Field()\n\tname = scrapy.Field()\n\turl = scrapy.Field()\n\tcategory = scrapy.Field()\n\nclass JobItem(scrapy.Item):\n\tposition = scrapy.Field()\n\tdepartment = scrapy.Field()\n\tdescription = scrapy.Field()\n\ttags = scrapy.Field()\n\tsalary = scrapy.Field()\n\ttemptation = scrapy.Field()\n\tjobtype = scrapy.Field()\n\texprience = scrapy.Field()\n\teducation = scrapy.Field()\n\trequirements = scrapy.Field()\n\t\n\tcity = scrapy.Field()\n\taddress = scrapy.Field()\n\tlocation = scrapy.Field()\n\t\n\turl = scrapy.Field()\n\tsite = scrapy.Field()\n\trawpost = scrapy.Field()\n\tpostdate = scrapy.Field()\n\t\n\tcompany_name = scrapy.Field()\n\tcompany_url = scrapy.Field()\n\tcompany_brief = scrapy.Field()\n"
},
{
"alpha_fraction": 0.715027391910553,
"alphanum_fraction": 0.7190654873847961,
"avg_line_length": 32.346153259277344,
"blob_id": "0a221d8e29bd68cc22b5a3aff67a5850d8d571ae",
"content_id": "333ba4d14b4185914a28328ba6ac33685f95048b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3467,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 104,
"path": "/ejob/ejob/pipelines.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport os, json, codecs, hashlib\nfrom urllib.parse import quote\nimport scrapy\nfrom scrapy.exceptions import DropItem\n\nclass EjobPipeline(object):\n\tdef process_item(self, item, spider):\n\t\treturn item\n\nclass JsonStorePipeline(object):\n\t\"\"\"Store Scrapy item into a json line file that is named by spider name.\"\"\"\n\n\tdef __init__(self, datafile):\n\t\t# self.f = codecs.open(datafile, 'a+', encoding='utf-8')\n\t\tself.f = codecs.open(datafile, 'w', encoding='utf-8')\n\n\tdef process_item(self, item, spider):\n\t\ttry:\n\t\t\titem_dict = dict(item)\n\t\t\tself.f.write(json.dumps(item_dict, ensure_ascii=False)+'\\n')\n\t\texcept Exception as e:\n\t\t\traise DropItem(e)\n\t\treturn item\n\n\t@classmethod\n\tdef from_crawler(cls, crawler):\n\t\tdatadir = crawler.settings['DATA_DIR']\n\t\tdataext = crawler.settings['DATA_EXT']\n\t\tdatafile = os.path.join(datadir, crawler.spider.name + dataext)\n\t\ti = 1\n\t\twhile os.path.isfile(datafile):\n\t\t\tdatafile = os.path.join(datadir, crawler.spider.name + '_%d' % i + dataext)\n\t\t\ti += 1\n\t\treturn cls(datafile)\n\n\tdef open_spider(self, spider):\n\t\tpass\n\n\tdef close_spider(self, spider):\n\t\tif not self.f.closed:\n\t\t\tself.f.close()\n\n\nclass ScreenshotBySplashPipeline(object):\n\t\"\"\"Use Splash to render screenshot of every Scrapy item\"\"\"\n\n\tdef __init__(self, splash_url, screenshot_dir, screenshot_format, screenshot_url_field, screenshot_file_field):\n\t\tself.splash_url = splash_url\n\t\tself.screenshot_dir = screenshot_dir\n\t\tself.screenshot_format = screenshot_format\n\t\tself.screenshot_url_field = screenshot_url_field\n\t\tself.screenshot_file_field = screenshot_file_field\n\t\n\t@classmethod\n\tdef from_crawler(cls, crawler):\n\t\t# URL like this: \"http://localhost:8050/render.png?url={}\"\n\t\tsplash_url = crawler.settings['SPLASH_URL']\n\t\tscreenshot_dir = crawler.settings['SCREENSHOT_DIR'] # screenshot files' storage directory\n\t\t# may be is \"url\", the page to be screenshot\n\t\tscreenshot_url_field = crawler.settings['SCREENSHOT_URL_FIELD']\n\t\t# may be is \"screenshot\", the generated screenshot location\n\t\tscreenshot_file_field = crawler.settings['SCRAEENSHOT_FILE_FIELD']\n\t\t# png, jpg, gif and so on\n\t\tscreenshot_format = crawler.settings['SCREENSHOT_FORMAT']\n\t\treturn cls(splash_url, screenshot_dir, screenshot_format, screenshot_url_field, screenshot_file_field)\n\n\tdef process_item(self, item, spider):\n\t\ttry:\n\t\t\turl_field = self.screenshot_url_field\n\t\t\tsplash_url = self.splash_url\n\t\t\tscreenshot_ext = self.screenshot_format\n\t\t\tencoded_item_url = quote(item[url_field])\n\t\t\turl = splash_url.format(encoded_item_url)\n\t\t\turl_hash = hashlib.md5(url.encode(\"utf8\")).hexdigest()\n\t\t\tfilename = \"{}.{}\".format(url_hash, screenshot_ext)\n\t\t\trequest = scrapy.Request(url)\n\t\t\trequest.meta['screenshot_filename'] = filename\n\t\t\t# Deferred Process item\n\t\t\tdfd = spider.crawler.engine.download(request, spider)\n\t\t\tdfd.addBoth(self.return_item, item)\n\t\texcept Exception as e:\n\t\t\traise DropItem(e)\n\t\treturn dfd\n\n\tdef return_item(self, response, item):\n\t\tscreenshot_dir = self.screenshot_dir\n\t\tfile_field = self.screenshot_file_field\n\t\tif response.status != 200:\n\t\t\t# Error happened, return item.\n\t\t\treturn item\n\n\t\t# 
Save screenshot to file\n\t\tfilename = response.meta['filename']\n\t\twith open(os.path.join(screenshot_dir, filename), 'wb') as f:\n\t\t\tf.write(response.body)\n\t\titem[file_field] = filename\n\t\treturn item"
},
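The Splash pipeline above names each screenshot by the MD5 of the render URL. A standalone sketch of that naming step; the Splash endpoint is the placeholder mentioned in the pipeline's own comment, and the page URL is one of the sample job URLs from the spider above.

import hashlib
from urllib.parse import quote

splash_url = 'http://localhost:8050/render.png?url={}'  # assumed Splash endpoint
page_url = 'https://www.lagou.com/jobs/2123649.html'
render_url = splash_url.format(quote(page_url))
# Hash the full render URL so each (endpoint, page) pair gets a stable file name
filename = '{}.{}'.format(hashlib.md5(render_url.encode('utf8')).hexdigest(), 'png')
print(render_url)
print(filename)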
{
"alpha_fraction": 0.6968944072723389,
"alphanum_fraction": 0.6981366276741028,
"avg_line_length": 25.83333396911621,
"blob_id": "ce3edf180c39b13ccae7a087ba696152e1dd44c9",
"content_id": "bcc8cfcb93ba4b6012c572b671c95a09c8da44b4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 60,
"path": "/ejob/ejob/item_loaders.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "from scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import TakeFirst, MapCompose, Join, Compose, Identity\nfrom w3lib.html import remove_tags\n\ndef take_first_nonempty_from_iterable():\n\treturn TakeFirst()\n\ndef str_strip(chars=' '):\n\treturn MapCompose(lambda s: s.strip(chars))\n\ndef list_strip(chars=' '):\n\treturn MapCompose(lambda l: ''.join([s.strip(chars) for s in l]))\n\n# def str_strip(chars=' '):\n# \treturn Compose(lambda l:l[0].strip(chars))\n\ndef html_strip():\n\treturn MapCompose(remove_tags)\n\ndef comma_join():\n\treturn Join(separator=',')\n\ndef newline_join():\n\treturn Join(separator='\\n')\n\ndef list_join_if_nonempty(separator=''):\n\treturn lambda loader, l: separator.join([s.strip() for s in l if s.strip()])\n\ndef filter_word(word):\n\treturn MapCompose(lambda s: None if s == word else s)\n\nclass LagouJobItemLoader(ItemLoader):\n\n\tdefault_input_processor = str_strip()\n\tdefault_output_processor = Join()\n\n\tsalary = str_strip(' /')\n\tcity_in = str_strip(' /')\n\texprience_in = str_strip(' /')\n\teducation_in = str_strip(' /')\n\tjobtype_in = str_strip(' /')\n\ttags_in = list_strip()\n\ttags_out = list_join_if_nonempty(',')\n\ttemptation_in = Identity()\n\ttemptation_out = newline_join()\n\trawpost_in = Identity()\n\trawpost_out = newline_join()\n\taddress_in = list_strip(' -\\n')\n\taddress_out = list_join_if_nonempty(',')\n\tcompany_name_in = str_strip(' \\n')\n\tcompany_brief_out = newline_join()\n\n\nclass RongypJobItemLoader(ItemLoader):\n\tdefault_input_processor = str_strip()\n\tdefault_output_processor = Join()\n\n\tposition_in = str_strip(' \\n')\n\ttemptation_out = comma_join()\n\tcompany_brief_out = newline_join()\n"
},
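The loader classes above compose MapCompose and Join processors. A minimal sketch showing how those processors behave when called directly on raw extracted values; processor instances are plain callables, which is how ItemLoader applies them.

from scrapy.loader.processors import MapCompose, Join, TakeFirst

strip_slash = MapCompose(lambda s: s.strip(' /'))
print(strip_slash(['15k-25k /', ' 上海 /']))  # ['15k-25k', '上海']
print(Join(separator=',')(['a', 'b', 'c']))   # 'a,b,c'
print(TakeFirst()(['', None, 'first']))       # 'first' (skips None and empty strings)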
{
"alpha_fraction": 0.6842105388641357,
"alphanum_fraction": 0.7518796920776367,
"avg_line_length": 66,
"blob_id": "e637d63d944f9c8e1b930112be162ba35c98e494",
"content_id": "111cff98f881d3e600f764b735bb9ed773f81524",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 2,
"path": "/netmusic/test/config.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "PHANTOMJS_PATH = '../bin/phantomjs-2.1.1-windows/phantomjs.exe'\nGECKODRIVER_PATH = '../bin/geckodriver-v0.17.0-win32/geckodriver.exe'"
},
{
"alpha_fraction": 0.6071085333824158,
"alphanum_fraction": 0.6119115948677063,
"avg_line_length": 24.414634704589844,
"blob_id": "b14921d585c6c574661e77cd25bddd89e278f331",
"content_id": "5a7dbd18f44cdf775ac680ecec08615b4836650b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 41,
"path": "/china_stars/stars/stars/spiders/stars_catalog_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import re, string, json\nimport scrapy\n\nclass StarsSpider(scrapy.Spider):\n\tname = \"stars-catalog-spider\"\n\tstart_urls = [\n\t\t'http://ent.qq.com/c/dalu_star.shtml',\n\t]\n\tstars_capital = [c for c in string.ascii_uppercase]\n\tstars_capital.append('0-9')\n\n\tdef start_requests(self):\n\t\tfor url in self.start_urls:\n\t\t\trequest = scrapy.Request(url, callback=self.parse)\n\t\t\tyield request\n\n\tdef parse(self, response):\n\t\tf = open('stars_catalog.json', 'w', encoding='utf-8')\n\t\tstars = {}\n\n\t\tfor capital in self.stars_capital:\n\t\t\tstars[capital] = []\n\t\t\tcount = 1\n\t\t\twhile True:\n\t\t\t\trowid = capital + ('%d' % count)\n\t\t\t\tcount += 1\n\t\t\t\tlinks = response.xpath('//tr[@id=\"%s\"]//a' % rowid)\n\t\t\t\tif not links:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tfor link in links:\n\t\t\t\t\t\turl = link.xpath('@href').extract_first()\n\t\t\t\t\t\tname = link.xpath('@title').extract_first()\n\t\t\t\t\t\tif url is None:\n\t\t\t\t\t\t\tself.logger.error('url is empty')\n\t\t\t\t\t\telif name:\n\t\t\t\t\t\t\tstars[capital].append({\n\t\t\t\t\t\t\t\t'name': name,\n\t\t\t\t\t\t\t\t'url': url\n\t\t\t\t\t\t\t})\n\t\tf.write(json.dumps(stars, ensure_ascii=False))"
},
{
"alpha_fraction": 0.6197879910469055,
"alphanum_fraction": 0.6219081282615662,
"avg_line_length": 32.69047546386719,
"blob_id": "3ddef98b0205f4e8950d7b99663933e8c176add0",
"content_id": "9895abb387bc99d42b9bdea998892b2d5a418c89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1415,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 42,
"path": "/china_stars/Scrapy-sqlite-item-exporter-master/exporters.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "\"\"\"\nItem Exporters are used to export/serialize items into sqlite3 database.\n\"\"\"\n\nfrom scrapy.contrib.exporter import BaseItemExporter\nimport sqlite3\n\nclass SqliteItemExporter(BaseItemExporter):\n \n def __init__(self, file, **kwargs):\n self._configure(kwargs)\n self.conn = sqlite3.connect(file.name)\n self.conn.text_factory = str\n \tself.created_tables = []\n \n def export_item(self, item): \t\t\n \titem_class_name = type(item).__name__\n \t\n \tif item_class_name not in self.created_tables:\n \t\tself._create_table(item_class_name, item.fields.iterkeys())\n \t\tself.created_tables.append(item_class_name)\n \t\n \tfield_list = []\n \tvalue_list = []\n \tfor field_name in item.iterkeys():\n \t\tfield_list.append('[%s]' % field_name)\n \t\tfield = item.fields[field_name]\n \t\tvalue_list.append(self.serialize_field(field, field_name, item[field_name]))\n \t\n \tsql = 'insert into [%s] (%s) values (%s)' % (item_class_name, ', '.join(field_list), ', '.join(['?' for f in field_list]))\n \tself.conn.execute(sql, value_list)\n \tself.conn.commit()\n \t\t\n def _create_table(self, table_name, columns):\n\t\tsql = 'create table if not exists [%s] ' % table_name\n\t\tcolumn_define = ', '.join(['[%s] text' % column for column in columns])\n\t\tsql += '(%s)' % column_define\n\t\tself.conn.execute(sql)\n\t\tself.conn.commit()\n \t\n def __del__(self):\n \tself.conn.close()\n"
},
{
"alpha_fraction": 0.7731829285621643,
"alphanum_fraction": 0.7731829285621643,
"avg_line_length": 37.0476188659668,
"blob_id": "4ae82193f99a734f9621eb50341e856c3303176e",
"content_id": "8f0c909ceee5df15c2cb7d53f910437800f11296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 21,
"path": "/netmusic/test/firefox.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nimport config\nprint(os.path.abspath(config.GECKODRIVER_PATH))\ndriver = webdriver.Firefox(executable_path=os.path.abspath(config.GECKODRIVER_PATH))\n# WebDriver will wait until the page has fully loaded (that is, the “onload” event has fired)\ndriver.get(\"http://www.python.org\")\nassert \"Python\" in driver.title\n# find element by its name attribute\nelem = driver.find_element_by_name(\"q\")\nelem.clear()\n# sending keys, this is similar to entering keys using your keyboard.\nelem.send_keys(\"pycon\")\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in driver.page_source\n# The quit will exit entire browser whereas close` will close one tab, but if just one tab was open\n# driver.quit()\n# driver.close()"
},
{
"alpha_fraction": 0.7155688405036926,
"alphanum_fraction": 0.7310379147529602,
"avg_line_length": 28.262773513793945,
"blob_id": "1e5e4633ebb15d18ac34795a535d10fae26cdd07",
"content_id": "4f079019f49b850b96a1d108a67f66cbf3051a75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4008,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 137,
"path": "/china_stars/stars/stars/settings.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Scrapy settings for stars project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. You can find more settings consulting the documentation:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'stars'\n\nUSER_AGENT = 'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)'\n\nSPIDER_MODULES = ['stars.spiders']\nNEWSPIDER_MODULE = 'stars.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'stars (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\nROBOTSTXT_OBEY = True\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\nDOWNLOAD_DELAY = 5\n\n# The download delay setting will honor only one of:\nCONCURRENT_REQUESTS_PER_DOMAIN = 16\nCONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\nCOOKIES_ENABLED = False\n\n# Disable Telnet Console (enabled by default)\n#TELNETCONSOLE_ENABLED = False\n\n# Override the default request headers:\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'stars.middlewares.MyCustomSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'stars.middlewares.MyCustomDownloaderMiddleware': 543,\n#}\n\n# Enable or disable extensions\n# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html\n#EXTENSIONS = {\n# 'scrapy.extensions.telnet.TelnetConsole': None,\n#}\n\n# Configure item pipelines\n# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html\nITEM_PIPELINES = {\n\t'scrapy.pipelines.images.ImagesPipeline': 10,\n\t#'stars.pipelines.StarsCatalogPipeline': 300,\n\t'stars.pipelines.StarInfoPipeline': 301,\n\t'stars.pipelines.Sqlite3Pipeline': 302,\n}\n\nIMAGES_STORE = 'images'\n\n# thumbnails\n# IMAGES_THUMBS = {\n# \t'small': (50, 50),\n# \t'big': (270, 270),\n# }\n\n# filter out small images\n# IMAGES_MIN_HEIGHT = 110\n# IMAGES_MIN_WIDTH = 110\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = False\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\nHTTPCACHE_ENABLED = True\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_HTTP_CODES = 
[]\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = 'logs'\n\nSQLITE_FILE = 'dump.db'\nSQLITE_TABLE = 'stars'\n# i -> int\n# s -> small string\n# S -> big string\nSQLITE_TABLE_DESC = {\n\t'starid': 'i',\n\t'url': 's',\n\t'capital': 's',\n\t'name': 's',\n\t'another_name': 's',\n\t'english_name': 's',\n\t'gender': 's',\n\t'birthyear': 's',\n\t'birthday': 's',\n\t'constellation': 's',\n\t'nationality': 's',\n\t'area': 's',\n\t'profession': 's',\n\t'height': 's',\n\t'bloodtype': 's',\n\t'brief': 'S',\n\t'avatar': 'S',\n\t'album': 'S'\n}"
},
{
"alpha_fraction": 0.7250000238418579,
"alphanum_fraction": 0.7250000238418579,
"avg_line_length": 17.487178802490234,
"blob_id": "2964b7e2415ecfb3e9f7b4faa73c0f3fbc9d6773",
"content_id": "cb588bc9f997622f2cf585df90f921fc9fa6d1ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 39,
"path": "/ejob/db/base_db.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import abc\n\nclass BaseDb(metaclass=abc.ABCMeta):\n\n\[email protected]\n\tdef close(self):\n\t\tpass\n\n\[email protected]\n\tdef sql(self, q):\n\t\tpass\n\n\[email protected]\n\tdef create_table(self, tbl_name, fields, extra=None):\n\t\tpass\n\n\[email protected]\n\tdef select_table(self, tbl_name, fields=[], where_condition=None):\n\t\tpass\n\n\[email protected]\n\tdef count_table(self, tbl_name, where_condition=None):\n\t\tpass\n\n\[email protected]\n\tdef insert_table(self, tbl_name, fields={}):\n\t\tpass\n\n\[email protected]\n\tdef insert_many_table(self, tbl_name, keys, values):\n\t\tpass\n\n\[email protected]\n\tdef delete_table(self, tbl_name):\n\t\tpass\n\n\[email protected]\n\tdef update_table(self, tbl_name, fields, where_condition):\n\t\tpass"
},
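A minimal sketch of a concrete backend for the `BaseDb` interface above, since the dump doesn't include one. Everything here is an assumption rather than the repo's actual implementation: the class name `SqliteDb`, the import path, and the SQL semantics inferred from the abstract method names.

```python
# Hypothetical sqlite3-backed implementation of BaseDb (not part of the repo).
# The import path is assumed; inside the package it might be ejob.db.base_db.
import sqlite3
from base_db import BaseDb

class SqliteDb(BaseDb):
	def __init__(self, dbfile):
		self.conn = sqlite3.connect(dbfile)

	def close(self):
		self.conn.close()

	def sql(self, q):
		# run a raw query, commit, and return all rows
		cur = self.conn.execute(q)
		self.conn.commit()
		return cur.fetchall()

	def create_table(self, tbl_name, fields, extra=None):
		cols = ', '.join(fields) + ((', ' + extra) if extra else '')
		self.sql('CREATE TABLE IF NOT EXISTS %s (%s)' % (tbl_name, cols))

	def select_table(self, tbl_name, fields=[], where_condition=None):
		cols = ', '.join(fields) if fields else '*'
		where = (' WHERE ' + where_condition) if where_condition else ''
		return self.sql('SELECT %s FROM %s%s' % (cols, tbl_name, where))

	def count_table(self, tbl_name, where_condition=None):
		where = (' WHERE ' + where_condition) if where_condition else ''
		return self.sql('SELECT COUNT(*) FROM %s%s' % (tbl_name, where))[0][0]

	def insert_table(self, tbl_name, fields={}):
		keys = ', '.join(fields)
		marks = ', '.join('?' * len(fields))
		cur = self.conn.execute('INSERT INTO %s (%s) VALUES (%s)' % (tbl_name, keys, marks),
		                        list(fields.values()))
		self.conn.commit()
		return cur.lastrowid

	def insert_many_table(self, tbl_name, keys, values):
		marks = ', '.join('?' * len(keys))
		self.conn.executemany('INSERT INTO %s (%s) VALUES (%s)' % (tbl_name, ', '.join(keys), marks), values)
		self.conn.commit()

	def delete_table(self, tbl_name):
		self.sql('DELETE FROM %s' % tbl_name)

	def update_table(self, tbl_name, fields, where_condition):
		sets = ', '.join('%s = ?' % k for k in fields)
		self.conn.execute('UPDATE %s SET %s WHERE %s' % (tbl_name, sets, where_condition),
		                  list(fields.values()))
		self.conn.commit()
```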
{
"alpha_fraction": 0.643553614616394,
"alphanum_fraction": 0.6457204818725586,
"avg_line_length": 29.799999237060547,
"blob_id": "0c6412dd4695a189dd7d9cb4321ee44aac8af983",
"content_id": "bfe8f6caf0fa377b85a12080d1225886ae8aeaa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 927,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 30,
"path": "/ejob/ejob/spiders/rongyp_catalog_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom ejob.items import CatalogItem\n\nclass RongypCatalogSpiderSpider(scrapy.Spider):\n\tname = \"rongyp_catalog_spider\"\n\tallowed_domains = [\"rongyp.com\"]\n\tstart_urls = ['https://www.rongyp.com/index.php?m=Home&c=Job&a=jobSearch']\n\n\tdef start_requests(self):\n\t\tfor url in self.start_urls:\n\t\t\trequest = scrapy.Request(url, callback=self.parse)\n\t\t\trequest.meta['dont_redirect'] = True\n\t\t\tyield request\n\n\tdef parse(self, response):\n\t\tfor li in response.xpath('//ul[@id=\"tabBox\"]/li'):\n\t\t\tcategory = li.xpath('.//h2/text()').extract_first()\n\t\t\tfor link in li.xpath('.//dl/dd/a'):\n\t\t\t\titem = CatalogItem()\n\t\t\t\tname = link.xpath('text()').extract_first()\n\t\t\t\turl = link.xpath('@href').extract_first()\n\t\t\t\tif (not url) or (not name) or (name == '其它'):\n\t\t\t\t\tcontinue\n\t\t\t\titem['id'] = ''\n\t\t\t\titem['category'] = category\n\t\t\t\titem['name'] = name\n\t\t\t\titem['url'] = response.urljoin(url)\n\t\t\t\tyield item"
},
{
"alpha_fraction": 0.7242489457130432,
"alphanum_fraction": 0.7328326106071472,
"avg_line_length": 24.91666603088379,
"blob_id": "20560251f8964731eb257cd69a5e7f6ae2795907",
"content_id": "2e4bcca51362bfed6958a04f9c46ef25a3ee3a5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 932,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 36,
"path": "/scrapy_tor/test/check_tor_control_port.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import getpass\nimport sys\n\nimport stem\nimport stem.connection\nfrom stem.control import Controller\n\nCONTROL_PORT = 9151\n\nif __name__ == '__main__':\n\t# connect tor control socket\n\ttry:\n\t\tcontroller = Controller.from_port(port=CONTROL_PORT)\n\texcept stem.SocketError as e:\n\t\tprint(\"Unable to connect to tor on port %d: %s\" % (CONTROL_PORT, e))\n\t\tsys.exit(1)\n\n\t# authenticate tor control socket\n\ttry:\n\t\tcontroller.authenticate()\n\texcept stem.connection.MissingPassword:\n\t\tpw = getpass.getpass(\"Tor ControllerPort Password: \")\n\t\ttry:\n\t\t\tcontroller.authenticate(password=pw)\n\t\texcept stem.connection.PasswordAuthFailed:\n\t\t\tprint(\"Unable to authenticate, password is incorrect\")\n\t\t\tsys.exit(1)\n\texcept stem.connection.AuthenticationFailure as e:\n\t\tprint(\"Unable to authenticate: %s\" % e)\n\t\tsys.exit(1)\n\texcept Exception as e:\n\t\tprint(\"Wrong\")\n\t\tsys.exit(1)\n\n\tprint(\"Tor is running version %s\" % controller.get_version())\n\tcontroller.close()"
},
{
"alpha_fraction": 0.5729926824569702,
"alphanum_fraction": 0.6058394312858582,
"avg_line_length": 22.390243530273438,
"blob_id": "aea756e98488cf4831ffda76e565598569446ed2",
"content_id": "91651d050caf5c8f295ff01f4d0d334376ff12fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1918,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 82,
"path": "/scrapoxy_test/README.md",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "## install/run scrapyoxy\n\n### install scrapoxy\n\n```\nsudo npm install -g scrapoxy\n```\n\n### generate configuration file\n```\nscrapoxy init path/to/conf.json\n```\n\n### edit configuration file\n```\n\"commander\": {\n \"password\": \"your-scrapoxy-commander-password\"\n },\n \"instance\": {\n \"port\": 3128,\n \"scaling\": {\n \"min\": 1,\n \"max\": 2\n }\n },\n \"providers\": {\n \"type\": \"awsec2\",\n \"awsec2\": {\n \"accessKeyId\": \"Your-AWS-Access-Key\",\n \"secretAccessKey\": \"Your-AWS-Secret-Access-Key\",\n \"region\": \"Your-ec2-instance-region\",\n \"instance\": {\n \"InstanceType\": \"t1.micro\",\n \"ImageId\": \"your-image-id\",\n \"SecurityGroups\": [\n \"forward-proxy\"\n ]\n }\n },\n ......\n }\n}\n```\n### run scrapoxy daemon service\n```\nscrapoxy start /path/to/conf.json -d\n```\n\n## check scrapoxy proxy\n```\nscrapoxy test http://localhost:8888\n```\nor\n```\ncurl --proxy http://127.0.0.1:8888 http://api.ipify.org\n```\n\n## access scrapoxy web GUI\n<http://localhost:8889>\n\n## configure scrapy\n```\n# append the following lines to settings.py\n\n# PROXY\nPROXY = 'http://127.0.0.1:8888/?noconnect'\n\n# BLACKLISTING\nBLACKLIST_HTTP_STATUS_CODES = [ 503 ]\n\n# SCRAPOXY\nAPI_SCRAPOXY = 'http://127.0.0.1:8889/api'\nAPI_SCRAPOXY_PASSWORD = b'your-scrapoxy-commander-password'\n\nDOWNLOADER_MIDDLEWARES = {\n\t'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None, # turn off the scrapy default http proxy\n 'scrapoxy.downloadmiddlewares.proxy.ProxyMiddleware': 100, # load scrapoxy http proxy\n 'scrapoxy.downloadmiddlewares.wait.WaitMiddleware': 101,\n 'scrapoxy.downloadmiddlewares.scale.ScaleMiddleware': 102, # automate scale instance\n 'scrapoxy.downloadmiddlewares.blacklist.BlacklistDownloaderMiddleware': 950, # when website access limit is reached, tell scrapoxyd change instance\n}\n```\n"
},
{
"alpha_fraction": 0.6346423625946045,
"alphanum_fraction": 0.6360448598861694,
"avg_line_length": 35.56410217285156,
"blob_id": "162a80f2733d92bef91818f0b09096427433a36a",
"content_id": "9f7505f8bc3e35a3f6fba78e4e69b839a5bfe764",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1434,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 39,
"path": "/ejob/ejob/spiders/lagou_catalog_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom ejob.items import CatalogItem\n\nclass LagouCatalogSpiderSpider(scrapy.Spider):\n\tname = \"lagou_catalog_spider\"\n\tallowed_domains = [\"lagou.com\"]\n\tstart_urls = ['http://www.lagou.com/']\n\n\tdef parse(self, response):\n\t\txpath = '//*[@id=\"sidebar\"]//*[@class=\"menu_box\"][.//h2[contains(text(), \"金融\")]]'\n\t\tmenu_box = response.xpath(xpath)\n\t\tif not menu_box:\n\t\t\tself.logger.error('Menu Element Cannot be found: %s', response.url)\n\t\t\treturn None\n\n\t\tmenu_main = menu_box.xpath('*[contains(@class, \"menu_main\")]')\n\t\tmenu_sub = menu_box.xpath('*[contains(@class, \"menu_sub\")]')\n\t\tif menu_main and menu_sub:\n\t\t\tfor link in menu_main.xpath('a'):\n\t\t\t\titem = LagouCatalogItem()\n\t\t\t\titem['category'] = '金融'\n\t\t\t\titem['id'] = link.xpath('@data-lg-tj-no').extract_first()\n\t\t\t\titem['name'] = link.xpath('text()').extract_first()\n\t\t\t\titem['url'] = response.urljoin(link.xpath('@href').extract_first())\n\t\t\t\tyield item\n\t\t\tfor dl in menu_sub.xpath('dl'):\n\t\t\t\tcategory = dl.xpath('dt/span/text()').extract_first()\n\t\t\t\tfor link in dl.xpath('dd/a'):\n\t\t\t\t\titem = CatalogItem()\n\t\t\t\t\titem['category'] = category\n\t\t\t\t\titem['id'] = link.xpath('@data-lg-tj-no').extract_first()\n\t\t\t\t\titem['name'] = link.xpath('text()').extract_first()\n\t\t\t\t\titem['url'] = response.urljoin(link.xpath('@href').extract_first())\n\t\t\t\t\tyield item\n\t\telse:\n\t\t\tself.logger.error('Menu Element Cannot be found: %s', response.url)\n\t\t\treturn None\n"
},
{
"alpha_fraction": 0.6545171141624451,
"alphanum_fraction": 0.6579439043998718,
"avg_line_length": 28.731481552124023,
"blob_id": "8ca1a1128eb886e8b5cb23c47ff65dc76916b2b2",
"content_id": "48b710c0261b4a253c7cee2af0ec2b667479a433",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3210,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 108,
"path": "/china_stars/stars/stars/pipelines.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport codecs, json, sqlite3\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass StarsCatalogPipeline(object):\n\tdef open_spider(self, spider):\n\t\tself.f = codecs.open('stars_catalog.json', 'w', encoding='utf-8')\n\t\tself.catalog = {}\n\n\tdef close_spider(self, spider):\n\t\tself.f.write(json.dumps(self.catalog, ensure_ascii=False))\n\t\tself.f.close()\n\n\tdef process_item(self, item, spider):\n\t\tif item['capital'] not in self.catalog:\n\t\t\tself.catalog[item['capital']] = []\n\t\tself.catalog[item['capital']].append({\n\t\t\t'url': item['url'],\n\t\t\t'name': item['name']\n\t\t})\n\t\treturn item\n\nclass StarInfoPipeline(object):\n\tdef process_item(self, item, spider):\n\t\treturn item\n\nclass Sqlite3Pipeline(object):\n\tdef __init__(self, sqlite_file, sqlite_table, sqlite_table_desc, image_dir):\n\t\tself.sqlite_file = sqlite_file\n\t\tself.sqlite_table = sqlite_table\n\t\tself.sqlite_table_desc = sqlite_table_desc\n\t\tself.image_dir = image_dir\n\n\t@classmethod\n\tdef from_crawler(cls, crawler):\n\t\tfile = crawler.settings.get('SQLITE_FILE', 'sqlite3.db')\n\t\ttable = crawler.settings.get('SQLITE_TABLE', 'items')\n\t\ttbl_desc = crawler.settings.get('SQLITE_TABLE_DESC')\n\t\timage_dir = crawler.settings.get('IMAGES_STORE')\n\t\tif not tbl_desc:\n\t\t\traise Exception('SQLITE_TABLE_DESC is missed in the settings file')\n\t\tif not isinstance(tbl_desc, dict):\n\t\t\traise Exception('SQLITE_TABLE_DESC must be a dictionary')\n\t\treturn cls(\n\t\t\tsqlite_file=file,\n\t\t\tsqlite_table=table,\n\t\t\tsqlite_table_desc=tbl_desc,\n\t\t\timage_dir=image_dir\n\t\t)\n\n\tdef open_spider(self, spider):\n\t\tself.client = sqlite3.connect(self.sqlite_file)\n\t\tself.cursor = self.client.cursor()\n\t\tsql = 'CREATE TABLE IF NOT EXISTS {table} ({fields})'\n\t\tfields = ['id INTEGER PRIMARY KEY AUTOINCREMENT']\n\t\tfor field, dtype in self.sqlite_table_desc.items():\n\t\t\tif dtype == 'i':\n\t\t\t\tfield = '%s INTEGER' % field\n\t\t\telif dtype == 'S':\n\t\t\t\tfield = '%s TEXT' % field\n\t\t\telse:\n\t\t\t\tfield = '%s VARCHAR(255)' % field\n\t\t\tfields.append(field)\n\t\tsql = sql.format(table=self.sqlite_table, fields=', '.join(fields))\n\t\tself.cursor.execute(sql)\n\t\tself.client.commit()\n\n\n\tdef close_spider(self, spider):\n\t\tself.client.close()\n\n\tdef item2dict(self, item):\n\t\td = {}\n\t\td['album'] = []\n\t\tavatar_url = item['avatar']\n\t\tfor key in self.sqlite_table_desc:\n\t\t\tif key == 'avatar':\n\t\t\t\tfor image in item['images']:\n\t\t\t\t\tif image['url'] == avatar_url:\n\t\t\t\t\t\td['avatar'] = [self.image_dir+'/'+image['path'], avatar_url]\n\t\t\telif key == 'album':\n\t\t\t\tfor image in item['images']:\n\t\t\t\t\tif image['url'] != avatar_url:\n\t\t\t\t\t\td['album'].append([self.image_dir+'/'+image['path'], image['url']])\n\t\t\telse:\n\t\t\t\td[key] = item[key]\n\t\td['avatar'] = json.dumps(d['avatar'], ensure_ascii=False)\n\t\td['album'] = json.dumps(d['album'], ensure_ascii=False)\n\t\treturn d\n\n\tdef process_item(self, item, spider):\n\t\td = self.item2dict(item)\n\t\tsql = 'INSERT INTO {table} ({keys}) VALUES ({values})'\n\t\t\n\t\tsql = sql.format(\n\t\t\ttable=self.sqlite_table,\n\t\t\tkeys= ', '.join(d.keys()),\n\t\t\tvalues=', '.join(['?']*len(d.keys()))\n\t\t)\n\t\t\n\t\t\n\t\tself.cursor.execute(sql, list(d.values()))\n\t\tself.client.commit()\n\t\treturn item"
},
{
"alpha_fraction": 0.682692289352417,
"alphanum_fraction": 0.682692289352417,
"avg_line_length": 25,
"blob_id": "8944aea4ffb497087c59f37044aba2b9caee80af",
"content_id": "8797ce780df7c9b7273cc0058f48c93a658e3fc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 368,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/README.md",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "一些爬虫项目:\n\n* **ejob** 爬取拉勾等就业网站数据;\n* **netmusic** 爬取网易云音乐数据;\n* **scrapy_tor** 利用 Tor 来规避反爬虫机制;\n* **scrapoxy_test** 利用 scrapoxy 和 AWS 来规避反爬虫机制;\n* **notification** 爬取学院的讲座通知并邮件提醒抢讲座;\n* **china_stars** 爬取中国的明星数据;\n"
},
{
"alpha_fraction": 0.6086956262588501,
"alphanum_fraction": 0.6240409016609192,
"avg_line_length": 23.5,
"blob_id": "155fb08d7d6cec2770b842f8b9e179a6b786262f",
"content_id": "2a21210e6dd5e031443ee4f8142f98578b05cb0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 16,
"path": "/china_stars/Scrapy-sqlite-item-exporter-master/README.md",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "Scrapy-sqlite-item-exporter\n===========================\n\nExport items to sqlite3 database crawled by scrapy 1.4\n\nHow to use\n===========================\n\n\t1. Place exporters.py to your project script directory.\n\t2. In settings.py,\n\n\t\tFEED_EXPORTERS = {\n\t\t 'sqlite': '<script directory>.exporters.SqliteItemExporter',\n\t\t}\n\n\t3. In terminal,$ scrapy crawl <spider name> -o sqlite.db -t sqlite"
},
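Since exporters.py itself isn't part of this dump, here is a hedged sketch of what the `SqliteItemExporter` wired up in that README might look like. Only the class name and the `BaseItemExporter` hooks come from Scrapy's real exporter API; the single-table layout and the reuse of the feed file's path are assumptions.

```python
# Hypothetical exporters.py matching the README above (not the repo's actual file).
import sqlite3
from scrapy.exporters import BaseItemExporter

class SqliteItemExporter(BaseItemExporter):
    def __init__(self, file, **kwargs):
        self._configure(kwargs)
        # Scrapy hands the feed exporter an open binary file; reuse its path
        # for the sqlite database instead of writing through the handle.
        self.conn = sqlite3.connect(file.name)
        self.table_created = False

    def export_item(self, item):
        fields = dict(self._get_serialized_fields(item))
        if not self.table_created:
            # create the table lazily from the first item's field names
            cols = ', '.join('%s TEXT' % k for k in fields)
            self.conn.execute('CREATE TABLE IF NOT EXISTS items (%s)' % cols)
            self.table_created = True
        keys = ', '.join(fields.keys())
        marks = ', '.join('?' * len(fields))
        self.conn.execute('INSERT INTO items (%s) VALUES (%s)' % (keys, marks),
                          [str(v) for v in fields.values()])

    def finish_exporting(self):
        self.conn.commit()
        self.conn.close()
```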
{
"alpha_fraction": 0.4743589758872986,
"alphanum_fraction": 0.6948717832565308,
"avg_line_length": 15.25,
"blob_id": "30ecc4af07171cdd92e927b92686c26bb5d48dbe",
"content_id": "668615ee53d7741863ef482b6c9df59cccbf84b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 390,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 24,
"path": "/scrapy_tor/requirements.txt",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "attrs==15.2.0\ncffi==1.9.1\nconstantly==15.1.0\ncryptography==1.7.1\ncssselect==1.0.0\nidna==2.2\nincremental==16.10.1\nlxml==3.7.2\nparsel==1.1.0\npyasn1==0.1.9\npyasn1-modules==0.0.8\npycparser==2.17\nPyDispatcher==2.0.5\npyOpenSSL==16.2.0\npywin32==220\nqueuelib==1.4.2\nrequests==2.13.0\nScrapy==1.1.1\nservice-identity==16.0.0\nsix==1.10.0\nstem==1.5.4\nTwisted==16.6.0\nw3lib==1.16.0\nzope.interface==4.3.3\n"
},
{
"alpha_fraction": 0.61964350938797,
"alphanum_fraction": 0.6241941452026367,
"avg_line_length": 28.64044952392578,
"blob_id": "c7a95ae3146d6b2c25ab5dc3173a7937cfb6e8a1",
"content_id": "f207f1ec14fada1e66521b18cd473292ea757d86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2689,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 89,
"path": "/china_stars/stars/stars/spiders/stars_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import re, string, json, urllib\nimport scrapy\nfrom stars.items import StarInfoItem\n\nclass StarsSpider(scrapy.Spider):\n\tname = \"stars-spider\"\n\tstart_urls = [\n\t\t'http://ent.qq.com/c/dalu_star.shtml',\n\t]\n\tstars_capital = [c for c in string.ascii_uppercase]\n\tstars_capital.append('0-9')\n\tstarinfo_fields = {\n\t\t'name':'姓名', \n\t\t'another_name':'原名', \n\t\t'gender':'性别', \n\t\t'english_name':'英文名', \n\t\t'birthyear':'出生年', \n\t\t'birthday':'生日', \n\t\t'constellation':'星座', \n\t\t'nationality':'国籍', \n\t\t'area':'地域', \n\t\t'profession':'职业', \n\t\t'height':'身高', \n\t\t'bloodtype':'血型'\n\t}\n\tstarinfo_url = 'http://datalib.ent.qq.com/star/%d/starinfo.shtml'\n\n\tdef start_requests(self):\n\t\tfor url in self.start_urls:\n\t\t\trequest = scrapy.Request(url, callback=self.parse)\n\t\t\tyield request\n\n\tdef parse(self, response):\n\t\tfor capital in self.stars_capital:\n\t\t\tcount = 1\n\t\t\twhile True:\n\t\t\t\trowid = capital + ('%d' % count)\n\t\t\t\tcount += 1\n\t\t\t\tlinks = response.xpath('//tr[@id=\"%s\"]//a' % rowid)\n\t\t\t\tif not links:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tfor link in links:\n\t\t\t\t\t\turl = link.xpath('@href').extract_first()\n\t\t\t\t\t\tname = link.xpath('@title').extract_first()\n\t\t\t\t\t\tif url is None:\n\t\t\t\t\t\t\tself.logger.error('url is empty')\n\t\t\t\t\t\telif name:\n\t\t\t\t\t\t\trequest = scrapy.Request(url, callback=self.parse_star)\n\t\t\t\t\t\t\trequest.meta['capital'] = capital\n\t\t\t\t\t\t\trequest.meta['name'] = name\n\t\t\t\t\t\t\trequest.meta['id'] = int(url.split('/')[-2])\n\t\t\t\t\t\t\tyield request\n\n\tdef parse_star(self, response):\n\t\tstarinfo = StarInfoItem()\n\t\tstarinfo['starid'] = response.meta['id']\n\t\tstarinfo['capital'] = response.meta['capital']\n\t\tstarinfo['name'] = response.meta['name']\n\t\tstarinfo['url'] = response.url\n\t\tavatar_url = response.xpath('//div[@id=\"star_face\"]/a/img/@src').extract_first(default='')\n\t\tstarinfo['avatar'] = avatar_url\n\t\tstarinfo['album'] = []\n\t\timage_urls = [avatar_url]\n\t\tcount = 1\n\t\twhile True:\n\t\t\timgs = response.xpath('//*[@id=\"demo%d\"]//img/@src' % count).extract()\n\t\t\tcount += 1\n\t\t\tif not imgs:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\timage_urls += imgs\n\t\tstarinfo['image_urls'] = image_urls\n\t\t\n\t\txpath = '//div[@id=\"infos\"]//td[strong[contains(text(), \"{field}\")]]/text()'\n\t\tfor k, field in self.starinfo_fields.items():\n\t\t\tvalue = response.xpath(xpath.format(field=field)).extract_first()\n\t\t\tif value:\n\t\t\t\tstarinfo[k] = value.strip()\n\t\t\telse:\n\t\t\t\tstarinfo[k] = ''\n\n\t\tstarinfo_url = self.starinfo_url % starinfo['starid']\n\t\tbody = urllib.request.urlopen(starinfo_url).read().decode('gbk').encode('utf-8').decode('utf-8')\n\t\tr = response.replace(body=body)\n\t\txpath = '//div[@id=\"left\"]/table[2]//td[@class=\"line22\"]/text()'\n\t\tstarinfo['brief'] = r.xpath(xpath).extract_first('').strip()\n\n\t\tyield starinfo"
},
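The spiders and pipelines in this project reference `stars.items.StarInfoItem`, which isn't included in this dump. A plausible minimal definition follows; the field names are inferred from how the spider and the `Sqlite3Pipeline` access the item (plus the `image_urls`/`images` pair that Scrapy's ImagesPipeline expects), so this is a reconstruction, not the repo's actual items.py.

```python
# Hypothetical stars/items.py, reconstructed from usage elsewhere in the project.
import scrapy

class StarInfoItem(scrapy.Item):
    starid = scrapy.Field()
    url = scrapy.Field()
    capital = scrapy.Field()
    name = scrapy.Field()
    another_name = scrapy.Field()
    english_name = scrapy.Field()
    gender = scrapy.Field()
    birthyear = scrapy.Field()
    birthday = scrapy.Field()
    constellation = scrapy.Field()
    nationality = scrapy.Field()
    area = scrapy.Field()
    profession = scrapy.Field()
    height = scrapy.Field()
    bloodtype = scrapy.Field()
    brief = scrapy.Field()
    avatar = scrapy.Field()
    album = scrapy.Field()
    image_urls = scrapy.Field()  # consumed by scrapy's ImagesPipeline
    images = scrapy.Field()      # populated by scrapy's ImagesPipeline
```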
{
"alpha_fraction": 0.7560575008392334,
"alphanum_fraction": 0.76221764087677,
"avg_line_length": 28.349397659301758,
"blob_id": "56c872396fdfe048d1a94e60db0169f3db39d8b9",
"content_id": "fe77103b42653f100615faeeaf76385e0bd25d55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2435,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 83,
"path": "/netmusic/test/api.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "##\n# selenium.webdriver module provides all the WebDriver implementations. \n# Currently supported WebDriver implementations are Firefox, Chrome, IE and Remote.\n##\nfrom selenium import webdriver\n# webdriver.Firefox(\n# firefox_profile=None, \n# firefox_binary=None, \n# timeout=30, \n# capabilities=None, \n# proxy=None, \n# executable_path='geckodriver', \n# firefox_options=None, \n# log_path='geckodriver.log')\n# webdriver.FirefoxProfile\n# webdriver.Chrome(\n# executable_path='chromedriver', \n# port=0, \n# chrome_options=None, \n# service_args=None, \n# desired_capabilities=None, \n# service_log_path=None)\n# webdriver.ChromeOptions\n# webdriver.Ie\n# webdriver.Opera\n# webdriver.PhantomJS\n# webdriver.Remote\n# webdriver.DesiredCapabilities\n# webdriver.ActionChains\n# webdriver.TouchActions\n# webdriver.Proxy\n\n\n##\n# Keys class provide keys in the keyboard like RETURN, F1, ALT etc.\n##\nfrom selenium.webdriver.common.keys import Keys\n\n# exceptions\n# from selenium.common.exceptions import [TheNameOfTheExceptionClass]\n\n##\n# ActionChains are a way to automate low level interactions such as mouse movements, \n# mouse button actions, key press, and context menu interactions. \n# This is useful for doing more complex actions like hover over and drag and drop.\n##\nfrom selenium.webdriver.common.action_chains import ActionChains\n# chain pattern\n\n# menu = driver.find_element_by_css_selector(\".nav\")\n# hidden_submenu = driver.find_element_by_css_selector(\".nav #submenu1\")\n# ActionChains(driver).move_to_element(menu).click(hidden_submenu).perform()\n\n# queue pattern\n# menu = driver.find_element_by_css_selector(\".nav\")\n# hidden_submenu = driver.find_element_by_css_selector(\".nav #submenu1\")\n# actions = ActionChains(driver)\n# actions.move_to_element(menu)\n# actions.click(hidden_submenu)\n# actions.perform()\n\n##\n# Use this class to interact with alert prompts. \n# It contains methods for dismissing, accepting, \n# inputting, and getting text from alert prompts.\n##\nfrom selenium.webdriver.common.alert import Alert\n\n##\n# utils\n##\n# selenium.webdriver.common.utils.find_connectable_ip\n# selenium.webdriver.common.utils.free_port\n# selenium.webdriver.common.utils.join_host_port\n# selenium.webdriver.common.utils.keys_to_typing\n\n##\n# Color conversion support class\n##\nfrom selenium.webdriver.support.color import Color\n# print(Color.from_string('#00ff33').rgba)\n# print(Color.from_string('rgb(1, 255, 3)').hex)\n# print(Color.from_string('blue').rgba)"
},
{
"alpha_fraction": 0.6535778641700745,
"alphanum_fraction": 0.6590028405189514,
"avg_line_length": 40.191490173339844,
"blob_id": "cb5347aa88696eefb9871c0df20910a6229fec9b",
"content_id": "b3699760001ce5149812bf0b8cba224a13e3e1d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3899,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 94,
"path": "/ejob/ejob/spiders/rongyp_job_spider_test.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os, json, re\nfrom urllib.parse import quote\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\nfrom ejob.items import JobItem\nfrom ejob.item_loaders import RongypJobItemLoader\n\ndef filter_job_url(url):\n\tm = re.search(r'openings_id=\\d+$', url)\n\tif not m:\n\t\treturn None\n\treturn url\n\ndef process_page_request(request):\n\treturn request\n\nclass RongypJobSpiderSpider(CrawlSpider):\n\tname = \"rongyp_job_spider_test\"\n\tallowed_domains = [\"rongyp.com\"]\n\tstart_urls = ['http://www.rongyp.com/']\n\turls = [('category', 'https://www.rongyp.com/index.php?m=Home&c=Job&a=jobSearch&tb_city=&tb_jobtype=&tb_jobtype_two=2132&tb_salary=&tb_workyear=&tb_degree=&tb_worknature=&dayscope=&keyword=&orderby=&company_size=')]\n\trules = [\n\t\tRule(LinkExtractor(tags=('a', ), restrict_xpaths=('//*[class=\"rightmember-page\"]', )), process_request=process_page_request, follow=True),\n\t\tRule(LinkExtractor(tags=('a', ), attrs=('href', ), unique=True, restrict_xpaths=('//*[@class=\"ryp-search-list\"]/*[@class=\"ryp-search-li\"]/p', ), process_value=filter_job_url), callback='parse_job'),\n\t]\n\t# query_str = '&'.join(['{}'.format(quote('city=全国'))])\n\n\tdef __init__(self, urlfile=None, *args, **kwargs):\n\t\t# scrapy crawl myspider -a category=electronics\n\t\tsuper(RongypJobSpiderSpider, self).__init__(*args, **kwargs)\n\t\tif urlfile:\n\t\t\twith open(urlfile, 'r', encoding='utf8') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tline = line.strip()\n\t\t\t\t\tif not line:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tself.urls.append((item['name'], item['url']))\n\t\telse:\n\t\t\tprint('URL file is missed')\n\n\tdef start_requests(self):\n\t\tfor category_name, category_url in self.urls:\n\t\t\t# category_url = '?'.join([category_url, self.query_str])\n\t\t\trequest = scrapy.Request(category_url, dont_filter=False)\n\t\t\trequest.meta['dont_redirect'] = True\n\t\t\trequest.meta['category_name'] = category_name\n\t\t\tyield request\n\n\t# def preprocess_value(self, value):\n\t# \tm = re.search(r'openings_id=\\d+$', value)\n\t# \tprint(value, m)\n\t# \tif not m:\n\t# \t\treturn None\n\t# \treturn value\n\n\tdef preprocess_request(self, request):\n\t\t# request.replace(cookies={'index_location_city': '%E4%B8%8A%E6%B5%B7'})\n\t\t# request.replace(url='?'.join(request.url, self.query_str))\n\t\treturn request\n\n\tdef parse_job(self, response):\n\n\t\titem = JobItem()\n\t\tl = RongypJobItemLoader(item=JobItem(), response=response)\n\t\tinfo = response.xpath('//*[contains(@class, \"ryp-info\")]/*[contains(@class, \"ryp-mask\")]')\n\t\tl.add_value('position', info.xpath('h1/text()').extract_first())\n\t\tl.add_value('salary', info.xpath('h6/*[@class=\"salary\"]/text()').extract_first())\n\t\tt = ''.join(info.xpath('h6/text()').extract())\n\t\tt = [s.strip() for s in t.split('|') if s.strip()]\n\t\tif len(t) < 3:\n\t\t\tt += ['']*(3-len(t))\n\t\tl.add_value('jobtype', t[0])\n\t\tl.add_value('education', t[1])\n\t\tl.add_value('exprience', t[2])\n\t\tl.add_value('temptation', response.xpath('//*[contains(@class, \"ryp-weals\")]/a/text()').extract())\n\t\tl.add_value('rawpost', response.xpath('//*[contains(@class, \"ryp-detail-content\")]/p/text()').extract())\n\t\tcompany = response.xpath('//*[contains(@class, \"ryp-detail-right\")]//*[@class=\"company\"]')\n\t\tl.add_value('company_name', company.xpath('h3/a/text()').extract_first())\n\t\tcompany_brief = ''\n\t\tfor detail in 
company.xpath('*[@class=\"detail\"]'):\n\t\t\tdetail_name = ''.join(detail.xpath('text()').extract())\n\t\t\tdetail_value = detail.xpath('span/text()').extract_first(default='')\n\t\t\tif '区域' in detail_name:\n\t\t\t\tl.add_value('company_brief', '区域: {}'.format(detail_value))\n\t\t\telif '行业' in detail_name:\n\t\t\t\tl.add_value('company_brief', '行业: {}'.format(detail_value))\n\t\t\telif '规模' in detail_name:\n\t\t\t\tl.add_value('company_brief', '规模: {}'.format(detail_value))\n\t\t\n\t\tl.add_value('address', response.xpath('//*[contains(@class, \"ryp-map\")]//*[contains(@class, \"company-adress\")]/text()').extract_first())\n\t\tyield l.load_item()"
},
{
"alpha_fraction": 0.6861924529075623,
"alphanum_fraction": 0.6903765797615051,
"avg_line_length": 20.727272033691406,
"blob_id": "92a0691e55e351201f34ec5b09bec35f5d9172ae",
"content_id": "edeaca1c1f6d1f085be04e8da56615ae40701b65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/ejob/ejob/spiders/pinggu_catalog_spider.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass PingguCatalogSpiderSpider(scrapy.Spider):\n\tname = \"pinggu_catalog_spider\"\n\tallowed_domains = [\"pinggu.org\"]\n\tstart_urls = ['http://bbs.pinggu.org/z_rc.php']\n\n\tdef parse(self, response):\n\t\tpass\n"
},
{
"alpha_fraction": 0.6295387744903564,
"alphanum_fraction": 0.6354268789291382,
"avg_line_length": 28.550724029541016,
"blob_id": "a38a5a9a107371c3464baba3e1395d64a6b2c887",
"content_id": "ec392b01c086f27cecc782018332bc7ab471765d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2096,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 69,
"path": "/china_stars/stars/stars/spiders/starinfo_spier.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "import re, string, json, urllib\nimport scrapy\nfrom stars.items import StarInfoItem\n\nclass StarsTestSpider(scrapy.Spider):\n\tname = \"starinfo-spider\"\n\tstart_urls = [\n\t\t'http://datalib.ent.qq.com/star/2829/index.shtml',\n\t]\n\tstarinfo_fields = {\n\t\t'name':'姓名', \n\t\t'another_name':'原名', \n\t\t'gender':'性别', \n\t\t'english_name':'英文名', \n\t\t'birthyear':'出生年', \n\t\t'birthday':'生日', \n\t\t'constellation':'星座', \n\t\t'nationality':'国籍', \n\t\t'area':'地域', \n\t\t'profession':'职业', \n\t\t'height':'身高', \n\t\t'bloodtype':'血型'\n\t}\n\tstarinfo_url = 'http://datalib.ent.qq.com/star/%d/starinfo.shtml'\n\n\tdef start_requests(self):\n\t\tfor url in self.start_urls:\n\t\t\trequest = scrapy.Request(url, callback=self.parse_star)\n\t\t\trequest.meta['name'] = '张一山'\n\t\t\trequest.meta['id'] = int(url.split('/')[-2])\n\t\t\trequest.meta['capital'] = 'Z'\n\n\t\t\tyield request\n\n\tdef parse_star(self, response):\n\t\tstarinfo = StarInfoItem()\n\t\tstarinfo['starid'] = response.meta['id']\n\t\tstarinfo['capital'] = response.meta['capital']\n\t\tstarinfo['name'] = response.meta['name']\n\t\tstarinfo['url'] = response.url\n\t\tavatar_url = response.xpath('//div[@id=\"star_face\"]/a/img/@src').extract_first(default='')\n\t\tstarinfo['avatar'] = avatar_url\n\t\tstarinfo['album'] = []\n\t\timage_urls = [avatar_url]\n\t\tcount = 1\n\t\twhile True:\n\t\t\timgs = response.xpath('//*[@id=\"demo%d\"]//img/@src' % count).extract()\n\t\t\tcount += 1\n\t\t\tif not imgs:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\timage_urls += imgs\n\t\tstarinfo['image_urls'] = image_urls\n\t\t\n\t\txpath = '//div[@id=\"infos\"]//td[strong[contains(text(), \"{field}\")]]/text()'\n\t\tfor k, field in self.starinfo_fields.items():\n\t\t\tvalue = response.xpath(xpath.format(field=field)).extract_first()\n\t\t\tif value:\n\t\t\t\tstarinfo[k] = value.strip()\n\t\t\telse:\n\t\t\t\tstarinfo[k] = ''\n\n\t\tstarinfo_url = self.starinfo_url % starinfo['starid']\n\t\tbody = urllib.request.urlopen(starinfo_url).read().decode('gbk').encode('utf-8').decode('utf-8')\n\t\tr = response.replace(body=body)\n\t\txpath = '//div[@id=\"left\"]/table[2]//td[@class=\"line22\"]/text()'\n\t\tstarinfo['brief'] = r.xpath(xpath).extract_first('').strip()\n\n\t\tyield starinfo"
},
{
"alpha_fraction": 0.8549783825874329,
"alphanum_fraction": 0.8722943663597107,
"avg_line_length": 37.41666793823242,
"blob_id": "6d970b087782dd2420c0a381c2ba81324738bfcc",
"content_id": "111514b30c071b2e5f7e935978eb560c766b4cb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 356,
"num_lines": 12,
"path": "/scrapy_tor/readme.md",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "---\ntitle: 利用Tor来突破反爬虫机制\nauthor: Xiaowen Bin\ndate: 2017/01/27\ncategories: tor, scrapy\n---\n\n## 反爬虫\n\n互联网的众多资源是成本低廉的数据来源,使用爬虫可以方便的获取这些数据,尤其成熟的爬虫框架使得数据爬取可以通过简单的几行代码来搞定。数据爬取成本的低廉,为网站拥有者带来了灾难。许多网站以其自身内容作为业务的基石,如果有抄袭者大量窃取网站内容为己所用,则显然侵犯到原创网站的利益。此外部分恶意爬虫开发者,编写的爬虫在短时间内发起大量的网络请求,从而将网络拥塞,阻碍正常用户对网站的访问。为了避免这些灾难的发生,网站开发人员通常会为网站撑起一把保护伞,来保护网站免受恶意网络请求,在各种保护机制中,最常见的就是基于用户IP访问数量和访问频率的限制,当某个用户访问超过阀值后,该用户就会被认定为具有恶意访问嫌疑,可能会弹出验证码窗口让用户填写以验证是正常用户而为网络爬虫或者直接封锁用户IP在较短时间内不能继续访问网站。\n\n## 突破反爬虫\n\n"
},
{
"alpha_fraction": 0.592270016670227,
"alphanum_fraction": 0.6200326681137085,
"avg_line_length": 29.131147384643555,
"blob_id": "9c20890748eece3452ee33445e059589e2be0866",
"content_id": "944639a480e231f07b0a88b4645279df8a887137",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1837,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 61,
"path": "/scrapy_tor/test/renew_tor_connection.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport time\nimport urllib\n\nimport requests\nimport stem\nimport stem.connection\nfrom stem import Signal\nfrom stem.control import Controller\n\nPROXY_PORT = 8118 # privoxy proxy port\nTOR_CONTROL_PORT = 9151\nTOR_CONTROL_PASSWORD = '123456'\n\ndef create_http_session(proxy_port):\n s = requests.Session()\n s.proxies = {\n \"http\": \"http://127.0.0.1:%d\" % proxy_port\n }\n s.headers.update({\n 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n })\n \n return s\n\ndef query(session, url):\n # communicate with TOR via a local proxy (privoxy)\n r = session.get(url)\n return r.text\n\n# signal TOR for a new connection \ndef renew_tor_connection(control_port, password):\n with Controller.from_port(port=control_port) as controller:\n controller.authenticate(password=password)\n controller.signal(Signal.NEWNYM)\n controller.close()\n\nif __name__ == '__main__':\n interval = 2 # two seconds\n oldIP = \"0.0.0.0\"\n newIP = \"0.0.0.0\"\n http_session = create_http_session(PROXY_PORT)\n\n for i in range(7):\n renew_tor_connection(TOR_CONTROL_PORT, TOR_CONTROL_PASSWORD)\n if newIP != \"0.0.0.0\":\n oldIP = newIP\n newIP = query(http_session, \"http://icanhazip.com/\")\n\n seconds = 0\n # loop until the \"new\" IP address\n # is different than the \"old\" IP address,\n # as it may take the TOR network some\n # time to effect a different IP address \n while oldIP == newIP:\n time.sleep(interval)\n seconds += interval\n newIP = query(http_session, \"http://icanhazip.com/\")\n print (\"%d seconds elapsed awaiting a different IP address.\" % seconds)\n # new IP address\n print (\"newIP: %s\" % newIP)"
},
{
"alpha_fraction": 0.6646646857261658,
"alphanum_fraction": 0.6656656861305237,
"avg_line_length": 34.27058792114258,
"blob_id": "24a8c7e5aa1e5efc247c2ae923b85587e3b83e4b",
"content_id": "1962679bcfeafef37970244ff7ef99d21662b345",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2999,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 85,
"path": "/notification/notification/middlewares.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nfrom scrapy import signals\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\n\n\nclass NotificationSpiderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Response, dict\n # or Item objects.\n pass\n\n def process_start_requests(start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass PhantomjsMiddleware(object):\n def __init__(self, phantomjs_path=None, extra_script=None):\n if not phantomjs_path:\n raise Exception('phantomjs path should not be empty')\n self.script = None\n if extra_script:\n with open(extra_script, 'r', encoding='utf8') as f:\n self.script = f.read()\n self.driver = webdriver.PhantomJS(phantomjs_path)\n\n @classmethod\n def from_crawler(cls, crawler):\n phantomjs_path = crawler.settings.get('PHANTOMJS_PATH')\n extra_script = crawler.settings.get('EXTRA_SCRIPT')\n\n return cls(phantomjs_path, extra_script)\n\n def process_request(self, request, spider):\n self.driver.get(request.url)\n if self.script:\n self.driver.execute_script(self.script)\n body = self.driver.page_source.encode('utf8')\n response = HtmlResponse(url=self.driver.current_url, body=body)\n return response # end any process_request methods"
},
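The `PhantomjsMiddleware` above pulls `PHANTOMJS_PATH` and `EXTRA_SCRIPT` from the crawler settings; a hedged sketch of the corresponding settings.py entries follows. The priority value and both paths are assumptions, only the setting names come from the middleware's `from_crawler`.

```python
# Hypothetical additions to the notification project's settings.py.
PHANTOMJS_PATH = '/usr/local/bin/phantomjs'  # assumed install location
EXTRA_SCRIPT = 'expand_page.js'              # optional JS run on each page; name is made up

DOWNLOADER_MIDDLEWARES = {
    'notification.middlewares.PhantomjsMiddleware': 543,  # priority value assumed
}
```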
{
"alpha_fraction": 0.6464182138442993,
"alphanum_fraction": 0.6507744193077087,
"avg_line_length": 35.25438690185547,
"blob_id": "b175c41617dd868c418a1fb4ddca059e830e79f9",
"content_id": "6117fcd4daa8f59eeac4d799c826c9e1495fafa2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4132,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 114,
"path": "/scrapy_tor/example/example/middlewares.py",
"repo_name": "wikty/Spiders",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nimport random\n\nfrom scrapy.downloadermiddlewares.useragent import UserAgentMiddleware\nfrom stem import Signal\nfrom stem.control import Controller\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\n\nclass RandomUserAgentMiddleware(UserAgentMiddleware):\n def __init__(self, agents=[]):\n super(RandomUserAgentMiddleware, self).__init__()\n if not agents:\n agents = ['Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)']\n self.agents = agents\n\n @classmethod\n def from_crawler(cls, crawler):\n # instance of the current class\n ua_list = []\n with open(crawler.settings.get('USER_AGENT_LIST'), 'r') as f:\n ua_list = [ua.strip() for ua in f.readlines()]\n\n return cls(ua_list)\n\n def process_request(self, request, spider):\n ua = random.choice(self.agents)\n request.headers.setdefault('User-Agent', ua)\n\n\nclass ProxyMiddleware(object):\n def __init__(self, http_proxy=None, tor_control_port=None, tor_password=None):\n if not http_proxy:\n raise Exception('http proxy setting should not be empty')\n if not tor_control_port:\n raise Exception('tor control port setting should not be empty')\n if not tor_password:\n raise Exception('tor password setting should not be empty')\n self.http_proxy = http_proxy\n self.tor_control_port = tor_control_port\n self.tor_password = tor_password\n self.count = 1\n self.times = 50\n\n @classmethod\n def from_crawler(cls, crawler):\n http_proxy = crawler.settings.get('HTTP_PROXY')\n tor_control_port = crawler.settings.get('TOR_CONTROL_PORT')\n tor_password = crawler.settings.get('TOR_PASSWORD')\n\n return cls(http_proxy, tor_control_port, tor_password)\n\n def process_request(self, request, spider):\n self.count = (self.count+1) % self.times\n if not self.count:\n # access tor ControlPort to signal tor get a new IP\n with Controller.from_port(port=self.tor_control_port) as controller:\n controller.authenticate(password=self.tor_password)\n controller.signal(Signal.NEWNYM)\n \n # scrapy support http proxy\n #request.meta['proxy'] = self.http_proxy\n\n\nclass PhantomjsRequestMiddleware(object):\n def __init__(self, phantomjs_path=None):\n if not phantomjs_path:\n raise Exception('phantomjs path should not be empty')\n self.driver = webdriver.PhantomJS(phantomjs_path)\n\n @classmethod\n def from_crawler(cls, crawler):\n phantomjs_path = crawler.settings.get('PHANTOMJS_PATH')\n\n return cls(phantomjs_path)\n\n def process_request(self, request, spider):\n self.driver.get(request.url)\n body = self.driver.page_source.encode('utf8')\n response = HtmlResponse(url=self.driver.current_url, body=body)\n return response # end any process_request methods\n\n\nclass DynamicPageProxyRequestMiddleware(object):\n def __init__(self, phantomjs_path=None, proxy=None):\n if not phantomjs_path:\n raise Exception('phantomjs path should not be empty')\n if not proxy:\n raise Exception('proxy should not be empty')\n service_args = [\n '--proxy=%s' % proxy,\n '--proxy-type=http',\n '--ignore-ssl-errors=true',\n ]\n self.driver = webdriver.PhantomJS(phantomjs_path, service_args=service_args)\n\n @classmethod\n def from_crawler(cls, crawler):\n phantomjs_path = crawler.settings.get('PHANTOMJS_PATH')\n proxy = crawler.settings.get('HTTP_PROXY')\n\n return cls(phantomjs_path, proxy)\n\n def process_request(self, request, spider):\n self.driver.get(request.url)\n body 
= self.driver.page_source.encode('utf8')\n response = HtmlResponse(url=self.driver.current_url, body=body)\n return response # end any process_request methods"
}
] | 42 |
x1E0/flask-sign | https://github.com/x1E0/flask-sign | 2e65f2f08e561f328c1ddcc06362922d0aa1980c | 527efd94537544e3c79ddde558b60016fb7ee3db | d78543cb77b9d73c2e8165b5cf6a3327a6c42295 | refs/heads/main | 2023-07-25T09:49:46.023238 | 2021-03-23T23:28:54 | 2021-03-23T23:28:54 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7836734652519226,
"alphanum_fraction": 0.7836734652519226,
"avg_line_length": 80.66666412353516,
"blob_id": "1c49cb1e8771710ebbd46502b13d227430b857c1",
"content_id": "7384e086be1358e08aac8785a48d8c7fecc7a1e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 245,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 3,
"path": "/README.md",
"repo_name": "x1E0/flask-sign",
"src_encoding": "UTF-8",
"text": "# flask-sign\nPart of a script I wrote to encode a flask-cookie payload for a ctf chall, I'll add the signing part whenever I need it for another CTF\nIts pretty bad rn because I wrote it w/sleep deprivation so it might be more useful eventually.\n"
},
{
"alpha_fraction": 0.5681098103523254,
"alphanum_fraction": 0.5902851223945618,
"avg_line_length": 27.696969985961914,
"blob_id": "123cad68f6126fd0de5d6c8b9fbad23d9ff57e92",
"content_id": "080ad95c25d9058a8456a2529885ae49d91497b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1894,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 66,
"path": "/flask_sign.py",
"repo_name": "x1E0/flask-sign",
"src_encoding": "UTF-8",
"text": "import hashlib\nimport json\nimport zlib\nfrom base64 import *\nfrom itsdangerous import base64_decode, base64_encode\n\ndef decode(payload):\n compressed = False\n if payload[0] == \".\": \n compressed = True\n \n payload = payload[1:] \n data = payload.split(\".\")[0]\n data = base64_decode(data)\n \n if compressed:\n data = zlib.decompress(data)\n \n data = data.decode(\"utf-8\") \n return data\n\ndef hook(obj):\n if len(obj) != 1:\n return obj\n \n key, val = next(iter(obj.items())) \n # I only handled 1 situation\n if key == ' t':\n pass #return tuple(val)\n elif key == ' u':\n pass #return UUID(val)\n elif key == ' b':\n return b64decode(val)\n elif key == ' m':\n pass #return Markup(val)\n elif key == ' d':\n pass #return parse_date(val)\n \n return obj\n\ndef encode(cookie):\n \"\"\" kinda trash concatenation but wrote w/lots of sleep dep\"\"\"\n out = \"{\"\n for key, val in iter(cookie.items()): \n # {\"isinternal\":0,\"role\":{\" b\":\"Q1VTVE9NRVI=\"},\"username\":{\" b\":\"Z3Vlc3Q=\"}}\n if isinstance(val, bytes):\n encoded = b64encode(val)\n out += f'\"{key}\"' + \":{\" + f'\" b\":\"{encoded.decode()}\"' + '},'\n else:\n out += f'\"{key}\":{val},'\n out = out[:-1] + '}'\n return out\n\ncookie = {\"isinternal\": 1, \"role\": b\"CUSTOMER\", \"username\": b\"supporttest\"}\n#cookie = {\"isinternal\": 0, \"role\": b\"CUSTOMER\", \"username\": b\"guest\"}\n\npayload = \".eJyrVsoszswrSS3KS8xRsjLQUSrKz0lVsqpWUkhSslIKNAwLCXO19AsK87RVqtVRKi0GKcxFKIgyDstJNg4EStYCAGeGFq0.Ezp1rg.ZlRylTQ0tzAa0sSGe4eLn5FwGbI\"\ndata = decode(payload)\nprint(json.loads(data, object_hook=hook))\nencoded = encode(cookie).encode()\nprint(encoded)\ncompressed = zlib.compress(encoded)\nprint(compressed)\nc = base64_encode(compressed)\nflask_cookie_pt1 = json.dumps(c.decode())\nprint(flask_cookie_pt1)\n"
}
] | 2 |
lumpywizard/check_email_status | https://github.com/lumpywizard/check_email_status | cf9f027f4a3a5720d322ddde8332fe969f58e500 | 3a4c5dc42ada61325d5d9baad9e2b1b78084ee2f | 49a5e3bc915c49c1d7f9e0798fa0ce82a5c9a3c3 | refs/heads/master | 2020-12-25T14:22:42.815656 | 2016-09-08T14:26:25 | 2016-09-08T14:26:25 | 67,232,641 | 0 | 0 | null | 2016-09-02T15:11:28 | 2016-09-02T15:11:40 | 2016-09-08T14:26:26 | Python | [
{
"alpha_fraction": 0.6185064911842346,
"alphanum_fraction": 0.6195887327194214,
"avg_line_length": 23.3157901763916,
"blob_id": "eafb91a910a83a1d60b4d2b9b4bebee258f69b3d",
"content_id": "92808a57c9ac812aa64decc43818261ba6e0bc86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1848,
"license_type": "permissive",
"max_line_length": 110,
"num_lines": 76,
"path": "/check_email_status/resolvers.py",
"repo_name": "lumpywizard/check_email_status",
"src_encoding": "UTF-8",
"text": "import DNS\n\nclass MXRecord:\n _domain = None\n _priority = None\n _exchange = None\n\n def __init__(self, priority=None, exchange=None, domain=None):\n self._priority = priority\n self._exchange = exchange\n self._domain = domain\n\n @property\n def priority(self):\n return self._priority\n\n @property\n def exchange(self):\n return self._exchange\n\n @property\n def domain(self):\n return self._domain\n\n\nclass MXResolver:\n \"\"\"\n Gets an array of MXRecords associated to the domain specified.\n\n :param domain:\n :return: [MXRecord]\n \"\"\"\n @staticmethod\n def get_mx_records(domain):\n return []\n\n\nclass PyDNSMXResolver(MXResolver):\n @staticmethod\n def get_mx_records(domain):\n \"\"\"\n Gets an array of MXRecords associated to the domain specified.\n\n :param domain:\n :return: [MXRecord]\n \"\"\"\n\n DNS.DiscoverNameServers()\n request = DNS.Request()\n response = request.req(name=domain, qtype=DNS.Type.MX)\n\n mx_records = []\n for answer in response.answers:\n mx_records.append(MXRecord(priority=answer['data'][0], exchange=answer['data'][1], domain=domain))\n\n return sorted(mx_records, key=lambda record: record.priority)\n\n\nclass DNSPythonMXResolver(MXResolver):\n @staticmethod\n def get_mx_records(domain):\n \"\"\"\n Gets an array of MXRecords associated to the domain specified.\n\n :param domain:\n :return: [MXRecord]\n \"\"\"\n import dns.resolver\n\n response = dns.resolver.query(domain, 'MX')\n\n mx_records = []\n for answer in response.answers:\n mx_records.append(MXRecord(priority=answer.preference, exchange=answer.exchange, domain=domain))\n\n return sorted(mx_records, key=lambda record: record.priority)\n"
},
{
"alpha_fraction": 0.5925304293632507,
"alphanum_fraction": 0.6042802929878235,
"avg_line_length": 32.09722137451172,
"blob_id": "ec22d4bf0a57278ccb9d6679645107281662d77a",
"content_id": "bed34c2648ef4fe6ec74dd020b99385d7c125dea",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2383,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 72,
"path": "/check_email_status/__init__.py",
"repo_name": "lumpywizard/check_email_status",
"src_encoding": "UTF-8",
"text": "import re\nimport smtplib\nimport socket\n\n\ndef check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):\n \"\"\"\n Checks if an email might be valid by getting the status from the SMTP server.\n\n :param mx_resolver: MXResolver\n :param recipient_address: string\n :param sender_address: string\n :param smtp_timeout: integer\n :param helo_hostname: string\n :return: dict\n \"\"\"\n domain = recipient_address[recipient_address.find('@') + 1:]\n if helo_hostname is None:\n helo_hostname = domain\n\n ret = {'status': 101, 'extended_status': None, 'message': \"The server is unable to connect.\"}\n\n records = []\n try:\n records = mx_resolver.get_mx_records(helo_hostname)\n except socket.gaierror:\n ret['status'] = 512\n ret['extended_status'] = \"5.1.2 Domain name address resolution failed in MX lookup.\"\n\n smtp = smtplib.SMTP(timeout=smtp_timeout)\n\n for mx in records:\n try:\n connection_status, connection_message = smtp.connect(mx.exchange)\n if connection_status == 220:\n smtp.helo(domain)\n smtp.mail(sender_address)\n status, message = smtp.rcpt(recipient_address)\n ret['status'] = status\n\n pattern = re.compile('(\\d+\\.\\d+\\.\\d+)')\n matches = re.match(pattern, message)\n if matches:\n ret['extended_status'] = matches.group(1)\n\n ret['message'] = message\n smtp.quit()\n break\n except smtplib.SMTPConnectError:\n ret['status'] = 111\n ret['message'] = \"Connection refused or unable to open an SMTP stream.\"\n except smtplib.SMTPServerDisconnected:\n ret['status'] = 111\n ret['extended_status'] = \"SMTP Server disconnected\"\n except socket.gaierror:\n ret['status'] = 512\n ret['extended_status'] = \"5.1.2 Domain name address resolution failed.\"\n\n return ret\n\n\nif __name__ == \"__main__\":\n try:\n console_input = raw_input\n except NameError:\n console_input = input\n\n recipient_email = console_input(\"Recipient Email: \")\n sender_email = console_input(\"Sender Email: \")\n from resolvers import PyDNSMXResolver\n\n print(check_email_status(PyDNSMXResolver, recipient_email, sender_email))\n"
},
{
"alpha_fraction": 0.6914893388748169,
"alphanum_fraction": 0.6978723406791687,
"avg_line_length": 35.11538314819336,
"blob_id": "dc5d59955e79777442b3e4b14253e49671e6c5fa",
"content_id": "a1c18c5f21275e1eb3bc796e2feffe5a26ef0fcc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 940,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 26,
"path": "/README.rst",
"repo_name": "lumpywizard/check_email_status",
"src_encoding": "UTF-8",
"text": "check_email_status\n=======================\n\nThis package exists to help poll smtp servers to see if a mailbox address is valid.\nIt doesn't return true or false, but a set of status codes, extended status codes,\nand full messages from the responding server.\n\nUsage\n======================\n\nHere is an example using pyDNS as the MX resolver::\n\n from check_email_status import check_email_status\n from check_email_status.resolvers import PyDNSMXResolver\n\n status = check_email_status(PyDNSMXResolver, '[email protected]', '[email protected]')\n\n # This is the SMTP status code returned by the server. (e.g.) 550\n print status['status']\n\n # This is the extended status code, for instance 5.1.1 would mean the account doesn't exist.\n # If an extended code is not sent, this is not set.\n print status['extended_status']\n\n # This is the message returned from the mail server describing the results of the query.\n print status['messsage']\n\n"
}
] | 3 |
thoklei/bigdatachallenge | https://github.com/thoklei/bigdatachallenge | d8a334eef7917d426e1d9575e94811e6c878a86e | 878bfbea5058a767c7894dc9f62b9564a82ee75a | ab97e5861164d4d328341e2fcb824be60e503edf | refs/heads/master | 2021-10-26T00:00:44.593167 | 2019-04-08T16:50:18 | 2019-04-08T16:50:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6846038699150085,
"alphanum_fraction": 0.7126308083534241,
"avg_line_length": 38.955223083496094,
"blob_id": "ee8a41429075e5b95736b6c7368f13674c543e60",
"content_id": "52e81ac97115daac472e3b459cf08ea332cedead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2676,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 67,
"path": "/models.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \nimport tensorflow.keras.layers as layers \nfrom AutoconLayer import AutoconLayer\n\ndef get_bartimaeus(sequence_length, rec_units, drop1, dense_units, drop2):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(rec_units, input_shape=[sequence_length,19]))\n model.add(layers.Dropout(drop1))\n model.add(layers.Dense(dense_units, activation='tanh', kernel_initializer=tf.keras.initializers.lecun_normal()))\n model.add(layers.Dropout(drop2))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\ndef get_rnn(sequence_length, rec_units, drop1, dense_units, drop2):\n model = tf.keras.Sequential()\n model.add(layers.SimpleRNN(rec_units, input_shape=[sequence_length, 19]))\n model.add(layers.Dropout(drop1))\n model.add(layers.Dense(dense_units, activation='tanh', kernel_initializer=tf.keras.initializers.lecun_normal()))\n model.add(layers.Dropout(drop2))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\ndef get_dwarf(sequence_length, rec_units, drop):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(rec_units, input_shape=[sequence_length,19]))\n model.add(layers.Dropout(drop))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\n\ndef get_nathanael(sequence_length):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(60, input_shape=[sequence_length,19]))\n model.add(layers.Dropout(0.5))\n #model.add(LSTM(50))\n model.add(layers.Dense(32, activation='tanh'))\n #model.add(layers.Dropout(0.8))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\ndef get_ptolemaeus(sequence_length):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(60, input_shape=[sequence_length,19]))\n model.add(layers.Dropout(0.8))\n #model.add(LSTM(50))\n model.add(layers.Dense(32, activation='tanh'))\n model.add(layers.Dropout(0.8))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\ndef get_grindelwald(sequence_length):\n model = tf.keras.Sequential()\n model.add(layers.LSTM(80, input_shape=[sequence_length, 19]))\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(64, activation='relu', kernel_initializer=tf.keras.initializers.he_normal()))\n model.add(layers.Dense(22, activation='softmax'))\n return model\n\ndef get_autoconceptor(sequence_length):\n model = tf.keras.Sequential()\n model.add(AutoconLayer(output_dim=50, alpha=200, lam=0.001, batchsize=32, layer_norm=True, reuse=None)) \n model.add(layers.Dense(32, activation='tanh'))\n #model.add(layers.Dropout(0.8))\n model.add(layers.Dense(22, activation='softmax'))\n\n return model"
},
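A hedged usage sketch for the model builders above: it compiles one of them and fits on placeholder data shaped like the 19-sensor sequences used throughout this repo. The hyperparameters, sequence length, and optimizer choice are all assumptions, not values taken from the project.

```python
# Minimal usage sketch (not from the repo): compile and fit the LSTM model
# on dummy data with the [batch, sequence_length, 19] shape the models expect.
import numpy as np
import tensorflow as tf
from models import get_bartimaeus

SEQ_LEN = 50  # assumed number of samples per sequence

model = get_bartimaeus(SEQ_LEN, rec_units=60, drop1=0.5, dense_units=32, drop2=0.5)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='sparse_categorical_crossentropy',  # integer labels, 22 classes
              metrics=['accuracy'])

x = np.random.randn(128, SEQ_LEN, 19).astype(np.float32)  # placeholder features
y = np.random.randint(0, 22, size=128)                    # 22 movement classes
model.fit(x, y, batch_size=32, epochs=1)
```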
{
"alpha_fraction": 0.5983260869979858,
"alphanum_fraction": 0.6123554706573486,
"avg_line_length": 33.89285659790039,
"blob_id": "1e884ee898be529c707c4c6083378dd9be2dea0c",
"content_id": "98964726fda4a9bb7fa77ac97343df9777e8b9b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16608,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 476,
"path": "/utils.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import sys\nimport os\nimport csv\nimport itertools\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom scipy import signal\nfrom scipy import ndimage\nfrom multiprocessing import Pool\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\nclasses = ['run', 'walk', 'stand', 'sit', 'sit-to-stand', 'stand-to-sit', \n 'stair-up', 'stair-down', 'jump-one-leg', 'jump-two-leg', 'curve-left-step', \n 'curve-right-step', 'curve-left-spin-Lfirst', 'curve-left-spin-Rfirst', \n 'curve-right-spin-Lfirst', 'curve-right-spin-Rfirst', 'lateral-shuffle-left', \n 'lateral-shuffle-right','v-cut-left-Lfirst', 'v-cut-left-Rfirst', 'v-cut-right-Lfirst', 'v-cut-right-Rfirst']\n\nsensors = ['EMG1', 'EMG2', 'EMG3', 'EMG4', 'Microphone', 'ACC upper X', 'ACC upper Y','ACC upper Z', 'Goniometer X',\n 'ACC lower X', 'ACC lower Y', 'ACC lower Z', 'Goniometer Y', 'Gyro upper X', 'Gyro upper Y', 'Gyro upper Z',\n 'Gyro lower X', 'Gyro lower Y', 'Gyro lower Z']\n\nvariance_sensors = ['EMG1', 'EMG2', 'EMG3', 'EMG4', 'Microphone']\n\nsmooth_sensors = ['ACC upper X', 'ACC upper Y','ACC upper Z', 'Goniometer X','ACC lower X', 'ACC lower Y', \n 'ACC lower Z', 'Goniometer Y', 'Gyro upper X', 'Gyro upper Y', 'Gyro upper Z', 'Gyro lower X', \n 'Gyro lower Y', 'Gyro lower Z']\n\ndata_path = \"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bbdc_2019_Bewegungsdaten/\"\n\n\n# --------- utility functions ---------- #\n\ndef smooth(data, windowsize, std):\n \"\"\"\n Smoothes a 1d-data array with a gaussian of given size and sigma\n \"\"\"\n kernel = signal.gaussian(windowsize, std=std)\n kernel /= np.sum(kernel)\n return np.convolve(data, kernel, 'valid')\n\ndef variance_filter(data, windowsize):\n \"\"\"\n Calculates the local variance of a signal by evaluating a sliding window.\n \"\"\"\n half = windowsize//2\n res = np.zeros(data.shape[0]-windowsize)\n for i in range(half,len(data)-half):\n res[i-half] = np.std(data[i-half:i+half])\n return res\n\ndef sample(data, num_samples):\n \"\"\"\n Samples a 1d-signal num_samples times.\n \"\"\"\n samples = [int(sample) for sample in np.linspace(0, data.shape[0]-1, num_samples)]\n return data[samples]\n \n\ndef recurrent_feature_extractor(data, num_samples):\n \"\"\"\n Extracts features from a 19-dimensional sequence.\n\n data = 2d-numpy array of shape [timesteps, sensors]\n num_samples = how many samples to extract\n \"\"\"\n\n def smooth_extractor(data, num_samples):\n \"\"\"\n Samples a signal after smoothing it.\n\n data = 1d-numpy array of length timestep\n num_samples = how many samples to extract\n \"\"\"\n smoothed = smooth(data,500,200)\n sstd = np.std(smoothed)\n if sstd == 0:\n sstd = 0.00001\n smoothed = (smoothed - np.mean(smoothed))/sstd\n return sample(smoothed, num_samples)\n \n def variance_extractor(data, num_samples):\n \"\"\"\n Samples the local variance of a signal.\n\n data = 1d-numpy array of length timesteps\n num_samples = how many samples to extract\n \"\"\"\n var_data = smooth(variance_filter(data,windowsize=200), windowsize=5, std=0.8)\n vstd = np.std(var_data)\n if vstd == 0:\n vstd = 0.00001\n var_data = (var_data - np.mean(var_data))/vstd\n return sample(var_data, num_samples)\n\n\n features = []\n \n for sensor in variance_sensors:\n features.append(variance_extractor(data[:,sensors.index(sensor)], num_samples))\n \n if(np.isnan(np.array(features)).any()):\n raise ValueError(\"Error in variance\")\n \n for sensor in smooth_sensors:\n 
features.append(smooth_extractor(data[:,sensors.index(sensor)], num_samples))\n \n if(np.isnan(np.array(features)).any()):\n raise ValueError(\"Error in smooth\")\n \n return features\n\ndef threaded_recurrent_feature_extractor(data, num_samples):\n \"\"\"\n data = 2d-numpy array of shape [timesteps, sensors]\n \n \"\"\"\n pool = ThreadPool(8)\n \n variance_sequences = []\n smooth_sequences = []\n \n for sensor in variance_sensors:\n variance_sequences.append(data[:,sensors.index(sensor)])\n \n for sensor in smooth_sensors:\n smooth_sequences.append(data[:,sensors.index(sensor)])\n \n var_results = pool.starmap(variance_extractor, zip(variance_sequences, itertools.repeat(num_samples)))\n \n if(np.isnan(np.array(var_results)).any()):\n raise ValueError(\"NaN after variance feature extraction\")\n \n smo_results = pool.starmap(smooth_extractor, zip(smooth_sequences, itertools.repeat(num_samples)))\n \n if(np.isnan(np.array(smo_results)).any()):\n raise ValueError(\"NaN after smoothing variance extraction\")\n \n pool.close()\n pool.join()\n \n return var_results + smo_results\n\n\n\ndef old_feature_extractor(data, num_samples):\n \"\"\"\n I wrote a new version of this, but apparently the extracted features were worse, so...\n \"\"\"\n\n def old_variance_extractor(data, num_samples):\n \"\"\"\n Samples the local variance of a signal.\n Differences: variance-data is smoothed, and it is not normalized to the mean, only divided by the max\n\n data = 1d-numpy array of length timesteps\n num_samples = how many samples to extract\n \"\"\"\n var_data = smooth(variance_filter(data,windowsize=100), windowsize=100, std=25)\n vmax = np.max(var_data)\n if(vmax == 0):\n vmax = 0.00001\n var_data = var_data/vmax\n return sample(var_data, num_samples)\n\n def old_smooth_extractor(data, num_samples):\n \"\"\"\n Samples a signal after smoothing it.\n\n data = 1d-numpy array of length timestep\n num_samples = how many samples to extract\n \"\"\"\n smoothed = smooth(data,200,50)\n smax = np.max(smoothed)\n if smax == 0:\n smax = 0.00001\n normalized = smoothed/smax\n return sample(normalized, num_samples)\n\n features = []\n \n for sensor in variance_sensors:\n features.append(old_variance_extractor(data[:,sensors.index(sensor)], num_samples))\n \n if(np.isnan(np.array(features)).any()):\n raise ValueError(\"Error in variance\")\n \n for sensor in smooth_sensors:\n features.append(old_smooth_extractor(data[:,sensors.index(sensor)], num_samples))\n \n if(np.isnan(np.array(features)).any()):\n raise ValueError(\"Error in smooth\")\n \n return features\n\ndef split_dataset(file, train_name, test_name, percentage=10):\n \"\"\"\n Splits the file that contains the original dataset in two files, one for training and one for testing.\n \n file = the original file\n \"\"\"\n df = pd.read_csv(file)\n headers = list(df)\n files = df.values\n \n indices = np.random.randint(low=0, high=files.shape[0], size=files.shape[0]//percentage)\n \n testset = np.take(files, indices, axis=0)\n files = np.delete(files, indices, axis=0)\n \n odf = pd.DataFrame(files)\n odf.columns = headers\n odf.to_csv(train_name+\".csv\")\n \n tdf = pd.DataFrame(testset)\n tdf.columns = headers\n tdf.to_csv(test_name+\".csv\")\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return 
tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef serialize_example(featurelist, label, id, subject):\n \"\"\"\n Creates a tf.Example message from the list of features and the label, where\n every element in the featurelist is actually a sequence=ndarray\n \"\"\"\n\n feature = {}\n for i in range(len(featurelist)):\n feature['feature'+str(i)] = tf.train.Feature(float_list=tf.train.FloatList(value=list(featurelist[i])))\n #_float_feature(featurelist[i])\n feature['label'] = _int64_feature(label)\n feature['subject'] = _int64_feature(subject)\n feature['id'] = _int64_feature(id)\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\ndef dataset_creator(in_file, outfile, feature_extractor, *args):\n \"\"\"\n Creates a dataset (i.e. outfile.tfrecords) from in_file.csv\n \"\"\"\n df = pd.read_csv(in_file)\n id = 0\n with tf.python_io.TFRecordWriter(outfile+\".tfrecords\") as writer:\n \n for index, row in df.iterrows():\n if(index % 100 == 0):\n print(\"Digesting\",row['Datafile'])\n if(row['Label'] in classes):\n path = row['Datafile']\n data = pd.read_csv(data_path+path).values\n\n label = classes.index(row['Label'])\n subject = int(row['Subject'][-2:])\n extracted_featurelist = feature_extractor(data, args[0])\n\n serialized_example = serialize_example(extracted_featurelist, label, id, subject)\n id = id + 1\n writer.write(serialized_example)\n else:\n print(row['Label'],\"not in known classes!\")\n\n\ndef challenge_dataset_creator(in_file, outfile, feature_extractor, *args):\n \"\"\"\n Creates a dataset (i.e. outfile.tfrecords) from in_file.csv\n \"\"\"\n df = pd.read_csv(in_file)\n id = 0\n with tf.python_io.TFRecordWriter(outfile+\".tfrecords\") as writer:\n \n for index, row in df.iterrows():\n if(index % 100 == 0):\n print(\"Digesting\",row['Datafile'])\n path = row['Datafile']\n data = pd.read_csv(data_path+path).values\n\n subject = int(row['Subject'][-2:])\n extracted_featurelist = feature_extractor(data, args[0])\n\n serialized_example = serialize_example(extracted_featurelist, 0, id, subject)\n id = id + 1\n writer.write(serialized_example)\n\n\n\ndef read_recurrent_dataset(path,\n sequence_length, \n batchsize,\n filter_subjects=None, \n filter_ids=None,\n id_mode=None, \n sub_mode=None,\n training=True,):\n \"\"\"\n mode = whether to only yield elements that are in the lists or whether to ignore elements that are in the list\n \"\"\"\n if not id_mode is None and not id_mode in ['include', 'exclude']:\n raise ValueError(\"Mode unknwon: Has to be 'include' or 'exclude'\")\n\n if not sub_mode is None and not sub_mode in ['include', 'exclude']:\n raise ValueError(\"Mode unknwon: Has to be 'include' or 'exclude'\")\n\n if not filter_subjects is None:\n filter_subjects_tensor = tf.constant(filter_subjects, dtype=tf.int64)\n if not filter_ids is None:\n filter_ids_tensor = tf.constant(filter_ids, dtype=tf.int64)\n\n features = {}\n for i in range(19):\n features['feature'+str(i)] = tf.FixedLenFeature([sequence_length], tf.float32, default_value=np.zeros((sequence_length)))\n features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)\n features['subject'] = tf.FixedLenFeature([], tf.int64, default_value=0)\n features['id'] = tf.FixedLenFeature([], tf.int64, default_value=0)\n\n # global_means = tf.constant(np.load(\"global_means.npy\"), dtype=tf.float32)\n # global_vars = tf.constant(np.load(\"global_vars.npy\"), dtype=tf.float32)\n\n def _parse_function(example_proto):\n\n 
parsed_features = tf.parse_single_example(example_proto, features)\n\n data = []\n for i in range(19):\n data.append(parsed_features['feature'+str(i)])\n\n data = tf.reshape(data, (sequence_length,19))\n # # data 80, 19\n # data = data - global_means\n # data = data / global_vars\n\n return data, tf.one_hot(parsed_features['label'],22)\n\n def _filter_by_subjects(example_proto):\n\n parsed_features = tf.parse_single_example(example_proto, features)\n subject = parsed_features['subject']\n\n if(sub_mode == 'exclude'):\n #return not subject in filter_subjects\n return tf.logical_not(tf.reduce_any(tf.equal(subject,filter_subjects_tensor), axis=0))\n else:\n #return subject in filter_subjects\n return tf.reduce_any(tf.equal(subject,filter_subjects_tensor), axis=0)\n\n def _filter_by_ids(example_proto):\n\n parsed_features = tf.parse_single_example(example_proto, features)\n id = parsed_features['id']\n\n if(id_mode == 'exclude'):\n #return not id in filter_ids\n return tf.logical_not(tf.reduce_any(tf.equal(id,filter_ids_tensor), axis=0))\n else: \n # mode == include, return id in filter_ids\n return tf.reduce_any(tf.equal(id,filter_ids_tensor), axis=0)\n\n def _noise(example_proto):\n parsed_features = tf.parse_single_example(example_proto, features)\n\n data = []\n for i in range(19):\n data.append(parsed_features['feature'+str(i)])\n\n data = tf.reshape(data, (sequence_length,19))\n # data is of shape samples x sensors\n means, vars = tf.nn.moments(data, axes=[0])\n new_data = []\n for i in range(19):\n noise = tf.random.normal(shape=(sequence_length,1),\n mean=0.0,\n stddev=vars[i]*3.0,\n dtype=tf.float32)\n #print(\"noise:\",noise)\n #print(\"data:\",data[:,i])\n new_data.append(tf.reshape(data[:,i], [sequence_length, 1]) + noise)\n #print(\"result:\", tf.reshape(data[:,i], [sequence_length, 1]) + noise)\n\n data = tf.stack(new_data, axis=1)\n #print(data)\n\n return tf.reshape(data, [sequence_length, 19]), tf.one_hot(parsed_features['label'],22)\n\n dataset = tf.data.TFRecordDataset(path)\n\n if not filter_subjects is None:\n dataset = dataset.filter(_filter_by_subjects)\n if not filter_ids is None:\n dataset = dataset.filter(_filter_by_ids)\n \n if training:\n dataset=dataset.map(_noise)\n else:\n dataset = dataset.map(_parse_function)\n dataset.shuffle(1000)\n dataset = dataset.batch(batchsize, drop_remainder=training)\n #dataset = dataset.prefetch(1)\n dataset = dataset.repeat()\n return dataset \n\n\ndef get_partial_mean(data):\n return np.mean(data, axis=0), data.shape[0]\n \ndef get_partial_variance(data):\n return np.std(data, axis=0), data.shape[0]\n\n\ndef global_info(directory):\n\n from pathlib import Path\n\n pathlist = Path(directory).glob('**/*.csv')\n meanlist = []\n varlist = []\n weightlist = []\n \n for filename in pathlist:\n if not \"challenge.csv\" in str(filename) and not \"train.csv\" in str(filename): \n data = pd.read_csv(filename).values\n meanlist.append(np.mean(data, axis=0))\n weightlist.append(data.shape[0])\n varlist.append(np.std(data, axis=0))\n\n means = np.array(meanlist)\n vars = np.array(varlist)\n weights = np.array(weightlist) / np.sum(weightlist)\n\n weighted_means = (means.T * weights).T \n weighted_vars = (vars.T * weights).T \n\n print(weighted_means.shape)\n print(weighted_vars.shape)\n\n np.save(\"global_means.npy\",np.sum(weighted_means, axis=0))\n np.save(\"global_vars.npy\",np.sum(weighted_vars, axis=0))\n\n\n\nif __name__ == \"__main__\":\n 
#global_info(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bbdc_2019_Bewegungsdaten/\")\n\n dataset_creator(data_path+\"train.csv\",\n \"./data/sparse/rawdata\", recurrent_feature_extractor, 80)\n\n challenge_dataset_creator(data_path+\"challenge.csv\",\n \"./data/sparse/rawchallenge\", recurrent_feature_extractor, 80)\n# tf.enable_eager_execution()\n\n# np.random.seed(42)\n# #indices = np.random.randint(0, 6384, 638)\n# indices = np.arange(0,1)\n# print(indices)\n# ds = read_recurrent_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords\", 30, 1, filter_ids=indices, mode='include', training=True)\n\n# res = ds.take(1)\n# for r in res:\n# print(r)\n\n# ds = read_recurrent_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords\", 30, 1, filter_ids=indices, mode='include', training=True)\n\n# res = ds.take(1)\n# for r in res:\n# print(r)"
},
{
"alpha_fraction": 0.6416009664535522,
"alphanum_fraction": 0.6579745411872864,
"avg_line_length": 46.14285659790039,
"blob_id": "8f0881b3ac6d83aa19a542a54a76aaa72c4661f3",
"content_id": "93d04fbb24c109eca9b05d1448ae62097d272021",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1649,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 35,
"path": "/abgabe.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \nimport pandas as pd\nfrom utils import read_recurrent_dataset\nfrom tensorflow.keras import layers \n\nname = \"shorty_mit0806_full\"\nsequence_length = 30\nclasses = ['run', 'walk', 'stand', 'sit', 'sit-to-stand', 'stand-to-sit', \n 'stair-up', 'stair-down', 'jump-one-leg', 'jump-two-leg', 'curve-left-step', \n 'curve-right-step', 'curve-left-spin-Lfirst', 'curve-left-spin-Rfirst', \n 'curve-right-spin-Lfirst', 'curve-right-spin-Rfirst', 'lateral-shuffle-left', \n 'lateral-shuffle-right','v-cut-left-Lfirst', 'v-cut-left-Rfirst', 'v-cut-right-Lfirst', 'v-cut-right-Rfirst']\n\ndataset = read_recurrent_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawchallenge.tfrecords\",\n sequence_length, \n 1,\n filter_subjects=None, \n filter_ids=None,\n mode=None, \n training=False)\n\n\nmodel = tf.keras.models.load_model(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/models/model_archive/new_shit/\"+name+\"/\"+name+\".h5\")\n\nresults = model.predict(dataset, steps=1738)#should be of size examples,22\n#print(results)\npredictions = np.argmax(results, axis=1) # should be of shape num_samples\n#print(predictions)\ndf = pd.read_csv(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/challenge.csv\")\npredicted_labels = [classes[int(x)] for x in predictions]\n#for index, row in df.iterrows():\ndf['Label'] = predicted_labels\n\ndf.to_csv(\"abgabe_\"+name+\".csv\", index=False)"
},
{
"alpha_fraction": 0.8157894611358643,
"alphanum_fraction": 0.8157894611358643,
"avg_line_length": 37,
"blob_id": "a9bf373ffc8eb0165e6d2472d11e23d393ecbeba",
"content_id": "6ca077338a4f998d17b1b88defb7036ac0a31ade",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 114,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 3,
"path": "/README.md",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "# bigdatachallenge\n\nCode for the Bremen Big Data Challenge, where I used LSTMs to classify sequential sensor data\n"
},
{
"alpha_fraction": 0.6044828295707703,
"alphanum_fraction": 0.6121397614479065,
"avg_line_length": 36.41145706176758,
"blob_id": "ab85344db4bf78d63fdd241e77caa815d473585f",
"content_id": "90aa54522f693be7c78c4dd2a8baae7546a38b68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7183,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 192,
"path": "/autoconceptor.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "\"\"\"\nThe Autoconceptor, adapted from Jaeger 2017, and the DynStateTuple that\nis used to store the conceptor matrix.\n\"\"\"\n\nimport numpy as np\nimport collections\nimport tensorflow as tf\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.contrib.layers.python.layers import layers\nfrom tensorflow.python.layers import base as base_layer\n\n# following the desing of LSTM state tuples\n_DynStateTuple = collections.namedtuple(\"DynStateTyple\", [\"C\", \"h\"])\n\nclass DynStateTuple(_DynStateTuple):\n \"\"\"Tuple used by RNN Models with conceptor matrices.\n\n Stores two elements: `(C, h)` in that order\n where C is the conceptor matrix\n and h is the state of the RNN\n\n adapted from LSTMStateTuple in tensorflow/python/obs/rnn_cell_impl.py\n \"\"\"\n\n __slots__ = ()\n\n @property\n def dtype(self):\n (C, h) = self\n if C.dtype != h.dtype:\n raise TypeError(\"Matrix and internal state should agree on type: %s vs %s\" %\n (str(C.dtype), str(h.dtype)))\n return C.dtype\n\n\nclass Autoconceptor(tf.nn.rnn_cell.BasicRNNCell):\n \"\"\"\n Autoconceptor, adapted from Jaeger 2017\n \"\"\"\n\n def __init__(self, num_units, alpha, lam, batchsize, \n activation=tf.nn.tanh, reuse=None, layer_norm=False, dtype=tf.float32, \n initializer=None):\n \"\"\"\n Args:\n num_units = hidden state size of RNN cell\n alpha = alpha for autoconceptor, used to calculate aperture as alpha**-2\n lam = lambda for autoconceptor, scales conceptor-matrix\n batchsize = number of training examples per batch (we need this to allocate memory properly)\n activation = which nonlinearity to use (tanh works best, relu only with layer norm)\n reuse = whether to reuse variables, just leave this as None\n layer_norm = whether to apply layer normalization, not necessary if using tanh\n initializer = which initializer to use for the weight matrix, good idea is to use init_ops.constant_initializer(0.05 * np.identity(num_units))\n \"\"\"\n super(Autoconceptor, self).__init__(num_units=num_units, activation=activation, reuse=reuse)\n self.num_units = num_units\n self.c_lambda = tf.constant(lam, name=\"lambda\")\n self.batchsize = batchsize\n self.conceptor_built = False\n self.layer_norm = layer_norm\n self._activation = activation\n self.aperture_fact = tf.constant(alpha**(-2), name=\"aperture\")\n self._state_size = self.zero_state(batchsize, dtype)\n self.initializer = initializer or init_ops.constant_initializer(0.05 * np.identity(num_units))\n\n #no idea what this does, to be honest\n self.input_spec = base_layer.InputSpec(ndim=2)\n\n # these two properties are necessary to pass assert_like_rnn_cell test in static_rnn and dynamic_rnn\n @property\n def state_size(self):\n \n return self._state_size\n\n @property\n def output_size(self):\n return self.num_units\n\n def zero_state(self, batch_size, dtype):\n \"\"\"\n Returns the zero state for the autoconceptor cell.\n\n batch_size = the number of elements per batch\n dtype = the dtype to be used, stick with tf.float32\n\n The zero state is a DynStateTuple consisting of a C-matrix filled with zeros,\n shape [batchsize, num_units, num_units] and a zero-filled hidden state of\n shape [batchsize, num_units]\n \"\"\"\n return DynStateTuple(C=tf.zeros([batch_size, self.num_units, self.num_units], dtype=dtype),\n h=tf.zeros([batch_size, self.num_units], dtype=dtype))\n\n\n def build(self, inputs_shape):\n \"\"\"\n Builds the cell by defining variables. 
\n Overrides method from super-class.\n \"\"\"\n print(\"inputs shape at autoconceptor: \", inputs_shape) # None, 80, 19\n if inputs_shape[2] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % inputs_shape)\n input_dim = inputs_shape[2]\n\n self.W_in = self.add_variable(\n \"W_in\",\n shape=[input_dim, self.num_units],\n initializer=init_ops.random_normal_initializer(),\n dtype=self.dtype)\n\n self.b_in = self.add_variable(\n \"b_in\",\n shape=[self.num_units],\n initializer= init_ops.zeros_initializer(),\n dtype=self.dtype)\n\n self.W = self.add_variable(\n \"W\",\n shape=[self.num_units, self.num_units],\n initializer=self.initializer,\n dtype=self.dtype)\n\n \n #tf.get_variable(\"gamma\", shape=shape, initializer=gamma_init)\n # tf.get_variable(\"beta\", shape=shape, initializer=beta_init)\n\n self.built = True\n\n \n # def _norm(self, inp, scope=\"layer_norm\"):\n # \"\"\" \n # Performs layer normalization on the hidden state.\n\n # inp = the input to be normalized\n # scope = name for the variable scope, just leave as default\n \n # Returns inp normalized by learned parameters gamma and beta\n # \"\"\"\n # #shape = inp.get_shape()[-1:]\n # #gamma_init = init_ops.constant_initializer(1)\n # #beta_init = init_ops.constant_initializer(1)\n # #with tf.variable_scope(scope):\n # # tf.get_variable(\"gamma\", shape=shape, initializer=gamma_init)\n # # tf.get_variable(\"beta\", shape=shape, initializer=beta_init)\n # normalized = layers.layer_norm(inp)\n # return normalized\n\n\n def call(self, inputs, h):\n \"\"\"\n Performs one step of this Autoconceptor Cell.\n\n inputs = the input batch, shape [batchsize, input_dim]\n h = the DynStateTuple containing the preceding state\n\n Returns output, state\n where output = output at this time step\n state = new hidden state and C-matrix as DynStateTuple\n \"\"\"\n\n print(\"inputs in call, should be 32x19:\",inputs)\n \n C, state = h\n\n print(\"C in call, should be 32x50x50:\", C)\n print(\"State in call, should be 32x50, I guess:\", h)\n # so far, standard RNN logic\n state = self._activation(\n (tf.matmul(inputs, self.W_in) + self.b_in) + (tf.matmul(state, self.W))\n )\n\n # if layer norm is activated, normalize layer output as explained in Ba et al. 2016\n if(self.layer_norm):\n state = layers.layer_norm(state)#self._norm(state)\n \n state = tf.reshape(state, [-1, 1, self.num_units])\n\n \n\n # updating C following update rule presented by Jaeger\n C = C + self.c_lambda * ( tf.matmul(tf.transpose((state - tf.matmul(state, C)), [0,2,1]), state) - tf.scalar_mul(self.aperture_fact,C) )\n \n # multiplying state with C\n state = tf.matmul(state, C)\n\n # Reshapes necessary for std. matrix multiplication, where one matrix\n # for all elements in batch vs. fast-weights matrix -> different for every\n # element!\n state = tf.reshape(state, [-1, self.num_units])\n\n return state, DynStateTuple(C, state)\n"
},
{
"alpha_fraction": 0.6761434078216553,
"alphanum_fraction": 0.7012773156166077,
"avg_line_length": 38.16128921508789,
"blob_id": "6e439b6de8a3c00994be817510907f89371b0672",
"content_id": "95a74eb55eac40f640ea83ab61e015e6c52efb91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2427,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 62,
"path": "/lstm_network.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \nfrom utils import *\nfrom tensorflow.keras import layers \nfrom models import *\n\nname = \"BigRandomAvoider\"\nsequence_length = 80\nbatchsize = 32\ndata = \"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords\"\nmodel_path = \"/Users/thomasklein/Projects/BremenBigDataChallenge2019/models/\"\narchive_path = model_path + \"model_archive/new_shit/\"\n\nmodus = 'train' \n\nmodel = get_bartimaeus(sequence_length, rec_units=128, drop1=0.6, dense_units=64, drop2=0.4)#get_dwarf(sequence_length, rec_units=19, drop=0.35)\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nif modus == 'train':\n\n #np.random.seed(42)\n indices = np.random.randint(0, 6384, 638)\n avoided_subjects = [6,7]\n dataset = read_recurrent_dataset(data, sequence_length, batchsize, filter_ids=indices, filter_subjects=avoided_subjects, id_mode='exclude', sub_mode='exclude', training=True)\n validation_set = read_recurrent_dataset(data, sequence_length, batchsize, filter_ids=indices, filter_subjects=avoided_subjects, id_mode='include', sub_mode='exclude', training=False)\n\n# elif modus == 'subject_train':\n\n# indices = [18,19]\n# dataset = read_recurrent_dataset(data, sequence_length, batchsize, filter_subjects=indices, mode='exclude', training=True)\n# validation_set = read_recurrent_dataset(data, sequence_length, batchsize, filter_subjects=indices, mode='include', training=False)\n\n\n# elif modus == 'full':\n\n# dataset = read_recurrent_dataset(data, sequence_length, batchsize, training=True)\n# validation_set = read_recurrent_dataset(data, sequence_length, batchsize, training=False)\n\n\ncallbacks = [\n# Write TensorBoard logs to `./logs` directory\ntf.keras.callbacks.TensorBoard(log_dir=archive_path+name+\"_\"+modus),\ntf.keras.callbacks.ModelCheckpoint(filepath=archive_path+name+\"_\"+modus+\"/\"+name+\"_\"+modus+\".h5\",\n save_best_only=True,\n period=1)\n]\n\n\nmodel.fit(x=dataset, \n epochs=150,\n steps_per_epoch=6384//batchsize,\n validation_data=validation_set,\n validation_steps=638//batchsize,\n callbacks = callbacks)\n\n#tf.keras.models.save_model(model,archive_path+name+\"_\"+modus+\"/\"+name+\"_\"+modus+\".h5\",overwrite=False)\n\n\nprint(\"Mission accomplished.\")"
},
{
"alpha_fraction": 0.7032884955406189,
"alphanum_fraction": 0.73243647813797,
"avg_line_length": 33.33333206176758,
"blob_id": "5fc98f507f159dfa1fceaeb8af89695cc6906918",
"content_id": "2b42965a0ae9a8d3a440986046a01c6e00bdf98c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1338,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 39,
"path": "/network.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport tensorflow as tf \n\nfrom tfrecord_converter import read_dataset\nfrom tensorflow.keras import layers \n\nmodel = tf.keras.Sequential()\nmodel.add(layers.Dense(64, activation='tanh', input_shape=(30,))) #set value to shape-1\nmodel.add(layers.Dense(64, activation='tanh'))\nmodel.add(layers.Dense(22, activation='softmax'))\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(0.001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\ndataset = read_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/cafeteria/minifeat.tfrecords\")\ntestset = read_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/cafeteria/minifeat_test.tfrecords\")\n#val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))\n#val_dataset = val_dataset.batch(32).repeat()\n\ncallbacks = [\n # Write TensorBoard logs to `./logs` directory\n tf.keras.callbacks.TensorBoard(log_dir='./cafeteria/logs/minifeat2')\n]\n\n\nmodel.fit(x=dataset, \n epochs=3000,\n steps_per_epoch=6384//32,\n validation_data=testset,\n validation_steps=100,\n callbacks = callbacks)\n\nmodel.save_weights('./cafeteria/weights/minifeatmodel2')\n\n#model.load_weights('./weights/my_model')\nresult = model.predict(dataset, steps=1)\n\nprint(result)"
},
{
"alpha_fraction": 0.6411411166191101,
"alphanum_fraction": 0.6606606841087341,
"avg_line_length": 31.656862258911133,
"blob_id": "7a3979a825dbd13d5bd1314c8cdabab038d09a86",
"content_id": "b86862f9b3950059ee5aa32a12a58ccc704c619f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3330,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 102,
"path": "/legacy/tfrecord_converter.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \nimport tensorflow as tf \n\n\n#tf.enable_eager_execution()\n\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef convert_csv_to_tfrecords(file):\n csv = pd.read_csv(file).values\n\n def serialize_example(featurelist):\n \"\"\"\n Creates a tf.Example message ready to be written to a file.\n \"\"\"\n\n # Create a dictionary mapping the feature name to the type of list\n feature = {}\n for i in range(csv.shape[1]-1):\n feature['feature'+str(i)] = _float_feature(featurelist[i])\n feature['label'] = _int64_feature(int(featurelist[-1]))\n\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n\n with tf.python_io.TFRecordWriter(\"features.tfrecords\") as writer:\n for row in csv:\n example = serialize_example(row)\n writer.write(example)\n\n\ndef read_dataset(training_path):\n # set values to shape-1\n features = {}\n for i in range(30):\n features['feature'+str(i)] = tf.FixedLenFeature([], tf.float32, default_value=0)\n features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)\n\n def _parse_function(example_proto):\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n data = []\n for i in range(30):\n data.append(parsed_features['feature'+str(i)])\n\n return data, tf.one_hot(parsed_features['label'],22)\n\n\n dataset = tf.data.TFRecordDataset(training_path)\n\n dataset = dataset.map(_parse_function)\n dataset = dataset.shuffle(1000)\n dataset = dataset.batch(32, drop_remainder=True)\n dataset = dataset.prefetch(1)\n dataset = dataset.repeat()\n return dataset \n\ndef read_recurrent_dataset(path):\n sequence_length = 100\n features = {}\n for i in range(19):\n features['feature'+str(i)] = tf.FixedLenFeature([sequence_length], tf.float32, default_value=np.zeros((sequence_length)))\n features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)\n\n def _parse_function(example_proto):\n\n parsed_features = tf.parse_single_example(example_proto, features)\n\n data = []\n for i in range(19):\n data.append(parsed_features['feature'+str(i)])\n\n return tf.reshape(data, (sequence_length,19)), tf.one_hot(parsed_features['label'],22)\n\n\n dataset = tf.data.TFRecordDataset(path)\n\n dataset = dataset.map(_parse_function)\n dataset = dataset.shuffle(1000)\n dataset = dataset.batch(32, drop_remainder=True)\n dataset = dataset.prefetch(1)\n dataset = dataset.repeat()\n return dataset \n\nif __name__ == \"__main__\":\n print(tf.__version__)\n #convert_csv_to_tfrecords(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/smallfeatures.csv\")\n\n # tf.enable_eager_execution()\n # res = read_recurrent_dataset(\"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/recurrent/rec_features.tfrecords\")\n # for val in res.take(1):\n # print(val)"
},
{
"alpha_fraction": 0.6597034931182861,
"alphanum_fraction": 0.6880053877830505,
"avg_line_length": 34.33333206176758,
"blob_id": "8123f1d349287294722730fad9aad2db842b76c1",
"content_id": "626bd8a59fb1682b031eed6fa7d17df3062ec1a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1484,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 42,
"path": "/extend_training.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np \nimport tensorflow as tf \nfrom utils import *\nfrom tensorflow.keras import layers \n\nname = \"big_ohne_r2\"\nsequence_length = 80\ndata = \"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords\"\nmodel_path = \"/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/models/\"\narchive_path = model_path + \"model_archive/\"\n\nmodel = tf.keras.models.load_model(archive_path+name+\"/\"+name+\".h5\")\n\n# model.compile(optimizer=tf.keras.optimizers.Adam(0.001),\n# loss='categorical_crossentropy',\n# metrics=['accuracy'])\n\nnp.random.seed(42)\nindices = np.random.randint(0, 6384, 638)\n\ndataset = read_recurrent_dataset(data, sequence_length, filter_ids=indices, mode='exclude')\nvalidation_set = read_recurrent_dataset(data, sequence_length, filter_ids=indices, mode='include')\n\ncallbacks = [\n # Write TensorBoard logs to `./logs` directory\n tf.keras.callbacks.TensorBoard(log_dir=archive_path+name),\n tf.keras.callbacks.ModelCheckpoint(filepath=model_path+\"checkpoints/\"+name+\".ckpt\",\n save_best_only=True,\n period=20)\n]\n\n\nmodel.fit(x=dataset, \n epochs=50,\n steps_per_epoch=6384//32,\n validation_data=validation_set,\n validation_steps=638//32,\n callbacks = callbacks)\n\ntf.keras.models.save_model(model,archive_path+name+\".h5\",overwrite=True)\n\nprint(\"Mission accomplished.\")"
},
{
"alpha_fraction": 0.66434645652771,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 35.97142791748047,
"blob_id": "b88b59c7da567a0113b30df61c72a2a41ad24819",
"content_id": "46fc7fb25df6a2f38176ed5c86670730c10d55a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1293,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 35,
"path": "/AutoconLayer.py",
"repo_name": "thoklei/bigdatachallenge",
"src_encoding": "UTF-8",
"text": "import collections\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow.keras.layers as layers \nfrom tensorflow.python.ops import init_ops\nfrom autoconceptor import Autoconceptor\n\nclass AutoconLayer(layers.RNN):\n\n def __init__(self, output_dim, alpha, lam, batchsize, activation=tf.nn.tanh, layer_norm=False, reuse=None, **kwargs):\n self.output_dim = output_dim\n self._cell = Autoconceptor(output_dim, alpha, lam, batchsize, \n activation=tf.nn.tanh, reuse=reuse, layer_norm=layer_norm, dtype=tf.float32, \n initializer=None)\n super(AutoconLayer, self).__init__(cell=self._cell, **kwargs)\n\n def build(self, input_shape):\n print(\"input shape:\", input_shape)\n # Make sure to call the `build` method at the end\n self._cell.build(input_shape)\n #super(AutoconLayer, self).build(input_shape)\n\n def compute_output_shape(self, input_shape):\n shape = tf.TensorShape(input_shape).as_list()\n shape[-1] = self.output_dim\n return tf.TensorShape(shape)\n\n def get_config(self):\n base_config = super(AutoconLayer, self).get_config()\n base_config['output_dim'] = self.output_dim\n return base_config\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)"
}
] | 10 |
Waaggy/solaredge_meterproxy | https://github.com/Waaggy/solaredge_meterproxy | f342326741884dcdf16e20bcf5ba160fa7f05400 | 9387b603b2cbfcb45e7168943ee5b764b6c9f2e8 | 1582b885b274135e8dcb383889028b7b25fd4040 | refs/heads/master | 2023-08-02T02:51:32.391326 | 2021-09-19T08:20:44 | 2021-09-19T08:20:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.47610822319984436,
"alphanum_fraction": 0.5002878308296204,
"avg_line_length": 25.72307777404785,
"blob_id": "7ea44a7e44b833d4ed7ef6e406fed82371ce7689",
"content_id": "1b3bf74593909dfc65e8e95d7d689313037b5b75",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1737,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 65,
"path": "/devices/generic.py",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "def device(config):\n return False\n\n\ndef values(device):\n if not device:\n return {}\n\n return {\n # \"energy_active\"\n # \"import_energy_active\"\n # \"power_active\"\n # \"p1_power_active\"\n # \"p2_power_active\"\n # \"p3_power_active\"\n # \"voltage_ln\"\n # \"p1n_voltage\"\n # \"p2n_voltage\"\n # \"p3n_voltage\"\n # \"voltage_ll\"\n # \"p12_voltage\"\n # \"p23_voltage\"\n # \"p31_voltage\"\n # \"frequency\"\n # \"p1_energy_active\"\n # \"p2_energy_active\"\n # \"p3_energy_active\"\n # \"p1_import_energy_active\"\n # \"p2_import_energy_active\"\n # \"p3_import_energy_active\"\n # \"export_energy_active\"\n # \"p1_export_energy_active\"\n # \"p2_export_energy_active\"\n # \"p3_export_energy_active\"\n # \"energy_reactive\"\n # \"p1_energy_reactive\"\n # \"p2_energy_reactive\"\n # \"p3_energy_reactive\"\n # \"energy_apparent\"\n # \"p1_energy_apparent\"\n # \"p2_energy_apparent\"\n # \"p3_energy_apparent\"\n # \"power_factor\"\n # \"p1_power_factor\"\n # \"p2_power_factor\"\n # \"p3_power_factor\"\n # \"power_reactive\"\n # \"p1_power_reactive\"\n # \"p2_power_reactive\"\n # \"p3_power_reactive\"\n # \"power_apparent\"\n # \"p1_power_apparent\"\n # \"p2_power_apparent\"\n # \"p3_power_apparent\"\n # \"p1_current\"\n # \"p2_current\"\n # \"p3_current\"\n # \"demand_power_active\"\n # \"minimum_demand_power_active\"\n # \"maximum_demand_power_active\"\n # \"demand_power_apparent\"\n # \"p1_demand_power_active\"\n # \"p2_demand_power_active\"\n # \"p3_demand_power_active\"\n }\n"
},
{
"alpha_fraction": 0.5495233535766602,
"alphanum_fraction": 0.6140896677970886,
"avg_line_length": 55.23954391479492,
"blob_id": "51ab3f4d5a065da976df5090781713ae7b842553",
"content_id": "ff970fdebad7faed19b3d36a42bdbb4db1a2e2da",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14791,
"license_type": "permissive",
"max_line_length": 156,
"num_lines": 263,
"path": "/semp-rtu.py",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport argparse\nimport configparser\nimport importlib\nimport logging\nimport sys\nimport threading\nimport time\n\nfrom pymodbus.server.sync import StartSerialServer\nfrom pymodbus.constants import Endian\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.transaction import ModbusRtuFramer\nfrom pymodbus.datastore import ModbusSlaveContext\nfrom pymodbus.datastore import ModbusServerContext\nfrom pymodbus.payload import BinaryPayloadBuilder\n\n\ndef t_update(ctx, stop, module, device, refresh):\n\n this_t = threading.currentThread()\n logger = logging.getLogger()\n\n while not stop.is_set():\n try:\n values = module.values(device)\n\n if not values:\n logger.debug(f\"{this_t.name}: no new values\")\n continue\n\n block_1001 = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)\n block_1001.add_32bit_float(values.get(\"energy_active\", 0)) # total active energy\n block_1001.add_32bit_float(values.get(\"import_energy_active\", 0)) # imported active energy\n block_1001.add_32bit_float(values.get(\"energy_active\", 0)) # total active energy non-reset\n block_1001.add_32bit_float(values.get(\"import_energy_active\", 0)) # imported active energy non-reset\n block_1001.add_32bit_float(values.get(\"power_active\", 0)) # total power\n block_1001.add_32bit_float(values.get(\"p1_power_active\", 0)) # power l1\n block_1001.add_32bit_float(values.get(\"p2_power_active\", 0)) # power l2\n block_1001.add_32bit_float(values.get(\"p3_power_active\", 0)) # power l3\n block_1001.add_32bit_float(values.get(\"voltage_ln\", 0)) # l-n voltage\n block_1001.add_32bit_float(values.get(\"p1n_voltage\", 0)) # l1-n voltage\n block_1001.add_32bit_float(values.get(\"p2n_voltage\", 0)) # l2-n voltage\n block_1001.add_32bit_float(values.get(\"p3n_voltage\", 0)) # l3-n voltage\n block_1001.add_32bit_float(values.get(\"voltage_ll\", 0)) # l-l voltage\n block_1001.add_32bit_float(values.get(\"p12_voltage\", 0)) # l1-l2 voltage\n block_1001.add_32bit_float(values.get(\"p23_voltage\", 0)) # l2-l3 voltage\n block_1001.add_32bit_float(values.get(\"p31_voltage\", 0)) # l3-l1 voltage\n block_1001.add_32bit_float(values.get(\"frequency\", 0)) # line frequency\n ctx.setValues(3, 1000, block_1001.to_registers())\n\n block_1101 = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)\n block_1101.add_32bit_float(values.get(\"p1_energy_active\", 0)) # total active energy l1\n block_1101.add_32bit_float(values.get(\"p2_energy_active\", 0)) # total active energy l2\n block_1101.add_32bit_float(values.get(\"p3_energy_active\", 0)) # total active energy l3\n block_1101.add_32bit_float(values.get(\"p1_import_energy_active\", 0)) # imported active energy l1\n block_1101.add_32bit_float(values.get(\"p2_import_energy_active\", 0)) # imported active energy l2\n block_1101.add_32bit_float(values.get(\"p3_import_energy_active\", 0)) # imported active energy l3\n block_1101.add_32bit_float(values.get(\"export_energy_active\", 0)) # total exported active energy\n block_1101.add_32bit_float(values.get(\"export_energy_active\", 0)) # total exported active energy non-reset\n block_1101.add_32bit_float(values.get(\"p1_export_energy_active\", 0)) # exported energy l1\n block_1101.add_32bit_float(values.get(\"p2_export_energy_active\", 0)) # exported energy l2\n block_1101.add_32bit_float(values.get(\"p3_export_energy_active\", 0)) # exported energy l3\n block_1101.add_32bit_float(values.get(\"energy_reactive\", 0)) # total reactive energy\n 
block_1101.add_32bit_float(values.get(\"p1_energy_reactive\", 0)) # reactive energy l1\n block_1101.add_32bit_float(values.get(\"p2_energy_reactive\", 0)) # reactive energy l2\n block_1101.add_32bit_float(values.get(\"p3_energy_reactive\", 0)) # reactive energy l3\n block_1101.add_32bit_float(values.get(\"energy_apparent\", 0)) # total apparent energy\n block_1101.add_32bit_float(values.get(\"p1_energy_apparent\", 0)) # apparent energy l1\n block_1101.add_32bit_float(values.get(\"p2_energy_apparent\", 0)) # apparent energy l2\n block_1101.add_32bit_float(values.get(\"p3_energy_apparent\", 0)) # apparent energy l3\n block_1101.add_32bit_float(values.get(\"power_factor\", 0)) # power factor\n block_1101.add_32bit_float(values.get(\"p1_power_factor\", 0)) # power factor l1\n block_1101.add_32bit_float(values.get(\"p2_power_factor\", 0)) # power factor l2\n block_1101.add_32bit_float(values.get(\"p3_power_factor\", 0)) # power factor l3\n block_1101.add_32bit_float(values.get(\"power_reactive\", 0)) # total reactive power\n block_1101.add_32bit_float(values.get(\"p1_power_reactive\", 0)) # reactive power l1\n block_1101.add_32bit_float(values.get(\"p2_power_reactive\", 0)) # reactive power l2\n block_1101.add_32bit_float(values.get(\"p3_power_reactive\", 0)) # reactive power l3\n block_1101.add_32bit_float(values.get(\"power_apparent\", 0)) # total apparent power\n block_1101.add_32bit_float(values.get(\"p1_power_apparent\", 0)) # apparent power l1\n block_1101.add_32bit_float(values.get(\"p2_power_apparent\", 0)) # apparent power l2\n block_1101.add_32bit_float(values.get(\"p3_power_apparent\", 0)) # apparent power l3\n block_1101.add_32bit_float(values.get(\"p1_current\", 0)) # current l1\n block_1101.add_32bit_float(values.get(\"p2_current\", 0)) # current l2\n block_1101.add_32bit_float(values.get(\"p3_current\", 0)) # current l3\n block_1101.add_32bit_float(values.get(\"demand_power_active\", 0)) # demand power\n block_1101.add_32bit_float(values.get(\"minimum_demand_power_active\", 0)) # minimum demand power\n block_1101.add_32bit_float(values.get(\"maximum_demand_power_active\", 0)) # maximum demand power\n block_1101.add_32bit_float(values.get(\"demand_power_apparent\", 0)) # apparent demand power\n block_1101.add_32bit_float(values.get(\"p1_demand_power_active\", 0)) # demand power l1\n block_1101.add_32bit_float(values.get(\"p2_demand_power_active\", 0)) # demand power l2\n block_1101.add_32bit_float(values.get(\"p3_demand_power_active\", 0)) # demand power l3\n ctx.setValues(3, 1100, block_1101.to_registers())\n except Exception as e:\n logger.critical(f\"{this_t.name}: {e}\")\n finally:\n time.sleep(refresh)\n\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"-c\", \"--config\", type=str, default=\"semp-rtu.conf\")\n argparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False)\n args = argparser.parse_args()\n\n default_config = {\n \"server\": {\n \"device\": \"/dev/ttyUSB0\",\n \"baud\": 9600,\n \"parity\": \"E\",\n \"timeout\": 1,\n \"log_level\": \"INFO\",\n \"meters\": \"\"\n },\n \"meters\": {\n \"dst_address\": 2,\n \"type\": \"generic\",\n \"ct_current\": 5,\n \"ct_inverted\": 0,\n \"phase_offset\": 120,\n \"serial_number\": 0x0D010556,\n \"refresh_rate\": 5\n }\n }\n\n confparser = configparser.ConfigParser()\n confparser.read(args.config)\n\n if not confparser.has_section(\"server\"):\n confparser[\"server\"] = default_config[\"server\"]\n\n log_handler = logging.StreamHandler(sys.stdout)\n 
log_handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s: %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"))\n\n logger = logging.getLogger()\n logger.setLevel(getattr(logging, confparser[\"server\"].get(\"log_level\", fallback=default_config[\"server\"][\"log_level\"]).upper()))\n logger.addHandler(log_handler)\n\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n\n slaves = {}\n threads = []\n thread_stops = []\n\n try:\n if confparser.has_option(\"server\", \"meters\"):\n meters = [m.strip() for m in confparser[\"server\"].get(\"meters\", fallback=default_config[\"server\"][\"meters\"]).split(',')]\n\n for meter in meters:\n address = confparser[meter].getint(\"dst_address\", fallback=default_config[\"meters\"][\"dst_address\"])\n meter_type = confparser[meter].get(\"type\", fallback=default_config[\"meters\"][\"type\"])\n meter_module = importlib.import_module(f\"devices.{meter_type}\")\n meter_device = meter_module.device(confparser[meter])\n\n slave_ctx = ModbusSlaveContext()\n\n block_1601 = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)\n block_1601.add_32bit_int(0) # config passcode\n block_1601.add_16bit_int(confparser[meter].getint(\"ct_current\", fallback=default_config[\"meters\"][\"ct_current\"])) # ct rated current\n block_1601.add_16bit_int(confparser[meter].getint(\"ct_current\", fallback=default_config[\"meters\"][\"ct_current\"])) # ct rated current l1\n block_1601.add_16bit_int(confparser[meter].getint(\"ct_current\", fallback=default_config[\"meters\"][\"ct_current\"])) # ct rated current l2\n block_1601.add_16bit_int(confparser[meter].getint(\"ct_current\", fallback=default_config[\"meters\"][\"ct_current\"])) # ct rated current l3\n block_1601.add_16bit_int(confparser[meter].getint(\"ct_inverted\", fallback=default_config[\"meters\"][\"ct_inverted\"])) # ct direction inversion\n block_1601.add_16bit_int(1) # measurement averaging\n block_1601.add_16bit_int(0) # power scale\n block_1601.add_16bit_int(15) # demand period\n block_1601.add_16bit_int(1) # demand subintervals\n block_1601.add_16bit_int(10000) # power/energy adjustment l1\n block_1601.add_16bit_int(10000) # power/energy adjustment l2\n block_1601.add_16bit_int(10000) # power/energy adjustment l3\n block_1601.add_16bit_int(-1000) # ct phase angle adjustment l1\n block_1601.add_16bit_int(-1000) # ct phase angle adjustment l2\n block_1601.add_16bit_int(-1000) # ct phase angle adjustment l3\n block_1601.add_16bit_int(1500) # minimum power reading\n block_1601.add_16bit_int(confparser[meter].getint(\"phase_offset\", fallback=default_config[\"meters\"][\"phase_offset\"])) # phase offset\n block_1601.add_16bit_int(0) # reset energy\n block_1601.add_16bit_int(0) # reset demand\n block_1601.add_16bit_int(20000) # current scale\n block_1601.add_16bit_int(0) # io pin mode\n slave_ctx.setValues(3, 1600, block_1601.to_registers())\n\n block_1651 = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)\n block_1651.add_16bit_int(0) # apply config\n block_1651.add_16bit_int(address) # modbus address\n block_1651.add_16bit_int(4) # baud rate\n block_1651.add_16bit_int(0) # parity mode\n block_1651.add_16bit_int(0) # modbus mode\n block_1651.add_16bit_int(5) # message delay\n slave_ctx.setValues(3, 1650, block_1651.to_registers())\n\n block_1701 = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)\n block_1701.add_32bit_int(confparser[meter].getint(\"serial_number\", fallback=default_config[\"meters\"][\"serial_number\"])) # serial number\n 
block_1701.add_32bit_int(0) # uptime (s)\n block_1701.add_32bit_int(0) # total uptime (s)\n block_1701.add_16bit_int(202) # wattnode model\n block_1701.add_16bit_int(31) # firmware version\n block_1701.add_16bit_int(0) # wattnode options\n block_1701.add_16bit_int(0) # error status\n block_1701.add_16bit_int(0) # power fail count\n block_1701.add_16bit_int(0) # crc error count\n block_1701.add_16bit_int(0) # frame error count\n block_1701.add_16bit_int(0) # packet error count\n block_1701.add_16bit_int(0) # overrun count\n block_1701.add_16bit_int(0) # error status 1\n block_1701.add_16bit_int(0) # error status 2\n block_1701.add_16bit_int(0) # error status 3\n block_1701.add_16bit_int(0) # error status 4\n block_1701.add_16bit_int(0) # error status 5\n block_1701.add_16bit_int(0) # error status 6\n block_1701.add_16bit_int(0) # error status 7\n block_1701.add_16bit_int(0) # error status 8\n slave_ctx.setValues(3, 1700, block_1701.to_registers())\n\n update_t_stop = threading.Event()\n update_t = threading.Thread(\n target=t_update,\n name=f\"t_update_{address}\",\n args=(\n slave_ctx,\n update_t_stop,\n meter_module,\n meter_device,\n confparser[meter].getint(\"refresh_rate\", fallback=default_config[\"meters\"][\"refresh_rate\"])\n )\n )\n\n threads.append(update_t)\n thread_stops.append(update_t_stop)\n\n slaves.update({address: slave_ctx})\n logger.info(f\"Created {update_t}: {meter} {meter_type} {meter_device}\")\n\n if not slaves:\n logger.warning(f\"No meters defined in {args.config}\")\n\n identity = ModbusDeviceIdentification()\n server_ctx = ModbusServerContext(slaves=slaves, single=False)\n\n time.sleep(1)\n\n for t in threads:\n t.start()\n logger.info(f\"Starting {t}\")\n\n StartSerialServer(\n server_ctx,\n framer=ModbusRtuFramer,\n identity=identity,\n port=confparser[\"server\"].get(\"device\", fallback=default_config[\"server\"][\"device\"]),\n baudrate=confparser[\"server\"].get(\"baud\", fallback=default_config[\"server\"][\"baud\"]),\n parity=confparser[\"server\"].get(\"parity\", fallback=default_config[\"server\"][\"parity\"]),\n timeout=float(confparser[\"server\"].get(\"timeout\", fallback=default_config[\"server\"][\"timeout\"]))\n )\n except KeyboardInterrupt:\n pass\n finally:\n for t_stop in thread_stops:\n t_stop.set()\n for t in threads:\n t.join()\n"
},
{
"alpha_fraction": 0.5584079623222351,
"alphanum_fraction": 0.5856716632843018,
"avg_line_length": 37.953487396240234,
"blob_id": "2d7efa8ea6d33286c6e6cace99eb87aff556908e",
"content_id": "6be3ef3be7bb8f84719adafcbec0546a704229a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5025,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 129,
"path": "/devices/sdm630.py",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport sdm_modbus\n\n\ndef device(config):\n\n # Configuration parameters:\n #\n # timeout seconds to wait for a response, default: 1\n # retries number of retries, default: 3\n # unit modbus address, default: 1\n #\n # For Modbus TCP:\n # host ip or hostname\n # port modbus tcp port\n #\n # For Modbus RTU:\n # device serial device, e.g. /dev/ttyUSB0\n # stopbits number of stop bits\n # parity parity setting, N, E or O\n # baud baud rate\n\n timeout = config.getint(\"timeout\", fallback=1)\n retries = config.getint(\"retries\", fallback=3)\n unit = config.getint(\"src_address\", fallback=1)\n\n host = config.get(\"host\", fallback=False)\n port = config.getint(\"port\", fallback=False)\n device = config.get(\"device\", fallback=False)\n\n if device:\n stopbits = config.getint(\"stopbits\", fallback=1)\n parity = config.get(\"parity\", fallback=\"N\")\n baud = config.getint(\"baud\", fallback=9600)\n\n if (parity\n and parity.upper() in [\"N\", \"E\", \"O\"]):\n parity = parity.upper()\n else:\n parity = False\n\n return sdm_modbus.SDM630(\n device=device,\n stopbits=stopbits,\n parity=parity,\n baud=baud,\n timeout=timeout,\n retries=retries,\n unit=unit\n )\n else:\n return sdm_modbus.SDM630(\n host=host,\n port=port,\n timeout=timeout,\n retries=retries,\n unit=unit\n )\n\n\ndef values(device):\n if not device:\n return {}\n\n logger = logging.getLogger()\n logger.debug(f\"device: {device}\")\n\n values = device.read_all()\n\n logger.debug(f\"values: {values}\")\n\n return {\n \"energy_active\": values.get(\"total_energy_active\", 0),\n \"import_energy_active\": values.get(\"import_energy_active\", 0),\n \"power_active\": values.get(\"total_power_active\", 0),\n \"p1_power_active\": values.get(\"p1_power_active\", 0),\n \"p2_power_active\": values.get(\"p2_power_active\", 0),\n \"p3_power_active\": values.get(\"p3_power_active\", 0),\n \"voltage_ln\": values.get(\"voltage_ln\", 0),\n \"p1n_voltage\": values.get(\"p1_voltage\", 0),\n \"p2n_voltage\": values.get(\"p2_voltage\", 0),\n \"p3n_voltage\": values.get(\"p3_voltage\", 0),\n \"voltage_ll\": values.get(\"voltage_ll\", 0),\n \"p12_voltage\": values.get(\"p12_voltage\", 0),\n \"p23_voltage\": values.get(\"p23_voltage\", 0),\n \"p31_voltage\": values.get(\"p31_voltage\", 0),\n \"frequency\": values.get(\"frequency\", 0),\n \"p1_energy_active\": values.get(\"total_energy_active\", 0),\n # \"p2_energy_active\"\n # \"p3_energy_active\"\n \"p1_import_energy_active\": values.get(\"import_energy_active\", 0),\n # \"p2_import_energy_active\"\n # \"p3_import_energy_active\"\n \"export_energy_active\": values.get(\"export_energy_active\", 0),\n \"p1_export_energy_active\": values.get(\"export_energy_active\", 0),\n # \"p2_export_energy_active\"\n # \"p3_export_energy_active\"\n \"energy_reactive\": values.get(\"total_energy_reactive\", 0),\n \"p1_energy_reactive\": values.get(\"total_energy_reactive\", 0),\n # \"p2_energy_reactive\"\n # \"p3_energy_reactive\"\n \"energy_apparent\": values.get(\"total_energy_apparent\", 0),\n \"p1_energy_apparent\": values.get(\"total_energy_apparent\", 0),\n # \"p2_energy_apparent\"\n # \"p3_energy_apparent\"\n \"power_factor\": values.get(\"total_power_factor\", 0),\n \"p1_power_factor\": values.get(\"p1_power_factor\", 0),\n \"p2_power_factor\": values.get(\"p2_power_factor\", 0),\n \"p3_power_factor\": values.get(\"p3_power_factor\", 0),\n \"power_reactive\": values.get(\"total_power_reactive\", 0),\n \"p1_power_reactive\": values.get(\"p1_power_reactive\", 0),\n 
\"p2_power_reactive\": values.get(\"p2_power_reactive\", 0),\n \"p3_power_reactive\": values.get(\"p3_power_reactive\", 0),\n \"power_apparent\": values.get(\"total_power_apparent\", 0),\n \"p1_power_apparent\": values.get(\"p1_power_apparent\", 0),\n \"p2_power_apparent\": values.get(\"p2_power_apparent\", 0),\n \"p3_power_apparent\": values.get(\"p3_power_apparent\", 0),\n \"p1_current\": values.get(\"p1_current\", 0),\n \"p2_current\": values.get(\"p2_current\", 0),\n \"p3_current\": values.get(\"p3_current\", 0),\n \"demand_power_active\": values.get(\"total_import_demand_power_active\", 0),\n # \"minimum_demand_power_active\"\n \"maximum_demand_power_active\": values.get(\"maximum_import_demand_power_active\", 0),\n \"demand_power_apparent\": values.get(\"total_demand_power_apparent\", 0),\n \"p1_demand_power_active\": (values.get(\"p1_demand_current\", 0) * values.get(\"p1_voltage\", 0)),\n \"p2_demand_power_active\": (values.get(\"p2_demand_current\", 0) * values.get(\"p2_voltage\", 0)),\n \"p3_demand_power_active\": (values.get(\"p3_demand_current\", 0) * values.get(\"p3_voltage\", 0))\n }\n"
},
{
"alpha_fraction": 0.6274510025978088,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 9.399999618530273,
"blob_id": "d7f9dce0aa19c032b55005aa9051b721e565af35",
"content_id": "f7795dc10630b78e4047f5ba514300f316a6e978",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 51,
"license_type": "permissive",
"max_line_length": 21,
"num_lines": 5,
"path": "/Makefile",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "all: lint\n\n.PHONY: lint\nlint:\n\tflake8 --ignore=E501"
},
{
"alpha_fraction": 0.5437633991241455,
"alphanum_fraction": 0.5635583400726318,
"avg_line_length": 31.503875732421875,
"blob_id": "abebaf63aa03d0b1d3221b3016182d06e983b39f",
"content_id": "cce084a468e0782a0131b0ff8e0216b097e72281",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4193,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 129,
"path": "/devices/sdm120.py",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "import logging\n\nimport sdm_modbus\n\n\ndef device(config):\n\n # Configuration parameters:\n #\n # timeout seconds to wait for a response, default: 1\n # retries number of retries, default: 3\n # unit modbus address, default: 1\n #\n # For Modbus TCP:\n # host ip or hostname\n # port modbus tcp port\n #\n # For Modbus RTU:\n # device serial device, e.g. /dev/ttyUSB0\n # stopbits number of stop bits\n # parity parity setting, N, E or O\n # baud baud rate\n\n timeout = config.getint(\"timeout\", fallback=1)\n retries = config.getint(\"retries\", fallback=3)\n unit = config.getint(\"src_address\", fallback=1)\n\n host = config.get(\"host\", fallback=False)\n port = config.getint(\"port\", fallback=False)\n device = config.get(\"device\", fallback=False)\n\n if device:\n stopbits = config.getint(\"stopbits\", fallback=1)\n parity = config.get(\"parity\", fallback=\"N\")\n baud = config.getint(\"baud\", fallback=2400)\n\n if (parity\n and parity.upper() in [\"N\", \"E\", \"O\"]):\n parity = parity.upper()\n else:\n parity = False\n\n return sdm_modbus.SDM120(\n device=device,\n stopbits=stopbits,\n parity=parity,\n baud=baud,\n timeout=timeout,\n retries=retries,\n unit=unit\n )\n else:\n return sdm_modbus.SDM120(\n host=host,\n port=port,\n timeout=timeout,\n retries=retries,\n unit=unit\n )\n\n\ndef values(device):\n if not device:\n return {}\n\n logger = logging.getLogger()\n logger.debug(f\"device: {device}\")\n\n values = device.read_all()\n\n logger.debug(f\"values: {values}\")\n\n return {\n \"energy_active\": values.get(\"total_energy_active\", 0),\n \"import_energy_active\": values.get(\"import_energy_active\", 0),\n \"power_active\": values.get(\"power_active\", 0),\n \"p1_power_active\": values.get(\"power_active\", 0),\n # \"p2_power_active\"\n # \"p3_power_active\"\n \"voltage_ln\": values.get(\"voltage\", 0),\n \"p1n_voltage\": values.get(\"voltage\", 0),\n # \"p2n_voltage\"\n # \"p3n_voltage\"\n # \"voltage_ll\"\n # \"p12_voltage\"\n # \"p23_voltage\"\n # \"p31_voltage\"\n \"frequency\": values.get(\"frequency\", 0),\n \"p1_energy_active\": values.get(\"total_energy_active\", 0),\n # \"p2_energy_active\"\n # \"p3_energy_active\"\n \"p1_import_energy_active\": values.get(\"import_energy_active\", 0),\n # \"p2_import_energy_active\"\n # \"p3_import_energy_active\"\n \"export_energy_active\": values.get(\"export_energy_active\", 0),\n \"p1_export_energy_active\": values.get(\"export_energy_active\", 0),\n # \"p2_export_energy_active\"\n # \"p3_export_energy_active\"\n \"energy_reactive\": values.get(\"total_energy_reactive\", 0),\n \"p1_energy_reactive\": values.get(\"total_energy_reactive\", 0),\n # \"p2_energy_reactive\"\n # \"p3_energy_reactive\"\n # \"energy_apparent\"\n # \"p1_energy_apparent\"\n # \"p2_energy_apparent\"\n # \"p3_energy_apparent\"\n \"power_factor\": values.get(\"power_factor\", 0),\n \"p1_power_factor\": values.get(\"power_factor\", 0),\n # \"p2_power_factor\"\n # \"p3_power_factor\"\n \"power_reactive\": values.get(\"power_reactive\", 0),\n \"p1_power_reactive\": values.get(\"power_reactive\", 0),\n # \"p2_power_reactive\"\n # \"p3_power_reactive\"\n \"power_apparent\": values.get(\"power_apparent\", 0),\n \"p1_power_apparent\": values.get(\"power_apparent\", 0),\n # \"p2_power_apparent\"\n # \"p3_power_apparent\"\n \"p1_current\": values.get(\"current\", 0),\n # \"p2_current\"\n # \"p3_current\"\n \"demand_power_active\": values.get(\"total_demand_power_active\", 0),\n # \"minimum_demand_power_active\"\n \"maximum_demand_power_active\": 
values.get(\"maximum_total_demand_power_active\", 0),\n # \"demand_power_apparent\"\n \"p1_demand_power_active\": values.get(\"total_demand_power_active\", 0),\n # \"p2_demand_power_active\"\n # \"p3_demand_power_active\"\n }\n"
},
{
"alpha_fraction": 0.7481111288070679,
"alphanum_fraction": 0.7643845081329346,
"avg_line_length": 57.53061294555664,
"blob_id": "793d5e9163389745d21ab0062df1d44525e1c63c",
"content_id": "91e3402a42ab3ea49ad9ddcf7c2b9b32c5c72b3f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8603,
"license_type": "permissive",
"max_line_length": 497,
"num_lines": 147,
"path": "/README.md",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "# solaredge_meterproxy\n\nsolaredge_meterproxy is a python tool that responds to Modbus requests from SolarEdge power inverters with data from unsupported kWh meters. While SolarEdge only supports a [limited number](https://www.solaredge.com/se-supported-devices) of revenue meters, by masquerading as a supported meter it is possible to supply your own meter values to the SolarEdge inverter for production, consumption, import/export monitoring, and export limitation.\n\nThis tool simulates one or more [WattNode WNC-3Y-400-MB](https://ctlsys.com/product/wattnode-modbus/) revenue meters, functionally similar to the rebranded SE-WNC-3Y-400-MB-K1. The Modbus registers of these simulated meters can then be updated with values from otherwise unsupported kWh meters, or sourced from a variety of data sources.\n\nSolarEdge inverters only use Modbus RTU over RS485 to communicate with meters, this project supports both Modbus RTU when connected directly to an inverter over RS485, *and* Modbus TCP in case a Modbus TCP gateway is connected to the inverter. This functionality has been tested using an [ICP-DAS tGW-715](https://www.icpdas.com/en/product/tGW-715) and [Elfin EE11](http://www.hi-flying.com/elfin-ee10-elfin-ee11) Modbus TCP to RTU gateway.\n\nSupported devices and data sources:\n\n* [Eastron SDM120M](https://www.eastroneurope.com/products/view/sdm120modbus)\n* [Eastron SDM230M](https://www.eastroneurope.com/products/view/sdm230modbus)\n* [Eastron SDM630M](https://www.eastroneurope.com/products/view/sdm630modbus)\n* InfluxDB\n\n\n## Usage\n\nDecide whether you will be running a Modbus RTU or Modbus TCP server. If your device is directly connected to the inverter via RS485 using a serial device or USB dongle, choose Modbus RTU. If you have a Modbus TCP gateway connected to your inverter, choose Modbus TCP.\n\n### Modbus RTU\n\nRun `semp-rtu.py` on a device connected via RS485 to a SolarEdge inverter. \n```\n usage: semp-rtu.py [-h] [-c CONFIG] [-v]\n\n optional arguments:\n -h, --help show this help message and exit\n -c CONFIG, --config CONFIG\n -v, --verbose\n```\n\nBy default, `semp-rtu.py` assumes your RS485 device is located at `/dev/ttyUSB0` with a baud rate of `9600`. While configuring and testing solaredge_meterproxy, you should run `semp-rtu.py` in verbose mode. The Modbus server and source meter configurations can be set in `semp-rtu.conf`. See [Configuration File](https://github.com/nmakel/solaredge_meterproxy#configuration-file) for more information.\n\n### Modbus TCP\n\nRun `semp-tcp.py` on a device on the same network as a Modbus TCP gateway connected via RS485 to a SolarEdge inverter. \n```\n usage: semp-tcp.py [-h] [-c CONFIG] [-v]\n\n optional arguments:\n -h, --help show this help message and exit\n -c CONFIG, --config CONFIG\n -v, --verbose\n```\n\nBefore running `semp-tcp.py`, configure a TCP IP and port for it to listen on. Your Modbus TCP gateway will need to be configured as *TCP client*, connecting to the IP and port you assigned `semp-tcp.py`. While configuring and testing solaredge_meterproxy, you should run `semp-tcp.py` in verbose mode. The Modbus server and source meter configurations can be set in `semp-tcp.conf`. See [Configuration File](https://github.com/nmakel/solaredge_meterproxy#configuration-file) for more information.\n\n### Configure your SolarEdge Inverter\n\nConfiguration of the inverter takes place in the SetApp interface or the LCD display on the inverter. 
For more information, please read [SolarEdge's SetApp documentation](https://www.solaredge.com/products/installer-tools/setapp). You will need a SolarEdge installer account to access the SetApp application. The account is free, and the app is available on both iOS and Android.\n\n__If you have multiple SolarEdge inverters connected via Modbus, are currently using the SunSpec Modbus logger function, or have one or more revenue meters connected via Modbus, please read _all_ instructions below to be sure you know what you are doing. This guide assumes both RS485 ports are unused and disconnected.__\n\nFirst, ensure your SolarEdge inverter's Modbus address is set to 1:\n\n- Choose the first available RS485 device, in most cases __RS485-1__.\n- Set the __Protocol__ to __SunSpec (Non-SE Logger)__.\n- Set the __Device ID__ to __1__.\n\nNow, add a meter:\n\n- Set the __Protocol__ to __Modbus (Multi-Device)__.\n- Choose __Add Modbus Device__.\n- Choose __Meter__.\n- Select the newly added __Meter 1__.\n- Set __Meter Function__ to the functionality of the meter you will be proxying.\n- Set __Meter Protocol__ to __SolarEdge__.\n- Set __Device ID__ to __2__, or another unused Modbus ID if you have multiple devices connected.\n- Set the appropriate __CT Rating__ and __Grid Topology__ depending on your situation.\n\nThe SolarEdge inverter will now try to connect to a meter with Modbus address 2 on the RS485 device you selected. If you have configured and started solaredge_meterproxy with a matching meter configuration you should see a _Meters_ section at the bottom of the _Status_ page. Depending on the _function_ selected, metering functionality should now be available.\n\nIf, after configuring a meter in the SetApp interface, you see only meter connection errors, set `log_level` to `DEBUG` in your configuration file. After starting solaredge_meterproxy you should see Modbus read requests from the inverter. If you do, please open an issue with copies of these and your configuration file. If you don't, check your connection, RS485 adapter, or Modbus TCP gateway settings.\n\n### Configuration file\n\nThe server, and one or more source meters, can be configured in a python `configparser` formatted configuration file. If a configuration file is not specified, and `semp-rtu.conf` of `semp-tcp.conf` are not found, generic defaults will be loaded. Provide an alternate configuration file using the `--config` parameter.\n\nFor an overview of all configurable parameters, see `semp-rtu.conf` or `semp-tcp.conf`.\n\nDevice scripts contain additional, often required, configuration parameters. Consult the relevant device script for an overview when configuring source meters.\n\nAn example **Modbus RTU** configuration, with a SDM120 source that is accessible over Modbus TCP:\n\n```\n[server]\ndevice = /dev/ttyUSB0\nbaud = 9600\nlog_level = INFO\nmeters = meter1\n\n[meter1]\ntype=sdm120\nhost=10.0.0.124\nport=502\nsrc_address=1\ndst_address=2\n```\n\nAn example **Modbus TCP** configuration, with a SDM120 source that is accessible over Modbus RTU:\n\n```\n[server]\naddress = 10.0.0.123\nport = 5502\nlog_level = INFO\nmeters = sdm120\n\n[sdm120]\ntype=sdm120\ndevice=/dev/ttyUSB0\nbaud=9600\nsrc_address=1\ndst_address=2\n```\n\nIf you receive `DEBUG: Frame check failed, ignoring!!` errors, the Modbus TCP gateway is probably sending you RTU frames inside TCP packets. 
In that case, set the `framer = rtu` configuration parameter inside the `[server]` block.\n\n\n## Creating Device Scripts\n\nSupport for various kWh meters can be added by creating a Python script in the `devices` directory. This script should adhere to the following:\n\n* Its name corresponds to the device or source it masquerades.\n* It contains a `device()` function.\n* It contains a `values()` function.\n* Both functions accept the variables as defined in `/devices/generic.py`.\n\nFor a skeleton implementation, see `/devices/generic.py`.\n\n### device()\n\nThe `device()` function is called _once_. It gets passed a `configparser` object with the device's configuration parameters, as configured in the configuration file. It must return a data structure which contains either an active connection, or enough information to identify the device in a data store. This data structure will be passed to the `values()` function.\n\nWhile the intent is to masquerade another Modbus RTU or Modbus TCP device, it is possible to use virtually any type of data store. InfluxDB, or SQLite, for example.\n\n### values()\n\nThe `values()` function is called every `refresh_rate` seconds. It gets passed the data structure returned by `device()`, and must return a `dict`. The `/devices/generic.py` script contains a list of all possible dictionary keys. It is not required to return all, or in fact any, keys. Functionality of the SolarEdge inverter will depend on the values provided.\n\nSingle phase devices should put the single phase values in the generic _and_ first phase specific values, for example: `power_active` and `p1_power_active`, but also `voltage_ln` and `p1n_voltage`.\n\n\n## Contributing\n\nContributions are more than welcome, especially new device scripts, or modifications which broaden the use case of this tool."
},
{
"alpha_fraction": 0.5754998922348022,
"alphanum_fraction": 0.5918179750442505,
"avg_line_length": 35.25833511352539,
"blob_id": "565c61f5f2f89bc1e08aea142ae09d4df07ee6f1",
"content_id": "32213d93af4ca87c61f23c51ea12da9a58fb6a57",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4351,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 120,
"path": "/devices/influxdb.py",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "import logging\nimport requests\nimport sys\n\nimport influxdb\n\n\ndef device(config):\n\n # Configuration parameters:\n #\n # host ip or hostname\n # port influxdb port\n # database database name\n # bucket bucket name\n # where_key tag name identifying the meter\n # where_value tag value identifying the meter\n\n logger = logging.getLogger()\n\n host = config.get(\"host\", fallback=\"localhost\")\n port = config.getint(\"port\", fallback=8086)\n database = config.get(\"database\", fallback=\"measurements\")\n bucket = config.get(\"bucket\", fallback=False)\n where_key = config.get(\"where_key\", fallback=False)\n where_value = config.get(\"where_value\", fallback=False)\n\n try:\n client = influxdb.InfluxDBClient(host=host, port=port)\n client.switch_database(database)\n client.ping()\n\n logger.debug(f\"connected to database: {host}:{port}/{database}\")\n except (ConnectionRefusedError, requests.exceptions.ConnectionError):\n logger.critical(f\"database connection failed: {host}:{port}/{database}\")\n sys.exit()\n\n return {\n \"client\": client,\n \"bucket\": bucket,\n \"where_key\": where_key,\n \"where_value\": where_value\n }\n\n\ndef values(device):\n if not device:\n return {}\n\n logger = logging.getLogger()\n logger.debug(f\"device: {device}\")\n \n if device[\"where_key\"] and device[\"where_value\"]:\n values = list(device[\"client\"].query(\n f'SELECT last(*) FROM \"{device[\"bucket\"]}\" WHERE (\"{device[\"where_key\"]}\" = \\'{device[\"where_value\"]}\\')'\n ).get_points())[0]\n else:\n values = list(device[\"client\"].query(\n f'SELECT last(*) FROM \"{device[\"bucket\"]}\"'\n ).get_points())[0]\n\n logger.debug(f\"values: {values}\")\n\n return {\n \"energy_active\": values.get(\"last_total_energy_active\", 0),\n \"import_energy_active\": values.get(\"last_import_energy_active\", 0),\n \"power_active\": values.get(\"last_power_active\", 0),\n \"p1_power_active\": values.get(\"last_power_active\", 0),\n # \"p2_power_active\"\n # \"p3_power_active\"\n \"voltage_ln\": values.get(\"last_voltage\", 0),\n \"p1n_voltage\": values.get(\"last_voltage\", 0),\n # \"p2n_voltage\"\n # \"p3n_voltage\"\n # \"voltage_ll\"\n # \"p12_voltage\"\n # \"p23_voltage\"\n # \"p31_voltage\"\n \"frequency\": values.get(\"last_frequency\", 0),\n \"p1_energy_active\": values.get(\"last_total_energy_active\", 0),\n # \"p2_energy_active\"\n # \"p3_energy_active\"\n \"p1_import_energy_active\": values.get(\"last_import_energy_active\", 0),\n # \"p2_import_energy_active\"\n # \"p3_import_energy_active\"\n \"export_energy_active\": values.get(\"last_export_energy_active\", 0),\n \"p1_export_energy_active\": values.get(\"last_export_energy_active\", 0),\n # \"p2_export_energy_active\"\n # \"p3_export_energy_active\"\n \"energy_reactive\": values.get(\"last_total_energy_reactive\", 0),\n \"p1_energy_reactive\": values.get(\"last_total_energy_reactive\", 0),\n # \"p2_energy_reactive\"\n # \"p3_energy_reactive\"\n # \"energy_apparent\"\n # \"p1_energy_apparent\"\n # \"p2_energy_apparent\"\n # \"p3_energy_apparent\"\n \"power_factor\": values.get(\"last_power_factor\", 0),\n \"p1_power_factor\": values.get(\"last_power_factor\", 0),\n # \"p2_power_factor\"\n # \"p3_power_factor\"\n \"power_reactive\": values.get(\"last_power_reactive\", 0),\n \"p1_power_reactive\": values.get(\"last_power_reactive\", 0),\n # \"p2_power_reactive\"\n # \"p3_power_reactive\"\n \"power_apparent\": values.get(\"last_power_apparent\", 0),\n \"p1_power_apparent\": values.get(\"last_power_apparent\", 0),\n # 
\"p2_power_apparent\"\n # \"p3_power_apparent\"\n \"p1_current\": values.get(\"last_current\", 0),\n # \"p2_current\"\n # \"p3_current\"\n \"demand_power_active\": values.get(\"last_total_demand_power_active\", 0),\n # \"minimum_demand_power_active\"\n \"maximum_demand_power_active\": values.get(\"last_maximum_total_demand_power_active\", 0),\n # \"demand_power_apparent\"\n \"p1_demand_power_active\": values.get(\"last_total_demand_power_active\", 0),\n # \"p2_demand_power_active\"\n # \"p3_demand_power_active\"\n }\n"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.6969696879386902,
"avg_line_length": 15.75,
"blob_id": "9d033128cde16f82e23b1c8237be3c366cd209ae",
"content_id": "0e3082ddaf7cd2bb5860cef896963ce484aa98fa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 66,
"license_type": "permissive",
"max_line_length": 17,
"num_lines": 4,
"path": "/requirements.txt",
"repo_name": "Waaggy/solaredge_meterproxy",
"src_encoding": "UTF-8",
"text": "pymodbus>=2.3.0\ninfluxdb>=5.3.0\nrequests>=2.23.0\nsdm_modbus>=0.4.3"
}
] | 8 |
st18601019822/FundProject | https://github.com/st18601019822/FundProject | 363e412b5c6ada869325193d55bc8d902f6bfc70 | 7fc85aa795f8c58e0cd80053fd9004345b6f0586 | 1efdd01518777729432149b0074f963c19d56e57 | refs/heads/master | 2023-02-18T04:33:16.805083 | 2021-01-20T10:18:53 | 2021-01-20T10:18:53 | 331,260,543 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5550239086151123,
"alphanum_fraction": 0.5813397169113159,
"avg_line_length": 35.05172348022461,
"blob_id": "c03a3587691f30d45c05401657a133a8616faeb8",
"content_id": "65ffd50ea1e6481a280df3e61a7c7e8291493b61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2140,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 58,
"path": "/test.py",
"repo_name": "st18601019822/FundProject",
"src_encoding": "UTF-8",
"text": "from browsermobproxy import Server\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport requests\nimport json\nimport time\nimport re\nfrom urllib.request import Request, urlopen\n\n# def get_url():\nif __name__ == '__main__':\n server = Server(r'D:\\exchange_data\\browsermob-proxy-2.1.4\\bin\\browsermob-proxy.bat')\n server.start()\n proxy = server.create_proxy()\n\n # 设置driver options\n chrome_options = Options()\n chrome_options.add_argument('--ignore-certificate-errors')\n chrome_options.add_argument('--proxy-server={0}'.format(proxy.proxy))\n chrome_options.add_argument('--disable-gpu')\n chrome_driver = r'D:\\exchange_data\\chromedriver.exe'\n #\n json_url=''\n url = 'http://fundf10.eastmoney.com/jjjz_161725.html'\n firefox_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'}\n\n\n driver = webdriver.Chrome(executable_path=chrome_driver, options = chrome_options)\n # proxy.new_har('fund', options={'captureHeaders': True, 'captureContent': True})\n proxy.new_har(url)\n\n driver.get(url)\n\n result = proxy.har\n for entry in result['log']['entries']:\n # entry['request']['url']\n if \"lsjz\" in entry['request']['url']:\n json_url=entry['request']['url']\n for i in range(1,69):\n t = time.time()\n s = str(int(round(t * 1000)))\n url = re.sub(r'&pageIndex=[0-9]', \"&pageIndex=\" + str(i), json_url)\n url1 = re.sub(r'endDate=&_=[0-9]+', \"endDate=&_=\" + s, url)\n print(url1)\n request = Request(url1, headers=firefox_headers)\n html = urlopen(request)\n # 获取数据\n data = html.read()\n # 转换成字符串\n strs = str(data)\n # 获取接口返回内容\n\n # 构建请求\n print(strs)\n\n# if __name__ == '__main__':\n# # print(requests.session().get(get_url()).text.replace(\"'\", '\"').replace('/ ', '/'))\n# print(get_url())"
},
{
"alpha_fraction": 0.5169811248779297,
"alphanum_fraction": 0.605031430721283,
"avg_line_length": 35.181819915771484,
"blob_id": "1e45f44d3e15dac558b628b4251c0713a3159239",
"content_id": "8c83e3618ca0c8d635c767a5f7ef954ec0baf3b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 797,
"license_type": "no_license",
"max_line_length": 180,
"num_lines": 22,
"path": "/test1.py",
"repo_name": "st18601019822/FundProject",
"src_encoding": "UTF-8",
"text": "import re\nimport time\nimport datetime\nif __name__ == '__main__':\n ele_json='http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery183013248650036662357_1611125979803&fundCode=161725&pageIndex=1&pageSize=20&startDate=&endDate=&_=1611125979816'\n # url=ele_json.find('pageIndex')\n # t=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n # ts=int(time.mktime(time.strptime(t, \"%Y-%m-%d %H:%M:%S\")))\n # print(ts)\n t = time.time()\n s=str(int(round(t * 1000)))\n url=re.sub(r'&pageIndex=[0-9]',\"&pageIndex=\"+\"2\",ele_json)\n print(url)\n url1=re.sub(r'endDate=&_=[0-9]+',\"endDate=&_=\"+s,url)\n print(url1)\n # url1=ele_json.replace(\"\")\n # print(ele_json[url:])\n # print(url)\n\n# if __name__ == '__main__':\n# print(get_record())\n# print(type(get_record()))"
},
{
"alpha_fraction": 0.6547008752822876,
"alphanum_fraction": 0.6598290801048279,
"avg_line_length": 29.789474487304688,
"blob_id": "d8b07250abe1b97f346ed9e22419393ba88c5d92",
"content_id": "39a36982c356aff0d57b5a78c3cc1ad107c55ae2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1206,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 38,
"path": "/main.py",
"repo_name": "st18601019822/FundProject",
"src_encoding": "UTF-8",
"text": "# This is a sample Python script.\n\nfrom browsermobproxy import Server\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\n\ndef print_hi():\n server = Server(r'D:\\exchange_data\\browsermob-proxy-2.1.4\\bin\\browsermob-proxy.bat')\n server.start()\n proxy = server.create_proxy()\n\n # 设置driver options\n chrome_options = Options()\n chrome_options.add_argument('--proxy-server={0}'.format(proxy.proxy))\n\n driver = webdriver.Chrome(chrome_options=chrome_options)\n #\n url = 'https://www.baidu.com/'\n proxy.new_har('fund', options={'captureHeaders': True, 'captureContent': True})\n driver.get(url)\n\n result = proxy.har\n print(result)\n\n for entry in result['log']['entries']:\n _url = entry['request']['url']\n # 根据URL找到数据接口\n # if \"lsjz?callback=\" in _url:\n _response = entry['response']\n _content = _response['content']['text']\n # 获取接口返回内容\n print(_content)\n server.stop()\n # driver.quit()\n"
}
] | 3 |
abingham/macos-display | https://github.com/abingham/macos-display | 6b9e28612eb1932d5b85ee4a12f61265c8dff0a2 | 79cd49f20b9d9e338c7cc4d20330036a82a56332 | 055bcb610ecd4aeecd6d4cbcf514b73db159be50 | refs/heads/master | 2020-05-27T10:25:35.086524 | 2019-05-25T13:36:36 | 2019-05-25T13:36:36 | 188,583,030 | 0 | 0 | Zlib | 2019-05-25T15:32:01 | 2019-05-25T13:36:44 | 2019-05-25T13:36:43 | null | [
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.6176470518112183,
"avg_line_length": 12.600000381469727,
"blob_id": "0e33e3b030f01ce983aa81c993ebac835b1ff3d8",
"content_id": "667b7825e96f9b6c24dfa3e49227f4e36fc78e38",
"detected_licenses": [
"Zlib"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 68,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/macos_display/__init__.py",
"repo_name": "abingham/macos-display",
"src_encoding": "UTF-8",
"text": "from .display import display_name\n\n__all__ = [\n \"display_name\"\n]\n"
},
{
"alpha_fraction": 0.5854141116142273,
"alphanum_fraction": 0.5943139791488647,
"avg_line_length": 30.6015625,
"blob_id": "fcf334f2d1d14026c301212eda9a31f6dd4b8ba1",
"content_id": "538f2cd43310b07b8ac5f1fd09adf32f16fe709a",
"detected_licenses": [
"Zlib"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4045,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 128,
"path": "/macos_display/display.c",
"repo_name": "abingham/macos-display",
"src_encoding": "UTF-8",
"text": "//\n// Created by Robert Smallshire on 2019-05-23.\n//\n\n#include \"display.h\"\n\n#include <IOKit/graphics/IOGraphicsLib.h>\n#include <CoreVideo/CVBase.h>\n#include <CoreVideo/CVDisplayLink.h>\n#include <ApplicationServices/ApplicationServices.h>\n\n\n\n// Returns the io_service_t corresponding to a CG display ID, or 0 on failure.\n// The io_service_t should be released with IOObjectRelease when not needed.\n// The implementation in this function is based on an open-source version\n// which is part of the GLFW library, with various fixes, found in\n// https://github.com/glfw/glfw/blob/master/src/cocoa_monitor.m by\n// Copyright (c) 2002-2006 Marcus Geelnard Copyright (c) 2006-2016 Camilla Löwy [email protected]\nio_service_t IOServicePortFromCGDisplayID(CGDirectDisplayID displayID)\n{\n const int MAX_DISPLAYS = 16;\n CGDisplayCount displayCount;\n CGDisplayCount maxDisplays = MAX_DISPLAYS;\n CGDirectDisplayID onlineDisplays[MAX_DISPLAYS];\n CGDisplayErr dErr = CGGetOnlineDisplayList(maxDisplays, onlineDisplays, &displayCount);\n if (dErr != kCGErrorSuccess) {\n return 0;\n }\n\n io_iterator_t iter;\n io_service_t serv, servicePort = 0;\n\n CFMutableDictionaryRef matching = IOServiceMatching(\"IODisplayConnect\");\n\n // releases matching for us\n kern_return_t err = IOServiceGetMatchingServices(kIOMasterPortDefault,\n matching,\n &iter);\n if (err)\n {\n return 0;\n }\n\n while ((serv = IOIteratorNext(iter)) != 0)\n {\n CFDictionaryRef info;\n CFIndex vendorID, productID, serialID;\n CFNumberRef vendorIDRef, productIDRef, serialIDRef;\n Boolean success;\n\n info = IODisplayCreateInfoDictionary(serv,\n kIODisplayOnlyPreferredName);\n\n vendorIDRef = CFDictionaryGetValue(info,\n CFSTR(kDisplayVendorID));\n productIDRef = CFDictionaryGetValue(info,\n CFSTR(kDisplayProductID));\n serialIDRef = CFDictionaryGetValue(info,\n CFSTR(kDisplaySerialNumber));\n\n success = CFNumberGetValue(vendorIDRef, kCFNumberCFIndexType,\n &vendorID);\n success &= CFNumberGetValue(productIDRef, kCFNumberCFIndexType,\n &productID);\n if (serialIDRef != 0) {\n success &= CFNumberGetValue(serialIDRef, kCFNumberCFIndexType,\n &serialID);\n }\n\n if (!success)\n {\n CFRelease(info);\n continue;\n }\n\n if (CGDisplayVendorNumber(displayID) != vendorID ||\n CGDisplayModelNumber(displayID) != productID)\n {\n CFRelease(info);\n continue;\n }\n\n // we're a match\n servicePort = serv;\n CFRelease(info);\n break;\n }\n\n IOObjectRelease(iter);\n return servicePort;\n}\n\n// Get the name of the specified display\n//\nchar* getDisplayName(CGDirectDisplayID displayID)\n{\n io_service_t serv = IOServicePortFromCGDisplayID(displayID);\n if (!serv)\n {\n return strdup(\"Unknown\");\n }\n\n CFDictionaryRef info = IODisplayCreateInfoDictionary(serv,\n kIODisplayOnlyPreferredName);\n\n IOObjectRelease(serv);\n\n CFStringRef value;\n\n CFDictionaryRef names = CFDictionaryGetValue(info, CFSTR(kDisplayProductName));\n\n if (!names || !CFDictionaryGetValueIfPresent(names, CFSTR(\"en_US\"),\n (const void**) &value))\n {\n CFRelease(info);\n return strdup(\"Unknown\");\n }\n\n CFIndex size = CFStringGetMaximumSizeForEncoding(CFStringGetLength(value),\n kCFStringEncodingUTF8);\n char* name = calloc((size_t) (size + 1), 1);\n CFStringGetCString(value, name, size, kCFStringEncodingUTF8);\n\n CFRelease(info);\n\n return name;\n}\n"
},
{
"alpha_fraction": 0.6042600870132446,
"alphanum_fraction": 0.6093049049377441,
"avg_line_length": 24.855072021484375,
"blob_id": "5e1e57faf6e5b704b27cd1b27f7931f813b5b40a",
"content_id": "773fa755ebd2573b2f21dc8af3ba25282a1635d4",
"detected_licenses": [
"Zlib"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1784,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 69,
"path": "/macos_display/display_module.c",
"repo_name": "abingham/macos-display",
"src_encoding": "UTF-8",
"text": "#define PY_SSIZE_T_CLEAN\n\n#include <Python.h>\n\n#include \"display.h\"\n\nstatic PyObject* display_name_wrapper(PyObject* module, PyObject* arg)\n{\n PyObject *lx;\n if (PyFloat_Check(arg)) {\n double dx = PyFloat_AS_DOUBLE((PyFloatObject *)arg);\n if (!(Py_IS_FINITE(dx) && dx == floor(dx))) {\n PyErr_SetString(PyExc_ValueError,\n \"display_name() only accepts integral values\");\n return NULL;\n }\n lx = PyLong_FromDouble(dx);\n }\n else {\n lx = PyNumber_Index(arg);\n }\n\n if (lx == NULL) {\n return NULL;\n }\n\n unsigned long x = PyLong_AsUnsignedLong(lx);\n Py_DECREF(lx);\n static unsigned long MAX = 0xffffffff;\n if (x == (((unsigned long) -1) && PyErr_Occurred()) || x > MAX) {\n PyErr_SetString(PyExc_OverflowError, \"Can't convert to uint32_t\");\n return NULL;\n }\n\n char* name = getDisplayName((uint32_t) x);\n PyObject* result = PyUnicode_FromString(name);\n free(name);\n if (!result) {\n PyErr_SetString(PyExc_RuntimeError,\n \"Could not decode UTF-8 string response from IOKit\");\n return NULL;\n }\n return result;\n}\n\n// Our Module's Function Definition struct\n// We require this `NULL` to signal the end of our method\n// definition\nstatic PyMethodDef methods[] = {\n { \"display_name\", display_name_wrapper, METH_O, \"Gets user-visible display name from display id\" },\n { NULL, NULL, 0, NULL }\n};\n\n// Our Module Definition struct\nstatic struct PyModuleDef display_module = {\n PyModuleDef_HEAD_INIT,\n \"display\",\n \"macOS display names\",\n -1,\n methods\n\n};\n\n// Initializes our module using our above struct\nPyMODINIT_FUNC PyInit_display(void)\n{\n return PyModule_Create(&display_module\n);\n}\n"
},
{
"alpha_fraction": 0.5950354337692261,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 30.33333396911621,
"blob_id": "69c46b16701c13ab8888f13b95b550406f11adcc",
"content_id": "f7c62473d0f0c190b6807a2bb9bdca6dec6e7679",
"detected_licenses": [
"Zlib"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1410,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 45,
"path": "/setup.py",
"repo_name": "abingham/macos-display",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, Extension\n\nimport sys\nimport platform\n\nif platform.system() != 'Darwin':\n print(\"The macos_display package can only be installed on macOS\", file=sys.stderr)\n sys.exit(1)\n\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='macos_display',\n author='[email protected]',\n author_email=\"[email protected]\",\n description=\"Retrieve information about macOS displays\",\n long_description=long_description,\n long_description_content_type='text/x-rst',\n version='1.1.1',\n url=\"https://github.com/sixty-north/macos-display\",\n packages = ['macos_display'],\n ext_modules=[\n Extension(\n 'macos_display.display',\n ['macos_display/display_module.c',\n 'macos_display/display.c'],\n extra_link_args = [\n '-framework', 'IOKit',\n '-framework', 'CoreFoundation',\n '-framework', 'ApplicationServices'\n ]\n )\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: zlib/libpng License',\n ]\n)\n"
},
{
"alpha_fraction": 0.8037974834442139,
"alphanum_fraction": 0.8037974834442139,
"avg_line_length": 25.33333396911621,
"blob_id": "2ec670f148f6e519e9cadecdd13f6d3fcc7686ea",
"content_id": "3dadf36d4ccdfb6cacb8052a8fb9d7e208233129",
"detected_licenses": [
"Zlib"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 316,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 12,
"path": "/macos_display/display.h",
"repo_name": "abingham/macos-display",
"src_encoding": "UTF-8",
"text": "#ifndef MAC_DISPLAYS_DISPLAYS_H\n#define MAC_DISPLAYS_DISPLAYS_H\n\n\n#include <IOKit/graphics/IOGraphicsLib.h>\n#include <CoreVideo/CVBase.h>\n#include <CoreVideo/CVDisplayLink.h>\n#include <ApplicationServices/ApplicationServices.h>\n\nchar* getDisplayName(CGDirectDisplayID displayID);\n//\n#endif //MAC_DISPLAYS_DISPLAYS_H\n"
}
] | 5 |
expyriment/DIF | https://github.com/expyriment/DIF | 0fa186820c08326849f4b9b9189f4c2201f66ed1 | eab8a358d99af474a45a0b771ab1af95051ff1c3 | d865a5b67fd7f01b51a4a04ae297f2e83b705d9a | refs/heads/master | 2021-12-10T14:17:20.568285 | 2021-12-10T13:54:37 | 2021-12-10T13:54:37 | 86,672,424 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5398601293563843,
"alphanum_fraction": 0.5650349855422974,
"avg_line_length": 20.636363983154297,
"blob_id": "c79a9ddedd4a590caafb807534d0b7f98ecdc6e2",
"content_id": "11f3452f21889b890f5d8e2005166f0d170cc92a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 33,
"path": "/tools/create_test_data.py",
"repo_name": "expyriment/DIF",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport os\nimport multiprocessing\nimport numpy as np\n\ndef random_file(para):\n np.random.seed(50)\n filename, size_mb = para\n with open(filename,'wb') as fl:\n for _ in range(size_mb):\n mb = np.random.bytes(1024*1024)\n fl.write(mb)\n print(\"created {0}\".format(filename))\n return\n\nif __name__ == \"__main__\":\n DIR = \"test_data\"\n N_FILE = 10\n SIZE_MB = 100\n\n try:\n os.mkdir(DIR)\n except:\n pass\n\n para = []\n for n in range(N_FILE):\n fl_name = os.path.join(DIR, \"file_öäüß_{0}.rnd\".format(n))\n para.append( (fl_name, SIZE_MB) )\n\n p = multiprocessing.Pool()\n list(p.imap_unordered(random_file, para))\n\n"
},
{
"alpha_fraction": 0.6957237124443054,
"alphanum_fraction": 0.7177631855010986,
"avg_line_length": 49.66666793823242,
"blob_id": "c4150a85ec65052c07f4be4dca52c4dd210b6419",
"content_id": "5f9768ea6a4a973c78fba327dfc214cc3412eeba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3040,
"license_type": "permissive",
"max_line_length": 711,
"num_lines": 60,
"path": "/README.md",
"repo_name": "expyriment/DIF",
"src_encoding": "UTF-8",
"text": "Data Integrity Fingerprint (DIF)\n================================\n\n**A proposal for a human-readable fingerprint of scientific datasets that allows verifying their integrity**\n\n*Released under the MIT License*\n\nOliver Lindemann ([email protected]) & Florian Krause ([email protected])\n\nIntroduction\n------------\n\n**Problem:** \nHow can we link a journal article unmistakably and indefinitely to a related (open) dataset, without relying on storage providers or other services that need to be maintained?\n\n**Solution:** \nThe author calculates checksums of all the files in the dataset the article relates to. From these checksums the author calculates the _Data Integrity Fingerprint (DIF)_ - a single \"master checksum\" that uniquly identifies the entire dataset. The author reports the DIF in the journal article. A reader of the journal article who obtained a copy of the dataset (from either the author or any other source) calculates the DIF of their copy of the dataset and compares it to the correct DIF as stated in the article. If the list of checksums of individual files in the original dataset is available, the author can furthermore investigate in detail the differences between the datasets, in case of a DIF mismatch.\n\n\n\nProcedure for calculating the DIF of a dataset\n----------------------------------------------\n\n1. Choose a (cryptographic) hash function `Hash` (e.g. SHA-256)\n\n2. For every file `f` in the (potentially nested) subtree under the dataset root directory,\n\n * calculate the checksum `c` as the hexadecimal digest (lower case letters) of `Hash(f)` (i.e. the hashed _binary contents_ of the file)\n\n * get the file path `p` as the UTF-8 encoded relative path in Unix notation (i.e. U+002F slash character as separator) from the dataset root directory to `f`\n\n * create the string `cp` (i.e the concatenation of `c` and `p`)\n \n * add `cp` to a list `l`\n \n3. Sort `l` in ascending Unicode code point order (i.e., byte- wise sorting, NOT based on the Unicode collation algorithm)\n\n4. Create the string `l[0]l[1]...l[n]` (i.e. the concatenation of all elements of `l`)\n\n5. Retrieve the DIF as the hexadecimal digest of `Hash(l[0]l[1]...l[n])`\n\nOptionally, checksums of individual files and their file paths can be saved as a checksums file (lines of `c p` for each `f).\n\n### Note\nOn a GNU/Linux system with a UTF-8 locale, the procedure to create the SHA-256 DIF is equivalent to:\n```\ncd <DATASET_ROOT_DIRECTORY>\nexport LC_ALL=C\nfind . -type f -print0 | xargs -0 shasum -a 256 | cut -c-64,69- | sort | tr -d '\\n' | shasum -a 256 | cut -c-64\n```\n\nAvailable implementations\n-------------------------\n\n* Python (reference implementation): [dataintegrityfingerprint-python](https://github.com/expyriment/dataintegrityfingerprint-python)\n\n\nExample data\n------------\nCustom implementations may be tested against [example data](https://github.com/expyriment/DIF/tree/master/example_data) to verify correctness.\n"
}
] | 2 |
seizans/sandbox-django | https://github.com/seizans/sandbox-django | 3596da0cb471f4e2c0f812109a214a54ac6fe1b0 | 415ab3ed13ac44b2b7beaa958367ac7129a3c786 | fcbb917e9a6c4ecb1b004ea1909720e0a5b0e105 | HEAD | 2016-09-05T08:53:32.340239 | 2014-04-29T08:37:21 | 2014-04-29T08:37:21 | 14,644,341 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6412556171417236,
"alphanum_fraction": 0.6412556171417236,
"avg_line_length": 19.272727966308594,
"blob_id": "bfc210f728b38560fdebf633fa5cdc9b3097b1e4",
"content_id": "53319bf20accd9d2e9c6ab688ffcea6a969ed534",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/README.rst",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "==============\nsandbox-django\n==============\n\nrequirementsについて\n====================\n\n:_base.txt: 共通で使うライブラリを指定する (バージョンは指定する)\n:deploy.txt: 本番環境やステージング環境に使うライブラリを指定する\n:ci.txt: CIサーバーが使うライブラリを指定する\n:dev.txt: 開発環境で使うライブラリを指定する\n"
},
{
"alpha_fraction": 0.5467422008514404,
"alphanum_fraction": 0.5495750904083252,
"avg_line_length": 19.764705657958984,
"blob_id": "eeb2acaa607f79ad4338c65538e74d87a5dcab5f",
"content_id": "ebf93c804de6b3cad16e2b771d39902334ac63f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 17,
"path": "/sandbox/settings/_stg.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# ステージング用の環境で共通の設定\n# base系の後で import * されるので、上書きをする挙動になる\n\nALLOWED_HOSTS = ['ステージング環境で使うホスト名を入れる']\nSECRET_KEY = 'n+i_fly3y8v%(hgp#n(9h3@brw6qjiae)$gauqd)mee1t3dp1u'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'dbname',\n 'USER': 'dbuser',\n 'PASSWORD': 'password',\n 'HOST': 'hostname',\n 'PORT': '',\n }\n}\n"
},
{
"alpha_fraction": 0.43640896677970886,
"alphanum_fraction": 0.4438902735710144,
"avg_line_length": 18.095237731933594,
"blob_id": "ad9374cac3b733f3ba4e194b5a21aae269d5e119",
"content_id": "be94b29c5f072c4722253c97faaa1bee34552766",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 21,
"path": "/sandbox/api/jsonschemas.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nnotes_requests = {\n}\n\nnotes_response = {\n 'type': 'object',\n 'required': ['hoge'],\n 'properties': {\n 'hoge': {'type': 'string'},\n 'mado': {\n 'type': 'array',\n 'minItems': 1,\n 'items': {'type': 'integer'},\n 'uniqueItems': True,\n },\n 'three': {'type': 'integer'},\n },\n}\n\nnotes2_response = notes_response\n"
},
{
"alpha_fraction": 0.623481810092926,
"alphanum_fraction": 0.6788124442100525,
"avg_line_length": 34.28571319580078,
"blob_id": "36acd4a9579b2c056881463ff78a83395e3f6d16",
"content_id": "b5d77b05505a93a2dc27eeb37710595c83affe18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Ruby",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 21,
"path": "/vagrant/Vagrantfile",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVagrant.configure(\"2\") do |config|\n config.ssh.forward_agent = true\n config.vm.box = \"ubuntu-14.04\"\n config.vm.network :private_network, ip: \"192.168.55.55\"\n config.vm.network :forwarded_port, guest: 80, host: 8888\n config.vm.network :forwarded_port, guest: 8000, host: 8000\n config.vm.network :forwarded_port, guest: 9200, host: 9200\n\n config.vm.provider :virtualbox do |vb|\n vb.customize [\"modifyvm\", :id, \"--memory\", \"2048\"]\n end\n\n config.vm.synced_folder \"nginx/\", \"/home/vagrant/synced/nginx\", create: true\n config.vm.provision \"docker\" do |docker|\n docker.build_image \"/home/vagrant/synced/nginx\", args: \"-t seizans/nginx\"\n docker.run \"seizans/nginx\", args: \"-P\"\n end\nend\n"
},
{
"alpha_fraction": 0.7379518151283264,
"alphanum_fraction": 0.7394578456878662,
"avg_line_length": 27.869565963745117,
"blob_id": "93bd8cd3caf99df061a74b86582f137e877131bd",
"content_id": "3e03f13c3610e2569448f4373845ecf7e4d2600b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 664,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 23,
"path": "/sandbox/core/search_indexes.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom celery_haystack.indexes import CelerySearchIndex\nfrom django.utils import timezone\nfrom haystack import indexes\n\nfrom .models import Note\n\n\nclass NoteIndex(CelerySearchIndex, indexes.Indexable):\n text = indexes.CharField(document=True, use_template=False)\n\n title = indexes.CharField(model_attr='title')\n author = indexes.CharField(model_attr='author')\n\n created = indexes.DateTimeField(model_attr='created')\n updated = indexes.DateTimeField(model_attr='updated')\n\n def get_model(self):\n return Note\n\n def index_queryset(self, using=None):\n return self.get_model().objects.filter(updated__lte=timezone.now())\n"
},
{
"alpha_fraction": 0.6812842488288879,
"alphanum_fraction": 0.6891151070594788,
"avg_line_length": 29.404762268066406,
"blob_id": "21a07022c90be8f84d038bd62017fb534a706dfc",
"content_id": "bc188a6fe26a4d75f4bb79dd1c6a4ce915899cab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1297,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 42,
"path": "/sandbox/store/views.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import render\nfrom django.views.generic.edit import CreateView\nfrom haystack.query import SearchQuerySet\n\nfrom core.models import Note\nfrom core.search_indexes import NoteIndex\n\nfrom .tasks import add\n\n\ndef hello(request):\n result = add.delay(3, 8)\n while not result.ready():\n print 'hoge'\n print result.get()\n d = {'from_hello_view': 'From Hello View'}\n return render(request, 'store/hello.html', d)\n\n\ndef insert_note(request):\n note1 = Note.objects.create(title=u'タイトル', author=u'著者', content=u'内容')\n NoteIndex().update_object(note1)\n note2 = Note.objects.create(title='title1', author='author1', content='content1')\n NoteIndex().update_object(note2)\n d = {'from_hello_view': 'From Hello View'}\n return render(request, 'store/hello.html', d)\n\n\nclass NoteView(CreateView):\n model = Note\n template_name = 'store/note.html'\n success_url = reverse_lazy('note')\n\n def get_context_data(self, **kwargs):\n context = super(NoteView, self).get_context_data(**kwargs)\n context['notes'] = Note.objects.all()\n context['query'] = SearchQuerySet().models(Note).filter(title=u'定食')\n return context\nnote = NoteView.as_view()\n"
},
{
"alpha_fraction": 0.692967414855957,
"alphanum_fraction": 0.6946826577186584,
"avg_line_length": 26.761905670166016,
"blob_id": "1d1472000c5c8d8329fcf0f6c6b6d1109562fb32",
"content_id": "838a464c2ddda1ae0177e54bb67d97a1c44532f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 583,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 21,
"path": "/sandbox/core/middleware.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.core.urlresolvers import resolve\n\n\nclass JsonSchemaValidateMiddleware(object):\n\n def process_request(self, request):\n print request.path_info\n resolver_match = resolve(request.path_info)\n print resolver_match.url_name\n print resolver_match.func\n print resolver_match.view_name\n print resolver_match.namespace\n print resolver_match.namespaces\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n pass\n\n def process_response(self, request, response):\n return response\n"
},
{
"alpha_fraction": 0.6534047722816467,
"alphanum_fraction": 0.661055862903595,
"avg_line_length": 31.674999237060547,
"blob_id": "69f8d4ead1e689d0bc8d5fceb5ff65d538939a5c",
"content_id": "132c5421f6f0f6a6722a5a8dbe7fc75f3252317f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1323,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 40,
"path": "/sandbox/api/tests.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.test import TestCase\nimport jsonschema\nimport simplejson as json\n\nfrom . import jsonschemas\n\n\nclass JsonSchemaTestCase(TestCase):\n def assertSchema(self, schema, content):\n try:\n jsonschema.validate(json.loads(content), schema)\n except json.JSONDecodeError as e:\n # json 形式が不正の場合\n self.fail(e.message)\n except jsonschema.ValidationError as e:\n if e.validator == 'required':\n self.fail(e.message)\n else:\n self.fail(e.message)\n\n\nclass NotesTest(JsonSchemaTestCase):\n def test_schema(self):\n response = self.client.get('/api/notes')\n self.assertEqual(response.status_code, 200)\n self.assertSchema(jsonschemas.notes_response, response.content)\n self.assertEqual(response['Content-Type'], 'application/json')\n\n\nclass Notes2Test(JsonSchemaTestCase):\n def test_schema(self):\n response = self.client.get('/api/notes2')\n self.assertEqual(response.status_code, 200)\n self.assertSchema(jsonschemas.notes2_response, response.content)\n self.assertEqual(response['Content-Type'], 'application/json')\n # print response.content\n # data = json.loads(response.content)\n # jsonschema.validate(data, schema)\n"
},
{
"alpha_fraction": 0.603960394859314,
"alphanum_fraction": 0.603960394859314,
"avg_line_length": 24.25,
"blob_id": "73992354fd5a64f33da028093d8125a267bc16c6",
"content_id": "6a14e70bba6effb22f87679d861efe449c0bca8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 202,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 8,
"path": "/sandbox/store/urls.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n 'store.views',\n url(r'^hello$', 'hello'),\n url(r'^note$', 'note', name='note'),\n url(r'^insert-note$', 'insert_note'),\n)\n"
},
{
"alpha_fraction": 0.6913580298423767,
"alphanum_fraction": 0.6954732537269592,
"avg_line_length": 23.299999237060547,
"blob_id": "fdb9d1f9f335e34bca8c8cba450b8a9c20735ca2",
"content_id": "9d87deab6047c8fe221af9a1c1a5d17fc4f8a620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 486,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 20,
"path": "/sandbox/api/urls.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, patterns, url\nfrom rest_framework import viewsets, routers\n\nfrom core.models import Note\n\n\nclass NoteViewSet(viewsets.ModelViewSet):\n model = Note\n\nrouter = routers.DefaultRouter()\nrouter.register(r'notes', NoteViewSet)\n\nurlpatterns = patterns(\n 'api.views',\n url('^notes$', 'notes'),\n url('^notes2$', 'notes2'),\n\n url('^rest/', include(router.urls)),\n url('^auth/', include('rest_framework.urls', namespace='rest_framework')),\n)\n"
},
{
"alpha_fraction": 0.5267605781555176,
"alphanum_fraction": 0.5360563397407532,
"avg_line_length": 28.58333396911621,
"blob_id": "40037bad642835ba37029faf913516e131c12baa",
"content_id": "a745f54194d59da02561021b43ee88f36753f710",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3578,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 120,
"path": "/sandbox/settings/_base.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 全アプリ、全環境に共通の設定\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'rest_framework',\n\n 'haystack',\n 'elasticstack',\n 'celery_haystack',\n 'core',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.middleware.JsonSchemaValidateMiddleware',\n)\n\nREST_FRAMEWORK = {\n # Use hyperlinked styles by default.\n # Only used if the `serializer_class` attribute is not set on a view.\n 'DEFAULT_MODEL_SERIALIZER_CLASS':\n 'rest_framework.serializers.HyperlinkedModelSerializer',\n\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ]\n}\n\nWSGI_APPLICATION = 'core.wsgi.application'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\nLANGUAGE_CODE = 'ja'\nTIME_ZONE = 'Asia/Tokyo'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\nSTATIC_URL = '/static/'\n\n# haystack\nHAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'elasticstack.backends.ConfigurableElasticSearchEngine',\n 'URL': '127.0.0.1:9200',\n 'INDEX_NAME': 'sandbox',\n },\n}\nELASTICSEARCH_DEFAULT_ANALYZER = 'kuromoji_analyzer'\nELASTICSEARCH_INDEX_SETTINGS = {\n 'settings': {\n \"analysis\": {\n \"analyzer\": {\n \"kuromoji_analyzer\": {\n \"type\": \"custom\",\n \"tokenizer\": \"kuromoji_tokenizer\",\n \"filter\": \"lowercase\",\n },\n \"ngram_analyzer\": {\n \"type\": \"custom\",\n \"tokenizer\": \"lowercase\",\n \"filter\": [\"haystack_ngram\"],\n },\n \"edgengram_analyzer\": {\n \"type\": \"custom\",\n \"tokenizer\": \"lowercase\",\n \"filter\": [\"haystack_edgengram\"],\n }\n },\n \"tokenizer\": {\n \"haystack_ngram_tokenizer\": {\n \"type\": \"nGram\",\n \"min_gram\": 3,\n \"max_gram\": 15,\n },\n \"haystack_edgengram_tokenizer\": {\n \"type\": \"edgeNGram\",\n \"min_gram\": 2,\n \"max_gram\": 15,\n \"side\": \"front\",\n },\n },\n \"filter\": {\n \"haystack_ngram\": {\n \"type\": \"nGram\",\n \"min_gram\": 3,\n \"max_gram\": 15,\n },\n \"haystack_edgengram\": {\n \"type\": \"edgeNGram\",\n \"min_gram\": 5,\n \"max_gram\": 15,\n },\n },\n },\n },\n}\n\n# For tests\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\nNOSE_ARGS = [\n '--cover-html',\n '--with-coverage',\n '--cover-package=core,store',\n]\n"
},
{
"alpha_fraction": 0.700564980506897,
"alphanum_fraction": 0.7062146663665771,
"avg_line_length": 24.285715103149414,
"blob_id": "eda89bb24e5a95af9f130682dcb50c2f549b0a8e",
"content_id": "c34ba53f57b4db9cc730300a0f491f6bbcbd69e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 7,
"path": "/sandbox/back/views.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\nfrom django.shortcuts import render\n\n\ndef hello(request):\n d = {'back_hello_string': 'HELLO BACK APPLICATION'}\n return render(request, 'back/hello.html', d)\n"
},
{
"alpha_fraction": 0.6755319237709045,
"alphanum_fraction": 0.6968085169792175,
"avg_line_length": 25.85714340209961,
"blob_id": "1262adeed63b70f396d76b43f0abfa633d892c2c",
"content_id": "c495ff16c085ad893adc5e7122f8d600330e08f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 752,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 28,
"path": "/sandbox/core/models.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.db import models\n\n\nclass Note(models.Model):\n title = models.CharField(max_length=255, null=False, blank=False)\n author = models.CharField(max_length=255, null=False, blank=False)\n content = models.TextField(null=False, blank=False)\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n\nclass Company(models.Model):\n class Meta:\n db_table = 'company'\n name = models.CharField(max_length=100)\n\n\nclass Staff(models.Model):\n class Meta:\n db_table = 'staff'\n name = models.CharField(max_length=100)\n belong = models.OneToOneField('Company')\n # belong = models.ForeignKey('Company')\n\n company_name = models.CharField(max_length=100)\n"
},
{
"alpha_fraction": 0.5530726313591003,
"alphanum_fraction": 0.5642458200454712,
"avg_line_length": 13.916666984558105,
"blob_id": "8e122fbb379caa66da2db5c0e45209fde8231403",
"content_id": "bd36d09eeb3c530dd6f51fb3f83929fc5afe645e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 185,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 12,
"path": "/sandbox/core/templates/store/hello.html",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "{% extends \"store/_base.html\" %}\n\n{% block title %}Store | Hello{% endblock title %}\n\n{% block main %}\n\n<h2>Hello ページ</h2>\n<p>\n {{ from_hello_view }}\n</p>\n\n{% endblock main %}\n"
},
{
"alpha_fraction": 0.6338672637939453,
"alphanum_fraction": 0.6544622182846069,
"avg_line_length": 19.809524536132812,
"blob_id": "ddc7a1e725abbe7163763917eebec2b99a0995a9",
"content_id": "d3f9f7dee8e39ded1a65796ec35b50a5c5faf9da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 437,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 21,
"path": "/sandbox/store/tests/test_01.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.test import TestCase\n\nfrom core.factories import StaffFactory\n\n\nclass FactoryBoyTest(TestCase):\n def setUp(self):\n pass\n\n def test_factory(self):\n staff1 = StaffFactory()\n print staff1.name\n print staff1.belong.name\n print staff1.company_name\n\n staff2 = StaffFactory()\n print staff2.name\n print staff2.belong.name\n print staff2.company_name\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7019230723381042,
"avg_line_length": 25,
"blob_id": "a22963ccca4f4097fac0146dc0398c27f8e66fea",
"content_id": "df73bdf1827d616eb06bd6785eacc444de2f0d30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 4,
"path": "/sandbox/settings/store_stg.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 表側アプリケーションの、ステージング環境用の設定\nfrom ._store_base import * # NOQA\nfrom ._stg import * # NOQA\n"
},
{
"alpha_fraction": 0.7177914381027222,
"alphanum_fraction": 0.7239263653755188,
"avg_line_length": 22.285715103149414,
"blob_id": "639591ecda086100a50ad1b8ab108d5c266c1d8b",
"content_id": "e94f9ce1b009ed2d49e8a47ff179485143befec4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/sandbox/settings/store_dev.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 表側アプリケーションの、開発環境用の設定\nfrom ._store_base import * # NOQA\nfrom ._dev import * # NOQA\n\n# .dev で定義されている追加分を追加する\nINSTALLED_APPS += INSTALLED_APPS_PLUS\n"
},
{
"alpha_fraction": 0.6515151262283325,
"alphanum_fraction": 0.6590909361839294,
"avg_line_length": 17.85714340209961,
"blob_id": "c42fd688e577df3fcfa5ecdcf46824d10cc4f0ff",
"content_id": "0efe1d864e87cac0b2ecc9d2439ac0dac958058d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/sandbox/back/urls.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n 'back.views',\n url(r'^hello$', 'hello'),\n)\n"
},
{
"alpha_fraction": 0.5775400996208191,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 16,
"blob_id": "bcc674eb6be8af9d353621eb151ed497ceae8c58",
"content_id": "8c02169236e79ec20cf1e44242aba13099b27926",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 11,
"path": "/sandbox/settings/_store_base.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# storeアプリケーション(表側)に共通の設定\nfrom ._base import * # NOQA\n\nROOT_URLCONF = 'core.store_urls'\n\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 30 # 30日間\n\nINSTALLED_APPS += (\n 'store',\n)\n"
},
{
"alpha_fraction": 0.5898617506027222,
"alphanum_fraction": 0.6082949042320251,
"avg_line_length": 17.869565963745117,
"blob_id": "a8a24db74d526bfa55c280a7bb3eb0a4b0e814b1",
"content_id": "3e894ca83328f7f0192619f1fd0a01005dd4ded0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 434,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 23,
"path": "/sandbox/api/views.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n\nfrom django.http import JsonResponse\n\n\ndef notes(request):\n content = {\n 'hoge': 'fuga',\n 'mado': [1, 3, 5],\n }\n return JsonResponse(content)\n\nfrom django.http import HttpResponse\nimport simplejson as json\n\n\ndef notes2(request):\n content = {\n 'hoge': 'fuga',\n 'mado': [1, 3, 5],\n }\n body = json.dumps(content)\n return HttpResponse(body, content_type='application/json')\n"
},
{
"alpha_fraction": 0.678205132484436,
"alphanum_fraction": 0.6820513010025024,
"avg_line_length": 21.285715103149414,
"blob_id": "e32ab5fa6375cf5b668ba2a2fe2fa2e5a10d6efc",
"content_id": "579b41da95a0bcee622473d3eb22266051480d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 35,
"path": "/sandbox/settings/_dev.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 開発用の環境で共通の設定\n# base系の後で import * されるので、上書きをする挙動になる\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'n+i_fly3y8v%(hgp#n(9h3@brw6qjiae)$gauqd)mee1t3dp1u'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\nINSTALLED_APPS_PLUS = (\n 'debug_toolbar',\n)\n\n\n# Celery\nBROKER_URL = 'redis://localhost'\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_BACKEND = 'redis'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_ACCEPT_CONTENT = ['json']\n"
},
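The store_dev, store_stg, _store_base and _dev modules above compose Django settings by star-importing layers: a shared base, an app-specific base, then an environment module whose names override the earlier ones and which contributes INSTALLED_APPS_PLUS for dev-only apps. A minimal self-contained sketch of that layering idea follows; the source strings stand in for the repo's modules (the real `_base.py` is not shown in this dump, so its contents here are assumed):

```python
# Each "module" is a source string; executing a later layer into the same
# namespace overrides names from an earlier one, exactly like
# `from ._store_base import *` followed by `from ._dev import *`.

_base = """
DEBUG = False
INSTALLED_APPS = ('django.contrib.auth',)
"""

_store_base = _base + """
ROOT_URLCONF = 'core.store_urls'
INSTALLED_APPS += ('store',)
"""

_dev = """
DEBUG = True                                  # overrides the base value
INSTALLED_APPS_PLUS = ('debug_toolbar',)      # dev-only additions
"""

settings = {}
for layer in (_store_base, _dev):             # import order matters
    exec(layer, settings)

# store_dev.py then does: INSTALLED_APPS += INSTALLED_APPS_PLUS
settings['INSTALLED_APPS'] += settings['INSTALLED_APPS_PLUS']

print(settings['DEBUG'])                      # True: the dev layer wins
print(settings['INSTALLED_APPS'])             # auth, store, debug_toolbar
```

The same override-by-import-order behavior is what the `_dev.py` comment below its header describes.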
{
"alpha_fraction": 0.694915235042572,
"alphanum_fraction": 0.6991525292396545,
"avg_line_length": 18.66666603088379,
"blob_id": "9f7a1c77e421bf759fdbd6265b2a28597117a792",
"content_id": "576e4d3011ec51eaddbb234f04088a64cc76ccd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 12,
"path": "/sandbox/settings/_back_base.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# backアプリケーション(管理用)に共通の設定\nfrom ._base import * # NOQA\n\nROOT_URLCONF = 'core.back_urls'\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True # 管理アプリではブラウザ閉じるとセッション期限切れにする\n\nINSTALLED_APPS += (\n 'django.contrib.admin',\n 'back',\n)\n"
},
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7312312126159668,
"avg_line_length": 27.95652198791504,
"blob_id": "ee4aa7b10824146913e85d2e8ff1d0815a06bbc2",
"content_id": "994e76864891189be59dbcefb653c3be4d0833a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 682,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 23,
"path": "/sandbox/core/factories.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\nimport string\n\nimport factory\nfrom factory.fuzzy import FuzzyText\n\nfrom core.models import Company, Staff\n\n\nclass CompanyFactory(factory.DjangoModelFactory):\n FACTORY_FOR = Company\n id = factory.Sequence(lambda n: n)\n name = FuzzyText(prefix='1499', length=9, chars=string.digits)\n # name = factory.LazyAttribute(lambda o: '会社名{}'.format(o.id))\n\n\nclass StaffFactory(factory.DjangoModelFactory):\n FACTORY_FOR = Staff\n id = factory.Sequence(lambda n: n)\n name = factory.LazyAttribute(lambda o: 'スタッフ名{}'.format(o.id))\n belong = factory.SubFactory(CompanyFactory)\n\n company_name = factory.LazyAttribute(lambda o: o.belong.name)\n"
},
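factories.py above combines Sequence, FuzzyText, LazyAttribute and SubFactory: the SubFactory builds the related Company first, so a LazyAttribute on StaffFactory can read values from it. A runnable sketch of the same combination with plain classes follows (factory.Factory replaces DjangoModelFactory so no Django setup is needed; requires the factory_boy package, and the stand-in classes are assumptions, not the repo's models):

```python
import string

import factory                      # pip install factory_boy
from factory.fuzzy import FuzzyText


class Company:                      # plain stand-ins for the Django models
    def __init__(self, id, name):
        self.id, self.name = id, name


class Staff:
    def __init__(self, id, name, belong, company_name):
        self.id, self.name = id, name
        self.belong, self.company_name = belong, company_name


class CompanyFactory(factory.Factory):
    class Meta:
        model = Company
    id = factory.Sequence(lambda n: n)
    name = FuzzyText(prefix='1499', length=9, chars=string.digits)


class StaffFactory(factory.Factory):
    class Meta:
        model = Staff
    id = factory.Sequence(lambda n: n)
    name = factory.LazyAttribute(lambda o: 'staff-{}'.format(o.id))
    belong = factory.SubFactory(CompanyFactory)            # built first
    company_name = factory.LazyAttribute(lambda o: o.belong.name)


staff = StaffFactory()
assert staff.company_name == staff.belong.name   # lazy attr sees the subfactory
```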
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7019230723381042,
"avg_line_length": 25,
"blob_id": "59a2448d452f18dfde788de1aa4b206a7e2a07d1",
"content_id": "e0477b9503b8b4bf9c0c5c84ee2f6adeb646bc20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 4,
"path": "/sandbox/settings/back_stg.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 管理用アプリケーションの、ステージング環境用の設定\nfrom ._back_base import * # NOQA\nfrom ._stg import * # NOQA\n"
},
{
"alpha_fraction": 0.7177914381027222,
"alphanum_fraction": 0.7239263653755188,
"avg_line_length": 22.285715103149414,
"blob_id": "e1da04f4a808b9beacbc000041ad2f6fc20882ac",
"content_id": "6b9a9226d7ef75fe4bab3539bac40c4645c20737",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/sandbox/settings/back_dev.py",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "# coding=utf8\n# 管理用アプリケーションの、開発環境用の設定\nfrom ._back_base import * # NOQA\nfrom ._dev import * # NOQA\n\n# .dev で定義されている追加分を追加する\nINSTALLED_APPS += INSTALLED_APPS_PLUS\n"
},
{
"alpha_fraction": 0.7174887657165527,
"alphanum_fraction": 0.7443946003913879,
"avg_line_length": 17.58333396911621,
"blob_id": "c26283191b3b868e3cd7897f949fdd3312a79568",
"content_id": "09dd796ec70c5e975175db2bb863e52704ea17a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 223,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 12,
"path": "/vagrant/nginx/Dockerfile",
"repo_name": "seizans/sandbox-django",
"src_encoding": "UTF-8",
"text": "FROM ubuntu:14.04\n\nRUN apt-get update\nRUN apt-get install -y nginx\nRUN echo \"\\ndaemon off;\" >> /etc/nginx/nginx.conf\n\nVOLUME /etc/nginx/sites-enabled\nVOLUME /var/log/nginx\n\nWORKDIR /etc/nginx\nEXPOSE 80\nENTRYPOINT [\"nginx\"]\n"
}
] | 26 |
zhm/minisqlite | https://github.com/zhm/minisqlite | 6fcbeb53bbc1fe88235610b2b1b04e607bafc93f | 898acce752c8ce15c611b16f604b386808b98019 | c8eb0556bb1ae5603b278dba1faf2e937cf2e976 | refs/heads/master | 2023-01-25T01:06:40.844488 | 2021-07-29T04:02:51 | 2021-07-29T04:02:51 | 85,376,909 | 2 | 0 | BSD-3-Clause | 2017-03-18T04:58:51 | 2021-07-29T04:02:57 | 2023-01-07T04:13:29 | C++ | [
{
"alpha_fraction": 0.48843416571617126,
"alphanum_fraction": 0.4964412748813629,
"avg_line_length": 26.414634704589844,
"blob_id": "6682df4ed852d5e8a6aa1c5df9cc687742d01298",
"content_id": "43b85aa4b72720257eaa801ed4d4023643f30e60",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1124,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 41,
"path": "/binding.gyp",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "{\n \"targets\": [\n {\n \"target_name\": \"minisqlite\",\n \"sources\": [ \"addon.cc\",\n \"database.cc\",\n \"statement.cc\",\n \"open-worker.cc\",\n \"deps/sqlite3.c\" ],\n \"include_dirs\": [\n \"<!(node -e \\\"require('nan')\\\")\",\n \"./deps\",\n\t\t\t],\n \"defines\": [\n 'SQLITE_THREADSAFE=1',\n 'SQLITE_ENABLE_FTS3',\n 'SQLITE_ENABLE_FTS3_PARENTHESIS',\n 'SQLITE_ENABLE_FTS4',\n 'SQLITE_ENABLE_FTS5',\n 'SQLITE_ENABLE_JSON1',\n 'SQLITE_ENABLE_RTREE',\n 'SQLITE_ENABLE_GEOPOLY',\n 'SQLITE_ENABLE_SESSION',\n 'SQLITE_ENABLE_DBPAGE_VTAB',\n 'SQLITE_ENABLE_DBSTAT_VTAB',\n 'SQLITE_ENABLE_BYTECODE_VTAB',\n 'SQLITE_ENABLE_SNAPSHOT',\n 'SQLITE_ENABLE_COLUMN_METADATA',\n 'SQLITE_ENABLE_NULL_TRIM',\n 'SQLITE_ENABLE_SESSION',\n 'SQLITE_ENABLE_PREUPDATE_HOOK',\n 'SQLITE_ENABLE_STAT4',\n 'SQLITE_ENABLE_MATH_FUNCTIONS',\n 'SQLITE_ENABLE_LOAD_EXTENSION=1',\n 'GPKG_HAVE_CONFIG_H'\n ],\n 'conditions': [\n ]\n }\n ],\n}\n"
},
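The binding.gyp above bakes optional features such as FTS5, RTREE and JSON1 into the bundled SQLite via SQLITE_ENABLE_* defines at compile time. Whether a given SQLite build actually carries a feature can be checked at runtime with `PRAGMA compile_options`; a quick sketch against Python's bundled SQLite (any build works, features simply print as missing):

```python
import sqlite3

conn = sqlite3.connect(':memory:')
# Each row is one compile option, without the SQLITE_ prefix,
# e.g. 'ENABLE_FTS5' or 'THREADSAFE=1'.
opts = {row[0] for row in conn.execute('PRAGMA compile_options')}

for feature in ('ENABLE_FTS5', 'ENABLE_RTREE', 'ENABLE_JSON1'):
    print(feature, 'ENABLED' if feature in opts else 'missing')

conn.close()
```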
{
"alpha_fraction": 0.7293577790260315,
"alphanum_fraction": 0.747706413269043,
"avg_line_length": 15.769230842590332,
"blob_id": "67c34b416f8fd6177e6acc5007d84a82fe54596d",
"content_id": "952c6b0d9c9604a732b8fcc5110c39e676b6e4b6",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 218,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 13,
"path": "/README.md",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "# minisqlite\n\n# SQLite Version\n\n## 3.36.0\n\nThe main features are user-defined JS functions and pull-based processing so the fetching can be fully coordinated from JS.\n\n## Installation\n\n```sh\nnpm install minisqlite\n```\n"
},
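"Pull-based processing" in the README means the consumer, not a callback firing as fast as rows arrive, decides when the next row is fetched. Python's sqlite3 cursor gives the same shape almost for free; the sketch below is only an analogy to the idea, not minisqlite's actual JS API:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript(
    "CREATE TABLE t (n INTEGER);"
    "INSERT INTO t VALUES (1), (2), (3);"
)

def rows(cursor):
    # A generator: each next() pulls exactly one row, so the
    # consumer fully controls pacing (it can pause, buffer, or stop).
    while True:
        row = cursor.fetchone()
        if row is None:
            return
        yield row

cur = conn.execute("SELECT n FROM t")
stream = rows(cur)
print(next(stream))        # pull the first row only when ready
print(list(stream))        # then drain the rest
conn.close()
```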
{
"alpha_fraction": 0.7564102411270142,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "ff9d1eea86299f41b37d5e82ecaa3efb7a23f7d5",
"content_id": "4c42077fb636e7f2a0ff01e562e2355b021d4589",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 78,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 3,
"path": "/deps/sqlite3rc.h",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#ifndef SQLITE_RESOURCE_VERSION\n#define SQLITE_RESOURCE_VERSION 3,36,0\n#endif\n"
},
{
"alpha_fraction": 0.701438844203949,
"alphanum_fraction": 0.701438844203949,
"avg_line_length": 38.71428680419922,
"blob_id": "705cfc886ed7b8fefc6c604e2585907cd16b5745",
"content_id": "5aed0f657071c2a45b0f06fc30c86d5b7154bbfd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 278,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 7,
"path": "/scripts/publish.sh",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "if [ -z \"$TARGET\" ]; then\n ./node_modules/.bin/node-pre-gyp package\n ./node_modules/.bin/node-pre-gyp publish\nelse\n ./node_modules/.bin/node-pre-gyp package --runtime=$RUNTIME --target=$TARGET\n ./node_modules/.bin/node-pre-gyp publish --runtime=$RUNTIME --target=$TARGET\nfi\n"
},
{
"alpha_fraction": 0.570588231086731,
"alphanum_fraction": 0.5841176509857178,
"avg_line_length": 18.988235473632812,
"blob_id": "b9411ffaa54a78a8e694b3b798224c1dffa004d6",
"content_id": "e0bc62ddb648ae79a692d83273d2999f1cad001a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1700,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 85,
"path": "/bench/sqlite3-bench.js",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "import sqlite3 from 'sqlite3';\nimport fs from 'fs';\nimport path from 'path';\nimport rimraf from 'rimraf';\n\nconst DATABASE_PATH = 'test2.db';\n\n// rimraf.sync(DATABASE_PATH);\n\nlet db = null;\n\nconst createDatabase = () => {\n return new Promise((resolve, reject) => {\n db = new sqlite3.Database(DATABASE_PATH, (err) => {\n if (err) {\n return reject(err);\n }\n return resolve();\n });\n });\n};\n\nconst execSQL = (client, command, callback) => {\n return new Promise((resolve, reject) => {\n const statement = db.prepare(command);\n\n statement.run();\n\n statement.finalize((err) => {\n if (err) {\n return reject(err);\n }\n return resolve();\n });\n });\n};\n\nconst ITERATIONS = 1000000;\n\nasync function runTest() {\n const client = await createDatabase();\n\n const setup = 'CREATE TABLE test_table (t1 TEXT, t2 INTEGER);';\n await execSQL(client, setup);\n\n const insert = \"INSERT INTO test_table (t1, t2) SELECT 'test',\";\n\n await execSQL(client, 'BEGIN');\n for (let i = 0; i < ITERATIONS; ++i) {\n await execSQL(client, insert + i);\n }\n await execSQL(client, 'COMMIT');\n\n db.close();\n}\n\nasync function runReadTest() {\n const now = new Date().getTime();\n\n await createDatabase();\n\n // await runTest();\n\n const select = \"SELECT * FROM test_table limit 1000000\";\n\n db.each(select, (err, row) => {\n // console.log(row);\n }, () => {\n console.log('TIME', new Date().getTime() - now);\n db.close();\n });\n\n}\n\nrunReadTest().then(() => {\n console.log('done now');\n}).catch((err) => {\n console.error('error', err);\n});\n\n// runTest().then(() => {\n// console.log('done now');\n// }).catch((err) => {\n// console.error('error', err);\n// });\n\n"
},
{
"alpha_fraction": 0.6774193644523621,
"alphanum_fraction": 0.6935483813285828,
"avg_line_length": 30,
"blob_id": "54e4f50a9613f4d27b22b6b6cbd3816f5b99e666",
"content_id": "fa968a7a01d1e754efe9448a259995e205c35528",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 62,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 2,
"path": "/bench/sqlite3.js",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "require('babel-core/register')();\nrequire('./sqlite3-bench');\n"
},
{
"alpha_fraction": 0.6504641175270081,
"alphanum_fraction": 0.6634098887443542,
"avg_line_length": 28.453237533569336,
"blob_id": "0c930fe4b967b6f5468f1b236a3d153c9b6e97c2",
"content_id": "cc3c0519965ee104b351e97b0a6d96925232c45a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8188,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 278,
"path": "/database.cc",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#include \"database.h\"\n#include \"statement.h\"\n#include \"open-worker.h\"\n\n#include <iostream>\n\nNan::Persistent<v8::Function> Database::constructor;\n\nDatabase::Database() : db_(nullptr), statements_() {\n}\n\nDatabase::~Database() {\n Close();\n}\n\nvoid Database::Init(v8::Local<v8::Object> exports) {\n Nan::HandleScope scope;\n\n v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);\n tpl->SetClassName(Nan::New(\"Database\").ToLocalChecked());\n tpl->InstanceTemplate()->SetInternalFieldCount(1);\n\n Nan::SetPrototypeMethod(tpl, \"open\", Open);\n Nan::SetPrototypeMethod(tpl, \"lastError\", LastError);\n Nan::SetPrototypeMethod(tpl, \"lastInsertID\", LastInsertID);\n Nan::SetPrototypeMethod(tpl, \"close\", Close);\n Nan::SetPrototypeMethod(tpl, \"createFunction\", CreateFunction);\n\n auto function = Nan::GetFunction(tpl).ToLocalChecked();\n\n constructor.Reset(function);\n\n Nan::Set(exports, Nan::New(\"Database\").ToLocalChecked(), function);\n\n /* sqlite3_auto_extension((void(*)(void))sqlite3_gpkg_init); */\n}\n\nNAN_METHOD(Database::New) {\n if (info.IsConstructCall()) {\n Database *obj = new Database();\n obj->Wrap(info.This());\n info.GetReturnValue().Set(info.This());\n } else {\n const int argc = 1;\n v8::Local<v8::Value> argv[argc] = { info[0] };\n v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);\n info.GetReturnValue().Set(Nan::NewInstance(cons, argc, argv).ToLocalChecked());\n }\n}\n\nNAN_METHOD(Database::Open) {\n std::string connectionString = *Nan::Utf8String(info[0]);\n int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX;\n std::string vfs = \"\";\n\n if (info.Length() >= 1 && info[1]->IsInt32()) {\n flags = Nan::To<int>(info[1]).FromJust();\n }\n\n if (info.Length() >= 2 && info[2]->IsString()) {\n vfs = *Nan::Utf8String(info[2]);\n }\n\n Database* database = ObjectWrap::Unwrap<Database>(info.Holder());\n\n Nan::Callback *callback = new Nan::Callback(info[3].As<v8::Function>());\n\n Nan::AsyncQueueWorker(new OpenWorker(callback, database, connectionString, flags, vfs));\n\n info.GetReturnValue().Set(info.This());\n}\n\nNAN_METHOD(Database::Close) {\n Database* database = ObjectWrap::Unwrap<Database>(info.Holder());\n\n while (!database->statements_.empty()) {\n database->statements_.at(0)->Finalize();\n }\n\n database->Close();\n\n info.GetReturnValue().Set(info.This());\n}\n\nNAN_METHOD(Database::LastError) {\n Database* database = ObjectWrap::Unwrap<Database>(info.Holder());\n\n if (database->lastErrorMessage_.empty()) {\n info.GetReturnValue().SetNull();\n }\n else {\n auto errorObject = Nan::New<v8::Object>();\n\n for (auto const &entry : database->lastError_) {\n auto &key = entry.first;\n auto &value = entry.second;\n\n if (!value.empty()) {\n Nan::Set(errorObject, Nan::New(key).ToLocalChecked(),\n Nan::New(entry.second).ToLocalChecked());\n }\n else {\n Nan::Set(errorObject, Nan::New(key).ToLocalChecked(),\n Nan::Null());\n }\n }\n\n info.GetReturnValue().Set(errorObject);\n }\n}\n\nNAN_METHOD(Database::LastInsertID) {\n Database* database = ObjectWrap::Unwrap<Database>(info.Holder());\n\n auto id = Nan::New<v8::Number>(sqlite3_last_insert_rowid(database->db_));\n\n info.GetReturnValue().Set(id);\n}\n\nNAN_METHOD(Database::CreateFunction) {\n Database* database = ObjectWrap::Unwrap<Database>(info.Holder());\n\n std::string functionName = *Nan::Utf8String(info[0]);\n int numberOfArguments = Nan::To<int>(info[1]).FromJust();\n int textEncoding = Nan::To<int>(info[2]).FromJust();\n\n 
Nan::Callback *mainFunction =\n info[3]->IsFunction() ? new Nan::Callback(info[3].As<v8::Function>()) : nullptr;\n\n Nan::Callback *stepFunction =\n info[4]->IsFunction() ? new Nan::Callback(info[4].As<v8::Function>()) : nullptr;\n\n Nan::Callback *finalFunction =\n info[5]->IsFunction() ? new Nan::Callback(info[5].As<v8::Function>()) : nullptr;\n\n std::vector<Nan::Callback *> *callbacks = new std::vector<Nan::Callback *>();\n\n callbacks->push_back(mainFunction);\n callbacks->push_back(stepFunction);\n callbacks->push_back(finalFunction);\n\n auto result = sqlite3_create_function_v2(\n database->db_,\n functionName.c_str(),\n numberOfArguments,\n textEncoding,\n callbacks,\n mainFunction ? CustomFunctionMain : nullptr,\n stepFunction ? CustomFunctionStep : nullptr,\n finalFunction ? CustomFunctionFinal : nullptr,\n CustomFunctionDestroy\n );\n\n database->SetLastError(result);\n\n if (result != SQLITE_OK) {\n Nan::ThrowError(database->lastErrorMessage_.c_str());\n return;\n }\n\n auto id = Nan::New<v8::Number>(result);\n\n info.GetReturnValue().Set(id);\n}\n\nvoid Database::AddStatement(Statement *statement) {\n statements_.push_back(statement);\n}\n\nvoid Database::RemoveStatement(Statement *statement) {\n statements_.erase(std::remove(statements_.begin(), statements_.end(), statement), statements_.end());\n}\n\nvoid Database::CustomFunctionMain(sqlite3_context *context, int argc, sqlite3_value **argv) {\n std::vector<Nan::Callback *> *callbacks = (std::vector<Nan::Callback *> *)sqlite3_user_data(context);\n\n v8::Local<v8::Value> arguments[] = { Statement::ConvertValues(argc, argv) };\n\n auto result = Nan::Call(*callbacks->at(0), 1, arguments);\n\n SetResult(context, result.ToLocalChecked());\n}\n\nvoid Database::CustomFunctionStep(sqlite3_context *context, int argc, sqlite3_value **argv) {\n std::vector<Nan::Callback *> *callbacks = (std::vector<Nan::Callback *> *)sqlite3_user_data(context);\n\n AggregateContext *agg = static_cast<AggregateContext *>(sqlite3_aggregate_context(context, sizeof(AggregateContext)));\n\n if (!agg->context) {\n auto resultObject = Nan::New<v8::Object>();\n agg->context = new Nan::Persistent<v8::Object>(resultObject);\n }\n\n v8::Local<v8::Value> arguments[] = { Statement::ConvertValues(argc, argv), Nan::New(*agg->context) };\n\n Nan::Call(*callbacks->at(1), 2, arguments);\n}\n\nvoid Database::CustomFunctionFinal(sqlite3_context *context) {\n std::vector<Nan::Callback *> *callbacks = (std::vector<Nan::Callback *> *)sqlite3_user_data(context);\n\n AggregateContext *agg = static_cast<AggregateContext *>(sqlite3_aggregate_context(context, sizeof(AggregateContext)));\n\n v8::Local<v8::Value> arguments[] = { Nan::New(*agg->context) };\n\n auto result = Nan::Call(*callbacks->at(2), 1, arguments);\n\n if (agg->context) {\n agg->context->Reset();\n delete agg->context;\n }\n\n SetResult(context, result.ToLocalChecked());\n}\n\nvoid Database::CustomFunctionDestroy(void *pointer) {\n std::vector<Nan::Callback *> *callbacks = (std::vector<Nan::Callback *> *)pointer;\n\n if (callbacks->at(0)) {\n delete callbacks->at(0);\n }\n if (callbacks->at(1)) {\n delete callbacks->at(1);\n }\n if (callbacks->at(2)) {\n delete callbacks->at(2);\n }\n\n delete callbacks;\n}\n\nvoid Database::Close() {\n if (db_) {\n sqlite3_close(db_);\n db_ = nullptr;\n }\n}\n\nvoid Database::SetLastError(int code) {\n switch (code) {\n case SQLITE_OK:\n case SQLITE_ROW:\n lastErrorMessage_ = \"\";\n break;\n\n case SQLITE_MISUSE:\n lastErrorMessage_ = \"misuse\";\n break;\n\n default:\n 
lastErrorMessage_ = sqlite3_errmsg(db_);\n break;\n }\n\n if (lastErrorMessage_.empty()) {\n return;\n }\n\n lastError_[\"message\"] = lastErrorMessage_;\n}\n\nvoid Database::SetResult(sqlite3_context *context, v8::Local<v8::Value> result) {\n if (result->IsNumber()) {\n sqlite3_result_int64(context, (sqlite3_int64)Nan::To<int64_t>(result).FromJust());\n } else if (result->IsNull() || result->IsUndefined()) {\n sqlite3_result_null(context);\n } else if (result->IsString()) {\n sqlite3_result_text(context, *Nan::Utf8String(result), -1, SQLITE_TRANSIENT);\n } else if (result->IsBoolean()) {\n sqlite3_result_int64(context, (int)Nan::To<bool>(result).FromJust());\n } else if (node::Buffer::HasInstance(result)) {\n v8::Local<v8::Object> buffer = Nan::To<v8::Object>(result).ToLocalChecked();\n sqlite3_result_blob(context, node::Buffer::Data(buffer), node::Buffer::Length(buffer), SQLITE_TRANSIENT);\n } else {\n Nan::Utf8String value(result);\n sqlite3_result_text(context, *value, -1, SQLITE_TRANSIENT);\n }\n}\n"
},
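Database::CreateFunction above registers up to three callbacks with sqlite3_create_function_v2: a main function for scalar UDFs, and step/final for aggregates, where AggregateContext carries a JS object across step calls. Python's sqlite3 module exposes the same C machinery, which makes the scalar/aggregate split easy to see; a runnable sketch (only an analogue, not minisqlite's API):

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript(
    "CREATE TABLE t (n INTEGER);"
    "INSERT INTO t VALUES (1), (2), (3);"
)

# Scalar UDF: maps onto the "main" callback (CustomFunctionMain).
conn.create_function('twice', 1, lambda x: x * 2)

# Aggregate UDF: step/finalize map onto CustomFunctionStep/Final;
# the instance plays the role of the per-query AggregateContext.
class SumCustom:
    def __init__(self):
        self.total = 0

    def step(self, value):
        self.total += value

    def finalize(self):
        return self.total

conn.create_aggregate('sumcustom', 1, SumCustom)

print(conn.execute("SELECT twice(21)").fetchone())             # (42,)
print(conn.execute("SELECT sumcustom(n) FROM t").fetchone())   # (6,)
conn.close()
```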
{
"alpha_fraction": 0.7189189195632935,
"alphanum_fraction": 0.7243243455886841,
"avg_line_length": 15.818181991577148,
"blob_id": "adc10fbf360d608448edb41e70f10fb6089e5b57",
"content_id": "0c222bf5fad556c35b50a976abc5ec57095f61b5",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 185,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 11,
"path": "/addon.cc",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#include \"statement.h\"\n#include \"database.h\"\n\nusing v8::FunctionTemplate;\n\nNAN_MODULE_INIT(Init) {\n Database::Init(target);\n Statement::Init(target);\n}\n\nNODE_MODULE(minisqlite, Init)\n"
},
{
"alpha_fraction": 0.695732831954956,
"alphanum_fraction": 0.7077922224998474,
"avg_line_length": 16.387096405029297,
"blob_id": "9fdb37503b52aab9b3ee21b333c60519c03b1f01",
"content_id": "abf1a14f58e0ec97f299c6f7513b3e56540b9613",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1078,
"license_type": "permissive",
"max_line_length": 111,
"num_lines": 62,
"path": "/statement.h",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#ifndef __STATEMENT_H__\n#define __STATEMENT_H__\n\n#include <map>\n\n#include \"sqlite3.h\"\n#include \"database.h\"\n#include <nan.h>\n\nclass ConnectWorker;\n\nclass Statement : public Nan::ObjectWrap {\npublic:\n static void Init(v8::Local<v8::Object> exports);\n\n static v8::Local<v8::Object> ConvertValues(int count, sqlite3_value **values);\n\n void Finalize();\n\nprivate:\n explicit Statement();\n\n ~Statement();\n\n static NAN_METHOD(New);\n\n static NAN_METHOD(Query);\n\n static NAN_METHOD(GetResults);\n\n static NAN_METHOD(Close);\n\n static NAN_METHOD(IsFinished);\n\n static NAN_METHOD(CreateFunction);\n\n static Nan::Persistent<v8::Function> constructor;\n\n void Close();\n\n void CreateNextStatement();\n\n v8::Local<v8::Value> ProcessSingleResult(bool returnMetadata);\n\n static v8::Local<v8::Object> CreateResult(sqlite3_stmt *statement, bool includeValues, bool includeMetadata);\n\n Database *database_;\n\n sqlite3_stmt *statement_;\n\n std::string sql_;\n\n std::string lastErrorMessage_;\n\n std::map<std::string, std::string> lastError_;\n\n bool finished_;\n\n bool empty_;\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.5894384384155273,
"alphanum_fraction": 0.5979918241500854,
"avg_line_length": 20.685483932495117,
"blob_id": "75edb0a5dffdf530434aeb68ee09f8dee4c6ddde",
"content_id": "03822e52aaf02d04d4846623fea75dbb7a49adc7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2689,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 124,
"path": "/bench/minisqlite-bench.js",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "import { createPool } from '../src';\nimport fs from 'fs';\nimport path from 'path';\nimport rimraf from 'rimraf';\n\nconst db = 'bench/test1.db';\nconst sql = fs.readFileSync(path.join(__dirname, '../test/test.sql')).toString();\n\nconst pool = createPool({db: db});\n\nconst execSQL = (client, command, callback) => {\n return new Promise((resolve, reject) => {\n client.query(command).each((err, {finished, columns, values, index}) => {\n if (err) {\n return reject(err);\n }\n\n /* eslint-disable callback-return */\n if (callback) {\n callback(err, {finished, columns, values, index});\n }\n /* eslint-enable callback-return */\n\n if (finished) {\n return resolve();\n }\n });\n });\n};\n\nconst acquire = () => {\n return new Promise((resolve, reject) => {\n pool.acquire((err, client) => {\n if (err) {\n return reject(err);\n }\n\n resolve(client);\n });\n });\n};\n\nconst ITERATIONS = 1000000;\n\nasync function setupDatabase() {\n rimraf.sync(db);\n\n const client = await acquire();\n\n const setup = 'CREATE TABLE test_table (t1 TEXT, t2 INTEGER);';\n\n await execSQL(client, setup);\n\n const insert = \"INSERT INTO test_table (t1, t2) SELECT 'test',\";\n\n await execSQL(client, 'BEGIN');\n\n for (let i = 0; i < ITERATIONS; ++i) {\n await execSQL(client, insert + i);\n }\n\n await execSQL(client, 'COMMIT');\n\n pool.release(client);\n}\n\nasync function readAllRows() {\n const now = new Date().getTime();\n\n const client = await acquire();\n\n const select = \"SELECT * FROM test_table limit 1000000\";\n\n await execSQL(client, select, (err, finished, columns, values, index) => {\n // console.log(values);\n });\n\n console.log('TIME', new Date().getTime() - now);\n\n pool.release(client);\n}\n\nasync function customFunction() {\n const SUMCUSTOM = (args, context) => {\n context.result = (context.result || 0) + args[0];\n };\n\n const SUMCUSTOMFINAL = (context) => {\n return context.result;\n };\n\n const client = await acquire();\n\n client.createFunction('SUMCUSTOM', null, null, null, SUMCUSTOM, SUMCUSTOMFINAL);\n\n const now = new Date().getTime();\n\n const select = 'SELECT SUMCUSTOM(t2) FROM test_table';\n\n await execSQL(client, select, (err, {finished, columns, values, index}) => {\n });\n\n console.log('CUSTOMTIME', new Date().getTime() - now);\n\n pool.release(client);\n}\n\ncustomFunction().then(() => {\n console.log('done');\n pool.drain(() => {\n pool.destroyAllNow();\n });\n}).catch((err) => {\n console.error('error', err);\n});\n\n// runTest().then(() => {\n// console.log('done');\n// pool.drain(() => {\n// pool.destroyAllNow();\n// });\n// }).catch((err) => {\n// console.error('error', err);\n// });\n"
},
{
"alpha_fraction": 0.65742027759552,
"alphanum_fraction": 0.662968099117279,
"avg_line_length": 24.75,
"blob_id": "84ffc954754e3e476c9b8247d19cf10ea94e6d47",
"content_id": "91f108bb6e624b79af8c861caafbbc09bde2c29a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 721,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 28,
"path": "/open-worker.cc",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#include \"open-worker.h\"\n\nOpenWorker::OpenWorker(Nan::Callback *callback, Database *database, std::string connectionString, int flags, std::string vfs)\n : AsyncWorker(callback),\n database_(database),\n connectionString_(connectionString),\n flags_(flags),\n vfs_(vfs)\n{}\n\nOpenWorker::~OpenWorker()\n{}\n\nvoid OpenWorker::Execute() {\n const char *vfs = vfs_.length() ? vfs_.c_str() : nullptr;\n\n int result = sqlite3_open_v2(connectionString_.c_str(), &database_->db_, flags_, vfs);\n\n database_->SetLastError(result);\n\n if (result != SQLITE_OK) {\n database_->Close();\n SetErrorMessage(database_->lastErrorMessage_.c_str());\n return;\n }\n\n sqlite3_enable_load_extension(database_->db_, 1);\n}\n"
},
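OpenWorker passes SQLITE_OPEN_* flags straight through sqlite3_open_v2 on a worker thread. The same flag choices can be expressed in Python through SQLite URI filenames (`mode=ro|rw|rwc|memory`), which is a handy way to see how the open mode changes behavior; a sketch:

```python
import sqlite3

# mode=rwc ~ SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE (the worker's default)
conn = sqlite3.connect('file:demo.db?mode=rwc', uri=True)
conn.execute('CREATE TABLE IF NOT EXISTS t (n INTEGER)')
conn.commit()
conn.close()

# mode=ro ~ SQLITE_OPEN_READONLY: writes now fail loudly
ro = sqlite3.connect('file:demo.db?mode=ro', uri=True)
try:
    ro.execute('INSERT INTO t VALUES (1)')
except sqlite3.OperationalError as e:
    print('rejected as expected:', e)
finally:
    ro.close()
```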
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.644444465637207,
"avg_line_length": 21.5,
"blob_id": "6c00a5a9aa5890cecaa5741f75fd6048f4ab2ea9",
"content_id": "eec63cbeafce3f02c840f762563279b7daad86b7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 45,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 2,
"path": "/CHANGELOG.md",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "## 0.0.1 (December 20, 2015)\n* First version\n"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 18.33333396911621,
"blob_id": "e22d3e8794facc15a93719b4a57d469c055fb99d",
"content_id": "f30c812e3d1499fc21c3e09a2ed52313eb3f5428",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 232,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 12,
"path": "/test/test.sql",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "CREATE TABLE test_table (t1 TEXT, t2 REAL);\n\nINSERT INTO test_table (t1, t2)\nSELECT 'test1', 1;\n\nINSERT INTO test_table (t1, t2)\nSELECT 'test2', 2;\n\nINSERT INTO test_table (t1, t2)\nSELECT 'test3', 3;\n\nSELECT t1, t2 FROM test_table;\n"
},
{
"alpha_fraction": 0.6769383549690247,
"alphanum_fraction": 0.6938369870185852,
"avg_line_length": 26.16216278076172,
"blob_id": "1ef7f1865423204e9a8329a7e4d8ee94317bf8b9",
"content_id": "4090e4adde10b5c71f53b6644ca652e3e2763ff4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1006,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 37,
"path": "/scripts/install.sh",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "\n# using clang on linux causes\n# Error: /usr/lib/x86_64-linux-gnu/libstdc++.so.6: version `GLIBCXX_3.4.20' not found\n\nif [ $TRAVIS_OS_NAME == \"linux\" ]; then\n export CC=/usr/bin/gcc-4.8\n export CXX=/usr/bin/g++-4.8\n export npm_config_clang=0\nelse\n export CC=clang\n export CXX=clang++\n export npm_config_clang=1\nfi\n\nnvm unload || true\nrm -rf ./__nvm/ && git clone --depth 1 https://github.com/creationix/nvm.git ./__nvm\nsource ./__nvm/nvm.sh\nnvm install ${NODE_VERSION}\nnvm use ${NODE_VERSION}\nnode --version\nnpm --version\nwhich node\n\nif [ \"$RUNTIME\" == \"electron\" ]; then\n echo \"Building electron $TARGET\"\n export npm_config_target=$TARGET\n export npm_config_arch=$TARGET_ARCH\n export npm_config_target_arch=$TARGET_ARCH\n export npm_config_disturl=https://atom.io/download/electron\n export npm_config_runtime=electron\n export npm_config_build_from_source=true\nfi\n\nif [ -z \"$TARGET\" ]; then\n export TARGET=$(node -v | sed -e '1s/^.//')\nfi\n\nHOME=~/.electron-gyp npm install --build-from-source\n"
},
{
"alpha_fraction": 0.6983758807182312,
"alphanum_fraction": 0.6983758807182312,
"avg_line_length": 17.7391300201416,
"blob_id": "9bf1048ce4a19cb1cb86ee420197bc25041ed0ad",
"content_id": "2e0d2bd969745d50ae4a8d0f0f67c51caef758bf",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 431,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 23,
"path": "/open-worker.h",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#ifndef __OPEN_WORKER_H__\n#define __OPEN_WORKER_H__\n\n#include <nan.h>\n#include \"database.h\"\n\nclass OpenWorker : public Nan::AsyncWorker {\n public:\n OpenWorker(Nan::Callback *callback, Database *database, std::string connectionString, int flags, std::string vfs);\n\n virtual ~OpenWorker();\n\n void Execute() override;\n\n private:\n Database *database_;\n\n std::string connectionString_;\n int flags_;\n std::string vfs_;\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.7113702893257141,
"alphanum_fraction": 0.7223032116889954,
"avg_line_length": 18.323944091796875,
"blob_id": "33f2ef35291f056f75a680c4d6a53adf6346ea54",
"content_id": "8a25af21d0dad1c434eaba7455ad280f24328ef2",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1372,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 71,
"path": "/database.h",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#ifndef __CLIENT_H__\n#define __CLIENT_H__\n\n#include <map>\n\n#include \"sqlite3.h\"\n#include <nan.h>\n#include <vector>\n\nclass ConnectWorker;\nclass Statement;\n\nstruct AggregateContext {\n Nan::Persistent<v8::Object> *context;\n};\n\nclass Database : public Nan::ObjectWrap {\npublic:\n static void Init(v8::Local<v8::Object> exports);\n\n void SetLastError(int code);\n\n inline sqlite3 *GetDatabase() { return db_; };\n\n void AddStatement(Statement *statement);\n\n void RemoveStatement(Statement *statement);\n\nprivate:\n friend class OpenWorker;\n\n explicit Database();\n\n ~Database();\n\n static NAN_METHOD(New);\n\n static NAN_METHOD(Open);\n\n static NAN_METHOD(Close);\n\n static NAN_METHOD(CreateFunction);\n\n static NAN_METHOD(LastError);\n\n static NAN_METHOD(LastInsertID);\n\n static Nan::Persistent<v8::Function> constructor;\n\n static void CustomFunctionMain(sqlite3_context *context, int argc, sqlite3_value **argv);\n\n static void CustomFunctionStep(sqlite3_context *context, int argc, sqlite3_value **argv);\n\n static void CustomFunctionFinal(sqlite3_context *context);\n\n static void CustomFunctionDestroy(void *pointer);\n\n static void SetResult(sqlite3_context *context, v8::Local<v8::Value> result);\n\n void Close();\n\n std::string lastErrorMessage_;\n\n std::map<std::string, std::string> lastError_;\n\n sqlite3 *db_;\n\n std::vector<Statement *> statements_;\n};\n\n#endif\n"
},
{
"alpha_fraction": 0.6070603132247925,
"alphanum_fraction": 0.616174578666687,
"avg_line_length": 24.129032135009766,
"blob_id": "2e7107208df63e618a9a524be20d9484249a18fe",
"content_id": "d637124a0dfe665f9ce53d613816e57e3cea04c7",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7790,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 310,
"path": "/statement.cc",
"repo_name": "zhm/minisqlite",
"src_encoding": "UTF-8",
"text": "#include \"statement.h\"\n\n#include <iostream>\n\nstatic const int RESULT_BATCH_SIZE = 100;\n\nNan::Persistent<v8::Function> Statement::constructor;\n\nStatement::Statement() : database_(nullptr), statement_(nullptr), finished_(true), empty_(true) {\n}\n\nStatement::~Statement() {\n Close();\n}\n\nvoid Statement::Init(v8::Local<v8::Object> exports) {\n Nan::HandleScope scope;\n\n v8::Local<v8::FunctionTemplate> tpl = Nan::New<v8::FunctionTemplate>(New);\n tpl->SetClassName(Nan::New(\"Statement\").ToLocalChecked());\n tpl->InstanceTemplate()->SetInternalFieldCount(1);\n\n Nan::SetPrototypeMethod(tpl, \"query\", Query);\n Nan::SetPrototypeMethod(tpl, \"close\", Close);\n Nan::SetPrototypeMethod(tpl, \"getResults\", GetResults);\n Nan::SetPrototypeMethod(tpl, \"finished\", IsFinished);\n\n auto function = Nan::GetFunction(tpl).ToLocalChecked();\n\n constructor.Reset(function);\n\n Nan::Set(exports, Nan::New(\"Statement\").ToLocalChecked(), function);\n}\n\nNAN_METHOD(Statement::New) {\n if (info.IsConstructCall()) {\n Statement *obj = new Statement();\n obj->Wrap(info.This());\n info.GetReturnValue().Set(info.This());\n } else {\n const int argc = 1;\n v8::Local<v8::Value> argv[argc] = { info[0] };\n v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);\n info.GetReturnValue().Set(Nan::NewInstance(cons, argc, argv).ToLocalChecked());\n }\n}\n\nNAN_METHOD(Statement::Close) {\n Statement* statement = ObjectWrap::Unwrap<Statement>(info.Holder());\n\n statement->Close();\n\n info.GetReturnValue().Set(info.This());\n}\n\nNAN_METHOD(Statement::IsFinished) {\n Statement* statement = ObjectWrap::Unwrap<Statement>(info.Holder());\n\n info.GetReturnValue().Set(Nan::New(statement->finished_));\n}\n\nNAN_METHOD(Statement::Query) {\n Statement* statement = ObjectWrap::Unwrap<Statement>(info.Holder());\n\n Database* db = ObjectWrap::Unwrap<Database>(info[0].As<v8::Object>());\n Nan::Utf8String commandText(info[1]);\n\n statement->database_ = db;\n statement->finished_ = false;\n statement->empty_ = true;\n statement->sql_ = *commandText;\n\n statement->CreateNextStatement();\n\n info.GetReturnValue().Set(info.This());\n}\n\nNAN_METHOD(Statement::GetResults) {\n Statement* statement = ObjectWrap::Unwrap<Statement>(info.Holder());\n\n bool returnMetadata = false;\n\n if (!info[0]->IsUndefined()) {\n returnMetadata = Nan::To<bool>(info[0]).FromMaybe(false);\n }\n\n auto results = Nan::New<v8::Array>();\n\n int index = 0;\n\n while (true) {\n auto result = statement->ProcessSingleResult(returnMetadata && index == 0);\n\n if (statement->finished_) {\n break;\n }\n\n Nan::Set(results, index, result);\n\n ++index;\n\n if (index >= RESULT_BATCH_SIZE || statement->empty_) {\n break;\n }\n }\n\n info.GetReturnValue().Set(results);\n}\n\nv8::Local<v8::Value> Statement::ProcessSingleResult(bool returnMetadata) {\n empty_ = true;\n\n if (!statement_) {\n finished_ = true;\n return Nan::Null();\n }\n\n int code = sqlite3_step(statement_);\n\n database_->SetLastError(code);\n\n switch (code) {\n case SQLITE_BUSY: {\n empty_ = true;\n return Nan::Null();\n break;\n }\n\n case SQLITE_DONE: {\n empty_ = true;\n\n auto resultObject = CreateResult(statement_, false, returnMetadata);\n\n CreateNextStatement();\n\n return resultObject;\n break;\n }\n\n case SQLITE_ROW: {\n empty_ = false;\n\n auto resultObject = CreateResult(statement_, true, returnMetadata);\n\n return resultObject;\n break;\n }\n\n default: {\n // SQLITE_ERROR\n // SQLITE_MISUSE\n empty_ = true;\n Finalize();\n return Nan::Null();\n 
break;\n }\n }\n}\n\nvoid Statement::Finalize() {\n if (statement_) {\n sqlite3_finalize(statement_);\n statement_ = nullptr;\n database_->RemoveStatement(this);\n }\n}\n\nvoid Statement::CreateNextStatement() {\n Finalize();\n\n const char *rest = NULL;\n\n int result = sqlite3_prepare_v2(database_->GetDatabase(), sql_.c_str(), -1, &statement_, &rest);\n\n database_->SetLastError(result);\n\n if (result != SQLITE_OK) {\n sql_ = \"\";\n return;\n }\n\n if (rest) {\n sql_ = rest;\n }\n\n if (statement_) {\n sqlite3_reset(statement_);\n sqlite3_clear_bindings(statement_);\n database_->AddStatement(this);\n }\n}\n\nvoid Statement::Close() {\n Finalize();\n}\n\nv8::Local<v8::Object> Statement::CreateResult(sqlite3_stmt *statement, bool includeValues, bool includeMetadata) {\n int fieldCount = sqlite3_column_count(statement);\n\n auto resultObject = Nan::New<v8::Object>();\n auto columns = Nan::New<v8::Array>();\n auto values = Nan::New<v8::Array>();\n\n for (int i = 0; i < fieldCount; ++i) {\n if (includeMetadata) {\n auto column = Nan::New<v8::Object>();\n\n const char *columnName = sqlite3_column_name(statement, i);\n int columnType = sqlite3_column_type(statement, i);\n const char *columnTable = sqlite3_column_table_name(statement, i);\n int columnNumber = i + 1;\n\n if (columnName) {\n Nan::Set(column, Nan::New(\"name\").ToLocalChecked(),\n Nan::New(columnName).ToLocalChecked());\n } else {\n Nan::Set(column, Nan::New(\"name\").ToLocalChecked(), Nan::Null());\n }\n\n if (columnTable) {\n Nan::Set(column, Nan::New(\"table\").ToLocalChecked(),\n Nan::New(columnTable).ToLocalChecked());\n } else {\n Nan::Set(column, Nan::New(\"table\").ToLocalChecked(), Nan::Null());\n }\n\n Nan::Set(column, Nan::New(\"column\").ToLocalChecked(),\n Nan::New(columnNumber));\n\n Nan::Set(column, Nan::New(\"type\").ToLocalChecked(),\n Nan::New(columnType));\n\n Nan::Set(columns, i, column);\n }\n\n if (includeValues) {\n int columnType = sqlite3_column_type(statement, i);\n\n switch (columnType) {\n case SQLITE_NULL:\n Nan::Set(values, i, Nan::Null());\n break;\n\n case SQLITE_TEXT:\n Nan::Set(values, i, Nan::New((const char *)sqlite3_column_text(statement, i)).ToLocalChecked());\n break;\n\n case SQLITE_FLOAT:\n Nan::Set(values, i, Nan::New(sqlite3_column_double(statement, i)));\n break;\n\n case SQLITE_INTEGER:\n Nan::Set(values, i, Nan::New<v8::Number>(sqlite3_column_int64(statement, i)));\n break;\n\n case SQLITE_BLOB:\n const void *data = sqlite3_column_blob(statement, i);\n int size = sqlite3_column_bytes(statement, i);\n Nan::Set(values, i, Nan::CopyBuffer((char *)data, size).ToLocalChecked());\n break;\n }\n }\n }\n\n if (includeMetadata) {\n Nan::Set(resultObject, Nan::New(\"columns\").ToLocalChecked(), columns);\n }\n\n if (includeValues) {\n Nan::Set(resultObject, Nan::New(\"values\").ToLocalChecked(), values);\n }\n\n return resultObject;\n}\n\nv8::Local<v8::Object> Statement::ConvertValues(int count, sqlite3_value **values) {\n auto resultObject = Nan::New<v8::Array>();\n\n for (int i = 0; i < count; ++i) {\n auto value = values[i];\n\n int valueType = sqlite3_value_type(value);\n\n switch (valueType) {\n case SQLITE_NULL:\n Nan::Set(resultObject, i, Nan::Null());\n break;\n\n case SQLITE_TEXT:\n Nan::Set(resultObject, i, Nan::New((const char *)sqlite3_value_text(value)).ToLocalChecked());\n break;\n\n case SQLITE_FLOAT:\n Nan::Set(resultObject, i, Nan::New(sqlite3_value_double(value)));\n break;\n\n case SQLITE_INTEGER:\n Nan::Set(resultObject, i, 
Nan::New<v8::Number>(sqlite3_value_int64(value)));\n break;\n\n case SQLITE_BLOB:\n const void *data = sqlite3_value_blob(value);\n int size = sqlite3_value_bytes(value);\n Nan::Set(resultObject, i, Nan::CopyBuffer((char *)data, size).ToLocalChecked());\n break;\n }\n }\n\n return resultObject;\n}\n"
}
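Statement::GetResults above steps at most RESULT_BATCH_SIZE (100) rows per call, so each trip across the JS/C++ boundary returns a bounded batch rather than the whole result set. `fetchmany` gives the same batching discipline in Python; a sketch of the shape:

```python
import sqlite3

BATCH = 100   # mirrors RESULT_BATCH_SIZE in statement.cc

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (n INTEGER)')
conn.executemany('INSERT INTO t VALUES (?)', [(i,) for i in range(250)])

cur = conn.execute('SELECT n FROM t')
while True:
    batch = cur.fetchmany(BATCH)   # bounded work per call
    if not batch:                  # empty batch ~ SQLITE_DONE
        break
    print('got', len(batch), 'rows')   # 100, 100, 50
conn.close()
```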
] | 17 |
Dofreeshare/GenProfiler | https://github.com/Dofreeshare/GenProfiler | 69c42b8264cc3c0cc447d4359b4937deec8a4e56 | aaf2345df97ae4d2e615ce4fffb7d1e24e994108 | 9865cc0e5b2474c3d0b7aaad9bc332edc3cdc130 | refs/heads/master | 2020-04-11T09:32:46.371870 | 2019-01-07T23:16:35 | 2019-02-17T19:28:08 | 161,682,011 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5801641941070557,
"alphanum_fraction": 0.5853500366210938,
"avg_line_length": 29.060401916503906,
"blob_id": "64d122cb382c9a378145bb9861be8460ac89b8ff",
"content_id": "37b82e0d598ba1c2d36bcd3d5862c2dc3e9c14d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4628,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 149,
"path": "/profiler.py",
"repo_name": "Dofreeshare/GenProfiler",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import *\r\n\r\nimport sqlite3\r\nfrom sqlite3 import Error, IntegrityError\r\n\r\nimport re\r\n\r\nimport sys, getopt\r\nimport getpass\r\n\r\nimport urllib\r\n\r\nimport time\r\n\r\nfrom subhamangal import *\r\nfrom common import *\r\n\r\nimport vedicmaratha as vedicm\r\nimport TAJ as TAJ\r\nimport common\r\n\r\nhelp_banner = \"\"\"python profiler.py -[option]\r\n\r\n-h : for this banner\r\n-a : Program will request you for Username and Password\r\n-t : Text file having user name and password in it\r\n Please make sure its name is Credentials.txt\r\n\"\"\"\r\n\r\noption_banner = \"\"\" Please select the options to collect the data.\r\nOptions are as follows:\r\n1 : Use list.txt file to collect the Data\r\n2 : Collect all data from bridal list present on site\r\n3 : Use it for Guna update using list.txt file\r\n\"\"\"\r\n\r\nlogin_ID = None\r\nlogin_pass = None\r\nlogin_page = None\r\nage_from = \"1989\"\r\nage_to = \"1993\"\r\n\r\n\r\ndef GetCredentialsFromFile():\r\n ID = None\r\n password = None\r\n page = None\r\n \r\n if (os.path.isfile('Credentials.txt') & os.path.exists('Credentials.txt')):\r\n # Open the existing DB connection \r\n try:\r\n with open(\"Credentials.txt\", \"r\") as fd:\r\n ID = fd.readline()\r\n password = fd.readline()\r\n page = fd.readline()\r\n \r\n except IOError:\r\n print(\"Error in opening Credentials.txt\\n\")\r\n else:\r\n print(\"Credentials.txt doen't exist\\n\")\r\n \r\n return ID, password, page\r\n\r\n \r\ndef kill_popup(browser):\r\n \r\n try:\r\n browser.find_element_by_xpath('//img[@alt=\"banner_popup_2\"]/preceding::button[@data-dismiss=\"modal\"]').click()\r\n except NoSuchElementException:\r\n print (\"Seems like no popup to face\\n\")\r\n\r\n\r\n\r\n \r\ndef main(argv):\r\n \r\n global login_ID\r\n global login_pass\r\n global login_page\r\n \r\n try:\r\n opts, remaining = getopt.getopt(argv, \"hat\")\r\n except getopt.GetoptError:\r\n print (help_banner)\r\n \r\n if (len(opts) == 0):\r\n print (help_banner)\r\n sys.exit()\r\n else:\r\n for o,a in opts:\r\n if o in ('-h'):\r\n print (help_banner)\r\n if o in ('-a'):\r\n login_ID = raw_input(\"Username:\")\r\n login_pass = getpass.getpass(\"Password for \" + login_ID + \":\")\r\n login_page = raw_input(\"Enter Login page:\")\r\n if o in ('-t'):\r\n (login_ID, login_pass, login_page) = GetCredentialsFromFile()\r\n \r\n profiler_level_string = raw_input(option_banner)\r\n try:\r\n profiler_level = int(profiler_level_string)\r\n if (profiler_level < 1 or profiler_level > 3):\r\n profiler_level = 1\r\n except ValueError:\r\n profiler_level = 1\r\n print (\"Seems like you have given incorrect option taking options forcefully as 1\\n\")\r\n pass\r\n \r\n# sys.exit()\r\n \r\n with Browser() as br, DBDriver(common.file_name, common.db_schema) as DB:\r\n \r\n host_name = (re.findall('(?<=www.).+(?=.com)', login_page))[0]\r\n \r\n print host_name\r\n \r\n if (not (os.path.exists('snaps/' + host_name))):\r\n os.mkdir('snaps/' + host_name)\r\n \r\n #Collect the First Stage of data\r\n if (profiler_level == 1):\r\n print (\"Collecting only primary info\\n\")\r\n if (host_name == 'vedicmaratha'):\r\n vedicm.NagivateToDashBoard(login_ID, 
login_pass, login_page, br.browser)\r\n vedicm.CollectAllQuickSearch(br.browser, DB.conn)\r\n elif (host_name == 'tumchaaamchajamla'):\r\n TAJ.NagivateToDashBoard(login_ID, login_pass, login_page, br.browser)\r\n TAJ.CollectAllQuickSearch(br.browser, DB.conn)\r\n elif(profiler_level == 2):\r\n print (\"Checking full completion support\\n\")\r\n if (host_name == 'vedicmaratha'):\r\n print (\"Feature not supported in Vedic maratha\\n\")\r\n elif (host_name == 'tumchaaamchajamla'):\r\n print (\"Collecting all Data\\n\")\r\n TAJ.NagivateToDashBoard(login_ID, login_pass, login_page, br.browser)\r\n TAJ.CollectDetailedInformation(br.browser, DB.conn)\r\n# Update_Guna(DB.conn)\r\n \r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n"
},
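profiler.py leans on `with Browser() as br, DBDriver(common.file_name, common.db_schema) as DB`, but common.py is not included in this dump. The names suggest context managers that guarantee the WebDriver quits and the connection commits and closes even on errors. A hypothetical sketch of what DBDriver might look like; the attribute names `browser` and `conn` come from how profiler.py uses them, everything else is assumption:

```python
import sqlite3

class DBDriver:
    """Open a SQLite DB, apply a schema, commit and close on exit."""
    def __init__(self, file_name, schema):
        self.file_name, self.schema = file_name, schema

    def __enter__(self):
        self.conn = sqlite3.connect(self.file_name)
        self.conn.executescript(self.schema)   # CREATE TABLE IF NOT EXISTS ...
        return self

    def __exit__(self, exc_type, exc, tb):
        if exc_type is None:
            self.conn.commit()                 # keep data only on success
        self.conn.close()
        return False                           # never swallow exceptions

# Browser would plausibly follow the same shape around selenium:
#   __enter__: self.browser = webdriver.Firefox(); return self
#   __exit__:  self.browser.quit()
```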
{
"alpha_fraction": 0.5031428933143616,
"alphanum_fraction": 0.5153612494468689,
"avg_line_length": 33.04703140258789,
"blob_id": "cfe37679bce33e9263fb40919e3fada008fb9439",
"content_id": "fc022a6715c9368377fb63ad332de5e90afc3d7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14159,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 404,
"path": "/vedicmaratha.py",
"repo_name": "Dofreeshare/GenProfiler",
"src_encoding": "UTF-8",
"text": "# import os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import *\r\n\r\nimport sqlite3\r\nfrom sqlite3 import Error, IntegrityError\r\n\r\nimport re\r\n\r\nfrom datetime import datetime\r\n\r\nimport urllib\r\n\r\n# import time\r\n# from _socket import timeout\r\n# from pstats import browser\r\n\r\nfile_name = \"VedicMaratha.db\"\r\ndb_schema = '''CREATE TABLE IF NOT EXISTS candidate (\r\n id text PRIMARY KEY,\r\n l_name text,\r\n cast text,\r\n height real,\r\n reg_date text,\r\n edu text,\r\n prof text,\r\n income int,\r\n dob text,\r\n time text,\r\n place text,\r\n rashi text,\r\n nak text)'''\r\n\r\ndef CreateNewDB():\r\n print (\"Creating New DB\\n\")\r\n try:\r\n conn = sqlite3.connect(\"VedicMaratha.db\")\r\n except Error as e:\r\n print(e)\r\n else:\r\n c = conn.cursor()\r\n c.execute('''CREATE TABLE IF NOT EXISTS candidate (\r\n id text PRIMARY KEY,\r\n l_name text,\r\n cast text,\r\n height real,\r\n reg_date text,\r\n edu text,\r\n prof text,\r\n income int,\r\n dob text,\r\n time text,\r\n place text,\r\n rashi text,\r\n nak text)''')\r\n return conn\r\n# finally:\r\n# conn.close()\r\n\r\ndef GetProfielPic(browser, xpath, candidate_id):\r\n try:\r\n cand_img = browser.find_element_by_xpath(xpath).get_attribute(\"src\")\r\n except NoSuchElementException:\r\n print (\"Unable to get profile pic\\n\")\r\n pass\r\n else:\r\n file_name = \"snaps\\\\vedicmaratha\\\\\" + candidate_id + '.jpeg'\r\n# print cand_img\r\n urllib.urlretrieve(cand_img, file_name)\r\n\r\ndef GetLastName(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text.encode('ascii','ignore')\r\n str2 = re.findall('(?<=\\().+(?=\\))', str1)[0].title()\r\n return str2.title()\r\n \r\ndef GetCast(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n return str1.title()\r\n else:\r\n return None\r\n \r\ndef GetHeight(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n str2 = re.findall('(?= )?\\d', str1)[0]\r\n str3 = re.findall('(?= )?\\d', str1)[1]\r\n \r\n return float(str2 +'.'+ str3)\r\n else:\r\n return None\r\n \r\ndef GetRegDate(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (str1 != '--'):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n str1 = str1.replace('-','/')\r\n return str1\r\n else:\r\n return None\r\n \r\n \r\ndef GetEdu(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n\r\ndef GetProf(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n \r\ndef GetInc(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n if (str1 == 'Monthly' or str1 == 
'Yearly'):\r\n return None\r\n else:\r\n str1 = str1.replace('Monthly', '')\r\n str1 = str1.replace(' Yearly', '')\r\n str1 = str1.replace('Rs.', '')\r\n str1 = str1.replace(',', '')\r\n try:\r\n income = int(str1)\r\n except ValueError:\r\n print \"Unable to conver %s into integer\\n\" %(str1)\r\n pass\r\n return None\r\n else:\r\n return income\r\n else:\r\n return None\r\n \r\ndef GetDOB(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (str1 != '--'):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n str1 = str1.replace('-','/')\r\n else:\r\n return None\r\n \r\ndef GetTime(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n time_lst = str1.split('-')\r\n try: \r\n if (time_lst[0].isdigit() and time_lst[1].isdigit() and time_lst[-1] in ('AM', 'PM')):\r\n # hr = int(re.findall('\\d\\d(?=-)', str1)[0])\r\n # mins = int(re.findall('\\d\\d(?=-)', str1)[1])\r\n \r\n hr = int(time_lst[0])\r\n mins = int(time_lst[1])\r\n \r\n if (time_lst[-1] == 'PM'):\r\n hr = hr + 12\r\n return \"{:02}:{:02}\".format(hr, mins)\r\n else:\r\n return None\r\n except IndexError:\r\n pass\r\n return None\r\n else:\r\n return None\r\n \r\ndef GetPlace(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n \r\ndef GetRashi(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('Rashi / Nakshatra / Charan / Nadi / Gan: ','')\r\n token = str1.split(',')\r\n if (token[0] != ' '):\r\n return None\r\n else:\r\n return token[0]\r\n else:\r\n return None\r\n \r\ndef GetNak(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('Rashi / Nakshatra / Charan / Nadi / Gan: ','')\r\n token = str1.split(',')\r\n if (token[1] != ' '):\r\n return None\r\n else:\r\n return token[1]\r\n else:\r\n return None\r\n \r\n \r\ndef StoreCandidateData(browser, c, candidate_id):\r\n \r\n l_name = None \r\n cast = None\r\n height = None\r\n reg_date = None\r\n edu = None\r\n prof = None\r\n income = None\r\n dob = None\r\n time = None\r\n place = None\r\n rashi = None\r\n nak = None\r\n \r\n try:\r\n \r\n #Wait for login box to load\r\n WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//table[@class=\"profile\"]')))\r\n# time.sleep(3)\r\n GetProfielPic(browser, '//img[contains(@src, \"photoprocess.php\")]', candidate_id)\r\n l_name = GetLastName(browser, '(//td)[29]')\r\n cast = GetCast(browser, '(//td[@class=\"data\"])[6]')\r\n height = GetHeight(browser, '(//td[@class=\"data\"])[8]')\r\n reg_date = GetRegDate(browser, '(//td[@class=\"data\"])[28]')\r\n edu = GetEdu(browser, '(//td[@class=\"data\"])[30]')\r\n prof = GetProf(browser, '(//td[@class=\"data\"])[32]')\r\n income = GetInc(browser, '(//td[@class=\"data\"])[36]')\r\n dob = GetDOB(browser, '(//td[@class=\"data\"])[2]')\r\n time = GetTime(browser, '(//td[@class=\"data\"])[38]')\r\n place = GetPlace(browser, '(//td[@class=\"data\"])[42]')\r\n rashi = GetRashi(browser, '(//td[@class=\"data\"])[45]')\r\n nak = GetNak(browser, '(//td[@class=\"data\"])[45]')\r\n \r\n ProfLink = browser.current_url\r\n now = 
datetime.now()\r\n ProfCreatTime = now.strftime(\"%d/%m/%Y\")\r\n \r\n complete_det = \"{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}\".format(candidate_id, l_name, cast, height, reg_date, edu, prof, income, dob, time, place, rashi, nak)\r\n \r\n print \"%s\\n\" %(complete_det)\r\n \r\n try:\r\n c.execute(\"insert into PrimData (ProfId, LName, Height, Edu, Prof, Income) values (?, ?, ?, ?, ?, ?)\", (candidate_id, l_name, height, edu, prof, income))\r\n \r\n c.execute(\"insert into CastData (ProfId, SCast) values (?, ?)\", (candidate_id, cast))\r\n \r\n c.execute(\"insert into BirData (ProfId, BTime, BDate, MSign, Naksh) values (?, ?, ?, ?, ?)\", (candidate_id, time, dob, rashi, nak))\r\n \r\n c.execute(\"insert into BirLocData (ProfId, City) values (?, ?)\", (candidate_id, place))\r\n \r\n c.execute(\"insert into Metdata (ProfId, ProfRef, ProfLink, ProfCreatTime) values (?, ?, ?, ?)\", (candidate_id, \"Vedic Maratha\", ProfLink, ProfCreatTime))\r\n\r\n except IntegrityError:\r\n print (\"Unable to update the database\\n\")\r\n \r\n except NoSuchElementException as e:\r\n print (\"StoreCandidateData : Unable to save the details because no such element\\n\")\r\n print e\r\n\r\n except TimeoutException:\r\n print (\"StoreCandidateData : Unable to load the profile\\n\")\r\n\r\n\r\ndef found_window(window_title):\r\n\r\n def predicate(driver):\r\n for handle in driver.window_handles:\r\n try: driver.switch_to_window(handle)\r\n except NoSuchWindowException:\r\n return False\r\n else:\r\n if (driver.title == window_title):\r\n return True # found window\r\n else:\r\n continue\r\n\r\n return predicate\r\n\r\n\r\ndef CollectTenCandidatesData(browser, c):\r\n try:\r\n # We are now on new page of the candidates\r\n curr_candidate_xpath = \"//a[contains(@href, 'profile_by_public')]\"\r\n candidate_id_array = browser.find_elements_by_xpath(curr_candidate_xpath)\r\n \r\n for candidate_id in candidate_id_array:\r\n cand_id = candidate_id.text.strip().encode('ascii','ignore')\r\n# print \"Value of cand id is %s\" %(cand_id)\r\n# temp_var = (cand_id,)\r\n c.execute(\"select exists (select PrimData.ProfId from PrimData where PrimData.ProfId = ?)\", (cand_id,))\r\n if (c.fetchone() == (0,)):\r\n #New candiate data found save it \r\n main_window = browser.current_window_handle\r\n \r\n candidate_id.click()\r\n# browser.implicitly_wait(30) \r\n# time.sleep(3) \r\n# print (\"Number of windows opened %d\\n \") %(len(browser.window_handles))\r\n\r\n WebDriverWait(browser, 10, 2).until(found_window('Member Profile'))\r\n StoreCandidateData(browser, c, cand_id)\r\n browser.close()\r\n browser.switch_to_window(main_window)\r\n \r\n\r\n #switch to new window\r\n# browser.switch_to_window(browser.window_handles[1])\r\n# browser.switch_to_window('')\r\n# print (\"Title of window %s\\n \") %browser.title\r\n\r\n# \r\n# for handle in browser.window_handles:\r\n# if (main_window != handle):\r\n# browser.switch_to_window(handle)\r\n# print (\"Switched to new window %s\\n \") %browser.title\r\n# StoreCandidateData(browser, c, cand_id)\r\n# browser.close()\r\n# browser.switch_to_window(main_window)\r\n \r\n else:\r\n print \"%s is already in DB\\n\" %(cand_id)\r\n except NoSuchElementException as e:\r\n print (\"CollectTenCandidatesData : Unable to find some elements\\n\")\r\n print (e)\r\n\r\n\r\ndef NagivateToDashBoard(login_ID, login_pass, login_page, browser):\r\n \r\n browser.get(login_page)\r\n try:\r\n #Wait for login box to load\r\n login_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, 
'//table[@id=\"example\"]')))\r\n \r\n #Select Number of entries\r\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"example_length\"]'))\r\n login_element.select_by_value('100')\r\n browser.implicitly_wait(5)\r\n \r\n #Wait for Dashboard to load\r\n# dash_board_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@class=\"cPageData\"]')))\r\n \r\n except NoSuchElementException:\r\n print (\"Unable to navigate to pavitra vivah dash board\\n\")\r\n\r\n except TimeoutException:\r\n print (\"Unable to use dash board due to time out\\n\")\r\n\r\n\r\ndef GetPageNum():\r\n print (\"GetPageNum\\n\")\r\n \r\ndef GoNextPage(browser):\r\n browser.find_element_by_xpath('//a[@class=\"paginate_button next\"]').click()\r\n \r\ndef ScrapLoadedPages(browser, conn):\r\n #Rotate through all the search result pages\r\n c = conn.cursor()\r\n \r\n while (True):\r\n try:\r\n \r\n #Wait for the results to load\r\n WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//table[@class=\"dataTable no-footer\"]')))\r\n \r\n CollectTenCandidatesData(browser, c)\r\n \r\n print (\"Moving on to Next page\\n\")\r\n GoNextPage(browser)\r\n \r\n except NoSuchElementException as e:\r\n print (\"Seems like we are on last page, saving changes to database\\n\")\r\n print e\r\n break\r\n \r\n# conn.commit()\r\n# c.close()\r\n\r\ndef CollectAllQuickSearch(browser, conn):\r\n \r\n ScrapLoadedPages(browser, conn)\r\n"
},
{
"alpha_fraction": 0.5906753540039062,
"alphanum_fraction": 0.604433000087738,
"avg_line_length": 45.42580795288086,
"blob_id": "0bb485a5790e0cc51dca3c1aba2f38db1d1c947e",
"content_id": "766e3588a16306eb6867e5c9739a8ae340550236",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14392,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 310,
"path": "/subhamangal.py",
"repo_name": "Dofreeshare/GenProfiler",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import *\n\nimport sqlite3\nfrom sqlite3 import Error, IntegrityError\n\nimport re\n\nimport sys, getopt\nimport getpass\n\nimport urllib\n\nimport time\n\nscript = \"\"\"\n var text = '';\n\n var childNodes = arguments[0].childNodes; // child nodes includes Element and Text Node\n\n childNodes.forEach(function(it, index)\n {\n if(it.nodeName.toUpperCase() === 'DIV') \n { // iterate until Element Node: hr\n text = childNodes[index+1].textContent; \n // get the text content of next Child Node of Element Node: hr\n }\n });\n return text;\n\"\"\"\n\ndef CreateNewDB():\n print (\"Creating New DB\\n\")\n try:\n conn = sqlite3.connect(\"Shunbhamangal.db\")\n except Error as e:\n print(e)\n else:\n c = conn.cursor()\n c.execute('''CREATE TABLE IF NOT EXISTS candidate (\n id text PRIMARY KEY,\n l_name text,\n height real,\n edu text,\n age int,\n income int,\n ocupation text)''')\n return conn\n# finally:\n# conn.close()\n\ndef StoreCandidateData(browser, c, curr_candidate_index, candidate_id, surname):\n \n l_name = None\n height = None\n edu = None\n age = None\n income_yearly = None\n profession = None\n \n try:\n \n l_name = surname.encode('ascii','ignore').title()\n# l_name = l_name.Title()\n# print l_name\n \n try:\n cand_img = browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"items\"]/div['+str(curr_candidate_index)+']/div/div/div/a').get_attribute(\"href\")\n except NoSuchElementException as e:\n print (\"Unable to get profile pic\\n\")\n else:\n# if (cand_img):\n file_name = \"snaps\\\\\" + candidate_id + '.jpeg'\n# print cand_img\n urllib.urlretrieve(cand_img, file_name)\n \n can_det_element = browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"items\"]/div['+str(curr_candidate_index)+']/div/div[2]')\n \n# raw_string_1 = browser.execute_script('return arguments[0].childNodes[1].textContent;', can_det_element).strip()\n# raw_string_2 = browser.execute_script('return arguments[0].childNodes[2].textContent;', can_det_element).strip()\n# raw_string_3 = browser.execute_script('return arguments[0].childNodes[3].textContent;', can_det_element).strip()\n# raw_string_4 = browser.execute_script('return arguments[0].childNodes[4].textContent;', can_det_element).strip()\n# raw_string_5 = browser.execute_script('return arguments[0].childNodes[5].textContent;', can_det_element).strip()\n# raw_string_6 = browser.execute_script('return arguments[0].childNodes[6].textContent;', can_det_element).strip()\n# raw_string_7 = browser.execute_script('return arguments[0].childNodes[7].textContent;', can_det_element).strip()\n# raw_string_8 = browser.execute_script('return arguments[0].childNodes[8].textContent;', can_det_element).strip()\n# raw_string_9 = browser.execute_script('return arguments[0].childNodes[9].textContent;', can_det_element).strip()\n# raw_string_10 = browser.execute_script('return arguments[0].childNodes[10].textContent;', can_det_element).strip()\n# raw_string_11 = browser.execute_script('return arguments[0].childNodes[11].textContent;', can_det_element).strip()\n# raw_string_12 = browser.execute_script('return arguments[0].childNodes[12].textContent;', 
can_det_element).strip()\n# raw_string_13 = browser.execute_script('return arguments[0].childNodes[13].textContent;', can_det_element).strip()\n# raw_string_14 = browser.execute_script('return arguments[0].childNodes[14].textContent;', can_det_element).strip()\n# raw_string_15 = browser.execute_script('return arguments[0].childNodes[15].textContent;', can_det_element).strip()\n# raw_string_16 = browser.execute_script('return arguments[0].childNodes[16].textContent;', can_det_element).strip()\n# raw_string_17 = browser.execute_script('return arguments[0].childNodes[17].textContent;', can_det_element).strip()\n# raw_string_18 = browser.execute_script('return arguments[0].childNodes[18].textContent;', can_det_element).strip()\n# \n# print \"Value at index 01 %s \" %(raw_string_1)\n# print \"Value at index 02 %s \" %(raw_string_2)\n# print \"Value at index 03 %s \" %(raw_string_3)\n# print \"Value at index 04 %s \" %(raw_string_4)\n# print \"Value at index 05 %s \" %(raw_string_5)\n# print \"Value at index 06 %s \" %(raw_string_6)\n# print \"Value at index 07 %s \" %(raw_string_7)\n# print \"Value at index 08 %s \" %(raw_string_8)\n# print \"Value at index 09 %s \" %(raw_string_9)\n# print \"Value at index 10 %s \" %(raw_string_10)\n# print \"Value at index 11 %s \" %(raw_string_11)\n# print \"Value at index 12 %s \" %(raw_string_12)\n# print \"Value at index 13 %s \" %(raw_string_13)\n# print \"Value at index 14 %s \" %(raw_string_14)\n# print \"Value at index 15 %s \" %(raw_string_15)\n# print \"Value at index 16 %s \" %(raw_string_16)\n# print \"Value at index 17 %s \" %(raw_string_17)\n# print \"Value at index 18 %s \" %(raw_string_18)\n \n# print \"\\n\\n\\n\\n\\n\\n\\n\\n\"\n \n # Edu\n raw_string = browser.execute_script('return arguments[0].childNodes[2].textContent;', can_det_element).strip()\n raw_string = raw_string.encode('ascii','ignore').strip().title()\n print \"Education Details is %s\" %(raw_string)\n edu = raw_string\n \n # Height\n raw_string = browser.execute_script('return arguments[0].childNodes[6].textContent;', can_det_element).strip()\n raw_string = raw_string.encode('ascii','ignore').strip()\n print \"Value of the salary is %s\" %(raw_string)\n height = float(raw_string)\n\n # Age\n raw_string = browser.execute_script('return arguments[0].childNodes[10].textContent;', can_det_element).strip()\n raw_string = raw_string.encode('ascii','ignore').strip().title()\n print \"Age of candiate is %s\" %(raw_string)\n age = int(raw_string)\n \n # Income\n raw_string = browser.execute_script('return arguments[0].childNodes[14].textContent;', can_det_element).strip()\n raw_string = raw_string.encode('ascii','ignore').strip().title()\n print \"Income is %s\" %(raw_string)\n income_yearly = int(raw_string)\n \n # Profession\n raw_string = browser.execute_script('return arguments[0].childNodes[18].textContent;', can_det_element).strip()\n raw_string = raw_string.encode('ascii','ignore').strip().title()\n print \"Profession is %s\" %(raw_string)\n profession = raw_string\n \n try:\n c.execute(\"insert into candidate (id, l_name, height, edu, age, income, ocupation) values (?, ?, ?, ?, ?, ?, ?)\", (candidate_id, l_name, height, edu, age, income_yearly, profession))\n\n except IntegrityError:\n print (\"Unable to update the database\\n\")\n \n except NoSuchElementException as e:\n print (\"StoreCandidateData : Unable to save the details because no such element\\n\")\n print e\n\n except TimeoutException:\n print (\"StoreCandidateData : Unable to load the profile\\n\")\n\ndef 
NagivateToDashBoard(login_ID, login_pass, login_page):\n \n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference(\"browser.privatebrowsing.autostart\", True)\n\n browser = webdriver.Firefox(firefox_profile=firefox_profile, executable_path=\"D:\\Portables\\geckodriver\")\n browser.get(login_page)\n try:\n #Wait for login box to load\n login_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//div[@class=\"container\"]')))\n \n #Select Height\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"height\"]'))\n login_element.select_by_value('N/A')\n browser.implicitly_wait(5)\n \n #Select Native\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"native\"]'))\n login_element.select_by_value('N/A')\n browser.implicitly_wait(5)\n \n #Select Edu\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"education\"]'))\n login_element.select_by_value('N/A')\n browser.implicitly_wait(5)\n \n #Select Marriage\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"marriageType\"]'))\n login_element.select_by_value('Single')\n browser.implicitly_wait(5)\n \n #Select Gender\n login_element = Select(browser.find_element_by_xpath('//select[@name=\"sex\"]'))\n login_element.select_by_value('Female')\n browser.implicitly_wait(5)\n \n #Wait for Dashboard to load\n# dash_board_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@class=\"cPageData\"]')))\n return browser\n \n except NoSuchElementException:\n print (\"Unable to navigate to pavitra vivah dash board\\n\")\n return None\n except TimeoutException:\n print (\"Unable to use dash board due to time out\\n\")\n return None\n\ndef CollectTenCandidatesData(browser, c):\n curr_candidate_index = 1\n while(True):\n try:\n # We are now on new page of the candidates\n curr_candidate_xpath = '//div[@id=\"candidateBriefInformation\"]/div[@class=\"items\"]/div['+str(curr_candidate_index)+']/header'\n title_str = browser.find_element_by_xpath(curr_candidate_xpath).text\n title_str = title_str.strip()\n candidate_id = title_str.split(\" \")[0]\n surname = title_str.split(\" \")[-1]\n# print \"Found this as ID and surname %s %s\" %(candidate_id, surname)\n temp_var = (candidate_id,)\n c.execute(\"select exists (select candidate.id from candidate where candidate.id = ?)\", (temp_var))\n if (c.fetchone() == (0,)):\n #New candiate data found save it\n print \"Saving the data of %s\" %(candidate_id)\n StoreCandidateData(browser, c, curr_candidate_index, candidate_id, surname)\n else:\n print \"%s is already in DB\\n\" %(candidate_id)\n \n curr_candidate_index = curr_candidate_index + 1\n except NoSuchElementException as e:\n print (\"CollectTenCandidatesData : Unable to find some elements\\n\")\n print (e)\n break\n \ndef ScrapLoadedPages(browser, conn):\n #Rotate through all the search result pages\n c = conn.cursor()\n \n while(True):\n try:\n reload_attempt = 0\n next_attempt = 0\n #Wait for the results to load\n browser.implicitly_wait(10)\n dash_board_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@id=\"candidateBriefInformation\"]/div[@class=\"items\"]')))\n \n current_page = browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"pager\"]/ul/li[@class=\"page selected\"]').text\n print(\"Collecting Data on page \" + current_page)\n \n CollectTenCandidatesData(browser, c)\n conn.commit()\n \n print (\"Moving on to 
Next page\\n\")\n \n while (True):\n# browser.execute_script(\"document.getElementById('ctl00_ContentPlaceHolder1_lbtnNext1').click()\")\n browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"pager\"]/ul/li[@class=\"next\"]/a').click()\n browser.implicitly_wait(10)\n# time.sleep(10)\n# WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//span[@id=\"ctl00_ContentPlaceHolder1_dlsearch\"]')))\n next_page = browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"pager\"]/ul/li[@class=\"page selected\"]').text\n if (current_page == next_page):\n if (next_attempt < 5):\n print (\"Seems like unable to click the next button\\n\")\n next_attempt = next_attempt + 1\n browser.implicitly_wait(5)\n continue\n else:\n print (\"Unable to click the next after so many attempts trying refresh\\n\")\n browser.refresh()\n next_attempt = 0\n WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@id=\"candidateBriefInformation\"]/div[@class=\"items\"]')))\n continue\n else:\n break\n# browser.find_element_by_xpath('//a[@id=\"ctl00_ContentPlaceHolder1_lbtnNext1\"]').click()\n \n except NoSuchElementException as e:\n print (\"Seems like we are on last page, saving changes to database\\n\")\n print e\n conn.commit()\n c.close()\n break\n except TimeoutException:\n #We try to reload the page and for particular amount of time and then press next\n reload_attempt = reload_attempt + 1\n if (reload_attempt == 3):\n print (\"Skipping this page and moving forward\\n\")\n temp_elem = browser.find_element_by_xpath('//div[@id=\"candidateBriefInformation\"]/div[@class=\"pager\"]/ul/li[@class=\"next\"]')\n temp_elem.click()\n else:\n print (\"Refreshing the current Page\\n\")\n browser.refresh()\n #Continue to the while loop\n continue\n \n \n conn.commit()\n c.close()\n\ndef CollectAllQuickSearch(browser, conn):\n \n ScrapLoadedPages(browser, conn)\n"
},
{
"alpha_fraction": 0.5558441281318665,
"alphanum_fraction": 0.5716391801834106,
"avg_line_length": 32.70000076293945,
"blob_id": "17c9d6c74ca2829189b6585c1115483f8db8fd99",
"content_id": "e361ea9630e74c0d8838abc9c0c8d4e41e5d66a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14245,
"license_type": "no_license",
"max_line_length": 148,
"num_lines": 410,
"path": "/common.py",
"repo_name": "Dofreeshare/GenProfiler",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import *\r\n\r\nimport sqlite3\r\nfrom sqlite3 import Error, IntegrityError\r\n\r\nimport re\r\n\r\nimport sys, getopt\r\nimport getpass\r\n\r\nimport urllib\r\n\r\nimport time\r\n\r\nfrom subhamangal import *\r\n\r\nBROWSERLOCATION = 'D:\\Portables\\geckodriver'\r\n\r\nfile_name = \"Generic.db\"\r\ndb_schema = ''' CREATE TABLE IF NOT EXISTS \"PrimData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE,\r\n \"FName\" varchar(20) NULL ,\r\n \"MName\" varchar(20) NULL ,\r\n \"LName\" varchar(20) NULL ,\r\n \"Height\" int NULL,\r\n \"Weight\" int NULL,\r\n \"Edu\" TEXT NULL,\r\n \"Prof\" TEXT NULL,\r\n \"Income\" int NULL\r\n);\r\n\r\n\r\nCREATE TABLE IF NOT EXISTS \"CastData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE,\r\n \"MCast\" varchar(20) NULL ,\r\n \"SCast\" varchar(20) NULL ,\r\n \"Gotra\" varchar(20) NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `PrimData`(`ProfId`) ON delete cascade ON update no action\r\n);\r\n\r\n\r\nCREATE TABLE IF NOT EXISTS \"BirData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE ,\r\n \"BTime\" varchar(20) NULL ,\r\n \"BDate\" varchar(20) NULL ,\r\n \"MSign\" varchar(20) NULL ,\r\n \"SSign\" varchar(20) NULL ,\r\n \"Naksh\" varchar(20) NULL ,\r\n \"Charan\" varchar(20) NULL ,\r\n \"Gan\" varchar(20) NULL ,\r\n \"Guna\" int NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `CastData`(`ProfId`) ON delete cascade ON update no action\r\n \r\n);\r\n\r\nCREATE TABLE IF NOT EXISTS \"CurLocData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE,\r\n \"Country\" varchar(20) NULL ,\r\n \"State\" varchar(20) NULL ,\r\n \"Dist\" varchar(20) NULL ,\r\n \"Tal\" varchar(20) NULL ,\r\n \"City\" varchar(20) NULL ,\r\n \"Pincode\" varchar(20) NULL ,\r\n \"LAT\" REAL NULL ,\r\n \"LONG\" REAL NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `BirData`(`ProfId`) ON delete cascade ON update no action\r\n);\r\n\r\nCREATE TABLE IF NOT EXISTS \"BirLocData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE ,\r\n \"Country\" varchar(20) NULL ,\r\n \"State\" varchar(20) NULL ,\r\n \"Dist\" varchar(20) NULL ,\r\n \"Tal\" varchar(20) NULL ,\r\n \"City\" varchar(20) NULL ,\r\n \"Pincode\" varchar(20) NULL ,\r\n \"LAT\" REAL NULL ,\r\n \"LONG\" REAL NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `CurLocData`(`ProfId`) ON delete cascade ON update no action\r\n);\r\n\r\nCREATE TABLE IF NOT EXISTS \"ParLocData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE ,\r\n \"Country\" varchar(20) NULL ,\r\n \"State\" varchar(20) NULL ,\r\n \"Dist\" varchar(20) NULL ,\r\n \"Tal\" varchar(20) NULL ,\r\n \"City\" varchar(20) NULL ,\r\n \"Pincode\" varchar(20) NULL ,\r\n \"LAT\" REAL NULL ,\r\n \"LONG\" REAL NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `BirLocData`(`ProfId`) ON delete cascade ON update no action\r\n);\r\n\r\nCREATE TABLE IF NOT EXISTS \"AnsLocData\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE ,\r\n \"Country\" varchar(20) NULL ,\r\n \"State\" varchar(20) NULL ,\r\n \"Dist\" varchar(20) 
NULL ,\r\n \"Tal\" varchar(20) NULL ,\r\n \"City\" varchar(20) NULL ,\r\n \"Pincode\" varchar(20) NULL ,\r\n \"LAT\" REAL NULL ,\r\n \"LONG\" REAL NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `ParLocData`(`ProfId`) ON delete cascade ON update no action\r\n);\r\n\r\nCREATE TABLE IF NOT EXISTS \"Metdata\" (\r\n \"id\" INTEGER PRIMARY KEY AUTOINCREMENT ,\r\n \"ProfId\" varchar(20) UNIQUE ,\r\n \"ProfRef\" varchar(20) NULL,\r\n \"ProfLink\" text NULL ,\r\n \"ProfCreatTime\" varchar(20) NULL ,\r\n \"ProfUpdate\" varchar(20) NULL ,\r\n \"OtherData\" text NULL ,\r\n \"ProfileAccess\" int NULL ,\r\n FOREIGN KEY (`ProfId`) REFERENCES `AnsLocData`(`ProfId`) ON delete cascade ON update no action\r\n);'''\r\n\r\nclass Browser(object):\r\n \"\"\"Handles web browser\"\"\"\r\n def __init__(self):\r\n \"\"\"Class Initialization Function\"\"\"\r\n\r\n def __call__(self):\r\n \"\"\"Class call\"\"\"\r\n\r\n def startDriver(self,drive=\"firefox\"):\r\n \"\"\"Starts the driver\"\"\"\r\n #Make sure that the browser parameter is a string\r\n assert isinstance(drive,str)\r\n\r\n #Standardize the browser selection string\r\n drive = drive.lower().strip()\r\n #Start the browser\r\n if drive==\"firefox\":\r\n firefox_profile = webdriver.FirefoxProfile()\r\n firefox_profile.set_preference(\"browser.privatebrowsing.autostart\", True) \r\n \r\n self.browser = webdriver.Firefox(firefox_profile=firefox_profile, executable_path=BROWSERLOCATION)\r\n \r\n\r\n def closeDriver(self):\r\n \"\"\"Close the browser object\"\"\"\r\n #Try to close the browser\r\n try:\r\n self.browser.close()\r\n except Exception as e:\r\n print(\"Error closing the web browser: {}\".format(e))\r\n\r\n def getURL(self,url='www.google.com'):\r\n \"\"\"Retrieve the data from a url\"\"\"\r\n #Retrieve the data from the specified url\r\n data = self.browser.get(url)\r\n\r\n return data\r\n\r\n def __enter__(self):\r\n \"\"\"Set things up\"\"\"\r\n #Start the web driver\r\n self.startDriver()\r\n return self\r\n\r\n def __exit__(self, type, value, traceback):\r\n \"\"\"Tear things down\"\"\"\r\n #Close the webdriver\r\n# self.closeDriver()\r\n print (\"Close Browser by yourself\\n\")\r\n\r\nclass DBDriver(object):\r\n \"\"\"Handles web browser\"\"\"\r\n def __init__(self, file_name, db_schema):\r\n \"\"\"Class Initialization Function\"\"\"\r\n self.filename = file_name\r\n self.dbschema = db_schema\r\n\r\n def __call__(self):\r\n \"\"\"Class call\"\"\"\r\n\r\n def startDriver(self):\r\n \"\"\"Starts the driver\"\"\"\r\n try:\r\n self.conn = sqlite3.connect(self.filename)\r\n except Error as e:\r\n print(e)\r\n else:\r\n c = self.conn.cursor()\r\n c.executescript(self.dbschema)\r\n\r\n def closeDriver(self):\r\n \"\"\"Close the browser object\"\"\"\r\n #Try to close the browser\r\n self.conn.commit()\r\n self.conn.cursor().close() \r\n \r\n def __enter__(self):\r\n \"\"\"Set things up\"\"\"\r\n #Start the web driver\r\n self.startDriver()\r\n return self\r\n\r\n def __exit__(self, type, value, traceback):\r\n \"\"\"Tear things down\"\"\"\r\n #Close the webdriver\r\n self.closeDriver() \r\n\r\ndef Update_Guna(conn):\r\n \r\n if (os.path.isfile('List.txt') & os.path.exists('List.txt')):\r\n # Open the existing DB connection \r\n try:\r\n with open(\"List.txt\", \"r\") as fd:\r\n list_1 = fd.readlines()\r\n except IOError:\r\n print(\"Error in opening Credentials.txt\\n\")\r\n return\r\n else:\r\n print(\"List.txt doen't exist\\n\")\r\n return\r\n \r\n if (len(list_1) == 0):\r\n print(\"No data in list.txt\\n\")\r\n return\r\n \r\n list_1 = map(lambda s: 
s.strip(), list_1)\r\n \r\n print list_1\r\n \r\n c = conn.cursor()\r\n \r\n firefox_profile = webdriver.FirefoxProfile()\r\n firefox_profile.set_preference(\"browser.privatebrowsing.autostart\", True)\r\n\r\n browser = webdriver.Firefox(firefox_profile=firefox_profile)\r\n# browser.get('https://www.mpanchang.com/astrology/kundali-matching/')\r\n\r\n # Read the boys details\r\n try:\r\n c.execute(\"select candidate.f_name, candidate.dob, candidate.bir_time, candidate.bir_place from candidate where id = '1'\")\r\n except IntegrityError:\r\n print (\"Unable to read boys data from database\\n\")\r\n \r\n boy_data = c.fetchone()\r\n \r\n boy_f_name = \"He\"\r\n if (boy_data[0] != None):\r\n boy_f_name = boy_data[0]\r\n boy_date_list = boy_data[1].split('-')\r\n boy_time_list = boy_data[2].split(':')\r\n boy_bir_place = boy_data[3]\r\n \r\n # Desired values range is 01 to 12\r\n boy_month = boy_date_list[1]\r\n \r\n # Desired values range is 1 to 31\r\n boy_date = \"{0:1}\".format(int(boy_date_list[0]))\r\n \r\n # Desired values range is 2018 to 1918\r\n boy_year = boy_date_list[2]\r\n \r\n # Desired values range is 0 to 59\r\n boy_sec = \"{0:1}\".format(int(boy_time_list[2]))\r\n \r\n # Desired values range is 0 to 59\r\n boy_min = \"{0:1}\".format(int(boy_time_list[1]))\r\n \r\n if (int(boy_time_list[0]) > 12):\r\n boy_hour = str(int(boy_time_list[0]) - 12)\r\n # Desired values range is 0 to 12\r\n boy_hour = \"{0:1}\".format(int(boy_hour))\r\n boy_AM_PM = '02'\r\n else:\r\n boy_hour = \"{0:1}\".format(int(boy_time_list[0]))\r\n boy_AM_PM = '01'\r\n \r\n for ID in list_1:\r\n browser.get('https://www.drikpanchang.com/jyotisha/horoscope-match/horoscope-match.html')\r\n dash_board_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@id=\"dpBoyData\"]')))\r\n \r\n try:\r\n c.execute(\"select candidate.f_name, candidate.dob, candidate.bir_time, candidate.bir_place from candidate where id = ?\", (ID,))\r\n except IntegrityError:\r\n print (\"Unable to read girls database\\n\")\r\n \r\n candidate_data = c.fetchone()\r\n print candidate_data\r\n \r\n if ((candidate_data[1] == None) or (candidate_data[2] == None) or (candidate_data[3] == None)):\r\n print (\"Unable to get %s candidate data properly\\n\") %(ID)\r\n continue\r\n #Male Details\r\n browser.find_element_by_xpath('//input[@id=\"kmb-name\"]').send_keys(boy_f_name)\r\n \r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-month\"]'))\r\n input_fields.select_by_value(boy_month)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-day\"]'))\r\n input_fields.select_by_value(boy_date)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-year\"]'))\r\n input_fields.select_by_value(boy_year)\r\n \r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-hr\"]'))\r\n input_fields.select_by_value(boy_hour)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-min\"]'))\r\n input_fields.select_by_value(boy_min)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-sec\"]'))\r\n input_fields.select_by_value(boy_sec)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmb-ampm\"]'))\r\n input_fields.select_by_value(boy_AM_PM)\r\n \r\n browser.find_element_by_xpath('//input[@id=\"kmb-city\"]').send_keys(boy_bir_place)\r\n \r\n #female details\r\n f_name = \"She\"\r\n if (candidate_data[0] != None):\r\n f_name = candidate_data[0]\r\n date_list = 
candidate_data[1].split('-')\r\n time_list = candidate_data[2].split(':')\r\n bir_place = candidate_data[3]\r\n \r\n # Desired values range is 01 to 12\r\n month = date_list[1]\r\n \r\n # Desired values range is 1 to 31\r\n date = \"{0:1}\".format(int(date_list[0]))\r\n \r\n # Desired values range is 2018 to 1918\r\n year = date_list[2]\r\n \r\n # Desired values range is 0 to 59\r\n sec = \"{0:1}\".format(int(time_list[2]))\r\n \r\n # Desired values range is 0 to 59\r\n min = \"{0:1}\".format(int(time_list[1]))\r\n \r\n if (int(time_list[0]) > 12):\r\n hour = str(int(time_list[0]) - 12)\r\n # Desired values range is 0 to 12\r\n hour = \"{0:1}\".format(int(hour))\r\n AM_PM = '02'\r\n else:\r\n hour = \"{0:1}\".format(int(time_list[0]))\r\n AM_PM = '01'\r\n \r\n print (\"Entering following detials for girl\\n\")\r\n print (\"\\nName: %s\") %(f_name)\r\n \r\n print (\"\\nMonth: %s\") %(month)\r\n print (\"\\nDate: %s\") %(date)\r\n print (\"\\nYear: %s\") %(year)\r\n \r\n print (\"\\nHour: %s\") %(hour)\r\n print (\"\\nMin: %s\") %(min)\r\n print (\"\\nSec: %s\") %(sec)\r\n print (\"\\nZone: %s\") %(AM_PM)\r\n \r\n browser.find_element_by_xpath('//input[@id=\"kmg-name\"]').send_keys(f_name)\r\n \r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-month\"]'))\r\n input_fields.select_by_value(month)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-day\"]'))\r\n input_fields.select_by_value(date)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-year\"]'))\r\n input_fields.select_by_value(year)\r\n \r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-hr\"]'))\r\n input_fields.select_by_value(hour)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-min\"]'))\r\n input_fields.select_by_value(min)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-sec\"]'))\r\n input_fields.select_by_value(sec)\r\n input_fields = Select(browser.find_element_by_xpath('//select[@id=\"kmg-ampm\"]'))\r\n input_fields.select_by_value(AM_PM)\r\n \r\n browser.find_element_by_xpath('//input[@id=\"kmg-city\"]').send_keys(bir_place)\r\n \r\n raw_input(\"Press Enter to continue...\")\r\n \r\n# browser.find_element_by_xpath('//input[@id=\"dpSubmitDiv\"]').click()\r\n \r\n final_result = WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.XPATH, '//th[contains(text(),\"Total Guna Milan =\")]')))\r\n \r\n guna_temp = re.findall(\"[\\d.]+(?= ?out)\",final_result.text)[0]\r\n \r\n print guna_temp\r\n \r\n guna = float(guna_temp)\r\n \r\n print \"\\nCalculated guna are %f\" %(guna)\r\n \r\n try:\r\n c.execute(\"update candidate set guna = ? where id = ?\", (guna, ID))\r\n except IntegrityError:\r\n print (\"Unable to update guna in database\\n\")\r\n \r\n conn.commit()\r\n \r\n "
},
{
"alpha_fraction": 0.5031819343566895,
"alphanum_fraction": 0.5121640563011169,
"avg_line_length": 39.35187911987305,
"blob_id": "292559866b5587fbd3c1771e1fced2a6b20a1920",
"content_id": "b1d8e147fd8fae6ef984c87ca199cf98c2f40a8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 27499,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 665,
"path": "/TAJ.py",
"repo_name": "Dofreeshare/GenProfiler",
"src_encoding": "UTF-8",
"text": "# import os\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.common.exceptions import *\r\n\r\nimport sqlite3\r\nfrom sqlite3 import Error, IntegrityError\r\n\r\nimport re\r\n\r\nfrom datetime import datetime\r\n\r\nimport urllib\r\n\r\nimport os\r\nfrom pandas._libs.testing import isnull\r\n\r\n# import time\r\n# from _socket import timeout\r\n# from pstats import browser\r\n\r\nage_from = \"25\"\r\nage_to = \"29\"\r\n\r\nfile_name = \"VedicMaratha.db\"\r\ndb_schema = '''CREATE TABLE IF NOT EXISTS candidate (\r\n id text PRIMARY KEY,\r\n l_name text,\r\n cast text,\r\n height real,\r\n reg_date text,\r\n edu text,\r\n prof text,\r\n income int,\r\n dob text,\r\n time text,\r\n place text,\r\n rashi text,\r\n nak text)'''\r\n\r\ndef CreateNewDB():\r\n print (\"Creating New DB\\n\")\r\n try:\r\n conn = sqlite3.connect(\"VedicMaratha.db\")\r\n except Error as e:\r\n print(e)\r\n else:\r\n c = conn.cursor()\r\n c.execute('''CREATE TABLE IF NOT EXISTS candidate (\r\n id text PRIMARY KEY,\r\n l_name text,\r\n cast text,\r\n height real,\r\n reg_date text,\r\n edu text,\r\n prof text,\r\n income int,\r\n dob text,\r\n time text,\r\n place text,\r\n rashi text,\r\n nak text)''')\r\n return conn\r\n# finally:\r\n# conn.close()\r\n\r\ndef GetProfielPic(browser, xpath, candidate_id):\r\n try:\r\n cand_img = browser.find_element_by_xpath(xpath).get_attribute(\"src\")\r\n except NoSuchElementException:\r\n print (\"Unable to get profile pic\\n\")\r\n pass\r\n else:\r\n if (cand_img != 'https://www.tumchaaamchajamla.com/images/Default_Profile.png'):\r\n file_name = \"snaps\\\\tumchaaamchajamla\\\\\" + candidate_id + '.jpeg'\r\n urllib.urlretrieve(cand_img, file_name)\r\n# else:\r\n# print(\"Looks like default Pic\\n\")\r\n\r\ndef GetLastName(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text.encode('ascii','ignore')\r\n str2 = re.findall('(?<=\\().+(?=\\))', str1)[0].title()\r\n return str2.title()\r\n \r\ndef GetCast(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n return str1.title()\r\n else:\r\n return None\r\n \r\ndef GetHeight(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n return float(re.findall(\"(\\d+.?\\d+)(?= cms)\", str1)[0])\r\n else:\r\n return None\r\n \r\ndef GetWeight(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n return float(re.findall(\"(\\d+.?\\d+ ?)(?=Kg)\", str1)[0])\r\n else:\r\n return None\r\n \r\ndef GetRegDate(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (str1 != '--'):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n str1 = str1.replace('-','/')\r\n return str1\r\n else:\r\n return None\r\n \r\n \r\ndef GetEdu(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('\"', '')\r\n str1 = str1.replace(\"'\", \"\")\r\n str1 = 
str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n\r\ndef GetProf(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('\"', '')\r\n str1 = str1.replace(\"'\", \"\")\r\n str1 = str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n \r\ndef GetInc(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n income = None\r\n Currency = None\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n if (-1 != str1.find('INR')):\r\n Currency = 'INR';\r\n str1 = str1.replace('INR - ', '')\r\n elif (-1 != str1.find('USD')):\r\n Currency = 'USD';\r\n str1 = str1.replace('USD - ', '')\r\n elif (-1 != str1.find('Pound')):\r\n Currency = 'PND';\r\n str1 = str1.replace('Pound - ', '')\r\n \r\n str1 = str1.replace(',', '')\r\n income = str1\r\n return income, Currency\r\n \r\ndef GetDOB(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n str1 = str1.replace(' - ','-')\r\n return str1\r\n else:\r\n return None\r\n \r\ndef GetTime(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore')\r\n \r\n hour = int(re.findall(\"\\d+\", str1)[0])\r\n minutes = int(re.findall(\"\\d+\", str1)[1])\r\n sec = 0\r\n bir_time = \"{:0>2}:{:0>2}:{:0>2}\".format(hour,minutes,sec) \r\n return bir_time\r\n else:\r\n return None\r\n \r\ndef GetPlace(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.encode('ascii','ignore').title()\r\n return str1\r\n else:\r\n return None\r\n \r\ndef GetRashi(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('Rashi / Nakshatra / Charan / Nadi / Gan: ','')\r\n token = str1.split(',')\r\n if (token[0] != ' '):\r\n return None\r\n else:\r\n return token[0]\r\n else:\r\n return None\r\n \r\ndef GetNak(browser, xpath):\r\n str1 = browser.find_element_by_xpath(xpath).text\r\n if (not(str1 == None or str1 == '')):\r\n str1 = str1.strip()\r\n str1 = str1.replace('Rashi / Nakshatra / Charan / Nadi / Gan: ','')\r\n token = str1.split(',')\r\n if (token[1] != ' '):\r\n return None\r\n else:\r\n return token[1]\r\n else:\r\n return None\r\n \r\n \r\ndef StoreCandidateData(browser, c, candidate_id):\r\n \r\n cast = None\r\n subcast = None\r\n height = None\r\n edu = None\r\n income = None\r\n Currency = None\r\n prof = None\r\n \r\n dob = None\r\n time = None\r\n place = None\r\n \r\n try:\r\n \r\n# time.sleep(3)\r\n GetProfielPic(browser, '//img[@id=\"imgDatingProfileView_PhotoMain\"]', candidate_id)\r\n \r\n cast = GetCast(browser, '//div[contains(text(), \"Caste\")]/following-sibling::div')\r\n subcast = GetCast(browser, '//div[contains(text(), \"Sub Caste\")]/following-sibling::div')\r\n \r\n height = GetHeight(browser, '//div[contains(text(), \"Height\")]/following-sibling::div')\r\n weight = GetWeight(browser, '//div[contains(text(), \"Weight\")]/following-sibling::div')\r\n \r\n edu = GetEdu(browser, '//div[contains(text(), \"Education Details\")]/following-sibling::div')\r\n \r\n (income, Currency) = GetInc(browser, '//div[contains(text(), \"Annual 
Income\")]/following-sibling::div') \r\n \r\n prof = GetProf(browser, '//div[contains(text(), \"Profession\")]/following-sibling::div')\r\n \r\n dob = GetDOB(browser, '//div[contains(text(), \"Birth Date\")]/following-sibling::div') # Birth Date\r\n time = GetTime(browser, '//div[contains(text(), \"Birth Time\")]/following-sibling::div') # Birth Time\r\n place = GetPlace(browser, '//div[contains(text(), \"Birth Place\")]/following-sibling::div') # Birth Place\r\n \r\n ProfLink = browser.current_url\r\n now = datetime.now()\r\n ProfCreatTime = now.strftime(\"%d/%m/%Y\")\r\n \r\n complete_det = \"{}|{}|{}|{} cm|{} Kg|{}|{} {}|{}|{}|{}\".format(candidate_id, cast, subcast, height, weight, edu, income, Currency, prof, dob, time, place)\r\n \r\n print \"%s\\n\" %(complete_det)\r\n \r\n try:\r\n db_string = \"UPDATE PrimData SET Height = {0}, Weight = {1}, Edu = {2}, Prof = {3}, Income = {4}, IncCur = {5} where ProfId = {6}\".format(\r\n (height if (height != None) else 'null'),\r\n (weight if (weight != None) else 'null'),\r\n ((\"'{}'\".format(edu)) if (edu != None) else 'null'),\r\n ((\"'{}'\".format(prof)) if (prof != None) else 'null'),\r\n ((\"'{}'\".format(income)) if (income != None) else 'null'),\r\n ((\"'{}'\".format(Currency)) if (Currency != None) else 'null'),\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'))\r\n# print db_string\r\n# print \"\\n\\n\"\r\n c.execute(db_string)\r\n \r\n c.execute(\"select exists (select CastData.ProfId from CastData where CastData.ProfId = ?)\", (candidate_id,))\r\n if (c.fetchone() == (1,)):\r\n db_string = \"UPDATE CastData SET MCast = {1}, SCast = {2} where ProfId = {0}\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(cast)) if (cast != None) else 'null'), \r\n ((\"'{}'\".format(subcast)) if (subcast != None) else 'null'))\r\n else:\r\n db_string = \"insert into CastData (ProfId, MCast, SCast) values ({0}, {1}, {2})\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(cast)) if (cast != None) else 'null'), \r\n ((\"'{}'\".format(subcast)) if (subcast != None) else 'null'))\r\n# print db_string\r\n# print \"\\n\\n\"\r\n c.execute(db_string)\r\n \r\n c.execute(\"select exists (select BirData.ProfId from BirData where BirData.ProfId = ?)\", (candidate_id,))\r\n if (c.fetchone() == (1,)):\r\n db_string = \"UPDATE BirData SET BTime = {1}, BDate = {2} where ProfId = {0}\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(time)) if (time != None) else 'null'), \r\n ((\"'{}'\".format(dob)) if (dob != None) else 'null'))\r\n else:\r\n db_string = \"insert into BirData (ProfId, BTime, BDate) values ({0}, {1}, {2})\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(time)) if (time != None) else 'null'), \r\n ((\"'{}'\".format(dob)) if (dob != None) else 'null'))\r\n# print db_string\r\n# print \"\\n\\n\"\r\n c.execute(db_string)\r\n \r\n c.execute(\"select exists (select BirLocData.ProfId from BirLocData where BirLocData.ProfId = ?)\", (candidate_id,))\r\n if (c.fetchone() == (1,)):\r\n db_string = \"UPDATE BirLocData SET City = {1} where ProfId = {0}\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(place)) if (place != None) else 'null'))\r\n else:\r\n db_string = \"insert into BirLocData (ProfId, City) values ({0}, {1})\".format(\r\n 
((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(place)) if (place != None) else 'null'))\r\n# print db_string\r\n# print \"\\n\\n\"\r\n c.execute(db_string)\r\n \r\n c.execute(\"select exists (select Metdata.ProfId from Metdata where Metdata.ProfId = ?)\", (candidate_id,))\r\n if (c.fetchone() == (1,)):\r\n db_string = \"UPDATE Metdata SET ProfLink = {1}, ProfCreatTime = {2} where ProfId = {0}\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(ProfLink)) if (ProfLink != None) else 'null'), \r\n ((\"'{}'\".format(ProfCreatTime)) if (ProfCreatTime != None) else'null'))\r\n else:\r\n db_string = \"insert into Metdata (ProfId, ProfLink, ProfCreatTime) values ({0}, {1}, {2})\".format(\r\n ((\"'{}'\".format(candidate_id)) if (candidate_id != None) else 'null'), \r\n ((\"'{}'\".format(ProfLink)) if (ProfLink != None) else 'null'), \r\n ((\"'{}'\".format(ProfCreatTime)) if (ProfCreatTime != None) else'null'))\r\n# print db_string\r\n# print \"\\n\\n\"\r\n c.execute(db_string)\r\n \r\n except IntegrityError as e:\r\n print (\"Unable to update the database\\n\")\r\n print e\r\n \r\n except NoSuchElementException as e:\r\n print (\"StoreCandidateData : Unable to save the details because no such element\\n\")\r\n print e\r\n\r\n except TimeoutException:\r\n print (\"StoreCandidateData : Unable to load the profile\\n\")\r\n\r\n\r\ndef found_window(profile_num):\r\n\r\n def predicate(driver):\r\n for handle in driver.window_handles[1:]:\r\n try: \r\n driver.switch_to_window(handle)\r\n WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.XPATH , '//div[@class=\"PageMidBG\"]')))\r\n str1 = driver.find_element_by_xpath('//span[contains(text(), \"Profile \")]/following-sibling::span').text\r\n str1 = str1.strip()\r\n# print str1\r\n if (profile_num == str1):\r\n return True # found window\r\n else:\r\n continue\r\n \r\n except NoSuchWindowException:\r\n print (\"Window not listed hence unable to switch to window\\n\")\r\n return False\r\n except NoSuchElementException as e:\r\n print (\"Unable to switch because unable to find elements below mentioned\\n\")\r\n print (e)\r\n pass\r\n continue\r\n\r\n return predicate\r\n\r\n\r\ndef CollectTenCandidatesData(browser, c):\r\n try:\r\n # We are now on new page of the candidates\r\n curr_candidate_xpath = \"//a[contains(@href, 'profile_by_public')]\"\r\n candidate_id_array = browser.find_elements_by_xpath(curr_candidate_xpath)\r\n \r\n for candidate_id in candidate_id_array:\r\n cand_id = candidate_id.text.strip().encode('ascii','ignore')\r\n# print \"Value of cand id is %s\" %(cand_id)\r\n# temp_var = (cand_id,)\r\n c.execute(\"select exists (select PrimData.ProfId from PrimData where PrimData.ProfId = ?)\", (cand_id,))\r\n if (c.fetchone() == (0,)):\r\n #New candiate data found save it \r\n main_window = browser.current_window_handle\r\n \r\n candidate_id.click()\r\n# browser.implicitly_wait(30) \r\n# time.sleep(3) \r\n# print (\"Number of windows opened %d\\n \") %(len(browser.window_handles))\r\n\r\n WebDriverWait(browser, 10, 2).until(found_window('Member Profile'))\r\n StoreCandidateData(browser, c, cand_id)\r\n browser.close()\r\n browser.switch_to_window(main_window)\r\n \r\n\r\n #switch to new window\r\n# browser.switch_to_window(browser.window_handles[1])\r\n# browser.switch_to_window('')\r\n# print (\"Title of window %s\\n \") %browser.title\r\n\r\n# \r\n# for handle in browser.window_handles:\r\n# if (main_window != handle):\r\n# 
browser.switch_to_window(handle)\r\n# print (\"Switched to new window %s\\n \") %browser.title\r\n# StoreCandidateData(browser, c, cand_id)\r\n# browser.close()\r\n# browser.switch_to_window(main_window)\r\n \r\n else:\r\n print \"%s is already in DB\\n\" %(cand_id)\r\n except NoSuchElementException as e:\r\n print (\"CollectTenCandidatesData : Unable to find some elements\\n\")\r\n print (e)\r\n\r\n\r\ndef NagivateToDashBoard(login_ID, login_pass, login_page, browser):\r\n \r\n browser.get(login_page)\r\n try:\r\n #Input the user name\r\n login_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'input#txtUsername')))\r\n login_element.send_keys(login_ID)\r\n login_element.submit()\r\n \r\n #Input password\r\n login_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'input#txtPassword')))\r\n login_element.send_keys(login_pass)\r\n login_element.submit()\r\n \r\n# pop_up_element = browser.find_element_by_xpath('//a[@class=\"popup-modal-dismiss\"]')\r\n pop_up_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//a[@class=\"popup-modal-dismiss\"]')))\r\n pop_up_element.click()\r\n \r\n #Wait for Dashboard to load\r\n WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@class=\"MenuIconLinks_Div\"]')))\r\n \r\n except NoSuchElementException:\r\n print (\"Unable to find out login forms\\n\")\r\n return None\r\n except TimeoutException:\r\n print (\"Timeout happened\\n\")\r\n return None\r\n\r\n\r\ndef GetPageNum():\r\n print (\"GetPageNum\\n\")\r\n \r\ndef GoNextPage(browser):\r\n browser.find_element_by_xpath('//a[@class=\"paginate_button next\"]').click()\r\n \r\ndef CollectDetailedInformation(browser, conn):\r\n \r\n if (os.path.isfile('List.txt') & os.path.exists('List.txt')):\r\n # Open the existing DB connection \r\n try:\r\n with open(\"List.txt\", \"r\") as fd:\r\n list_of_cand = fd.readlines()\r\n except IOError:\r\n print(\"Error in opening Credentials.txt\\n\")\r\n else:\r\n print(\"List.txt doen't exist\\n\")\r\n \r\n c = conn.cursor()\r\n try:\r\n # Scraping the Search results\r\n main_window = browser.current_window_handle\r\n \r\n #Wait for Dashboard to load\r\n dash_board_element = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//div[@onclick=\"CheckMyProfileStatus(1);\"]')))\r\n dash_board_element.click()\r\n \r\n #Quick search\r\n qck_srch_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH , '//div[@id=\"divDatingQuickSearch_MainSearchBlock\"]')))\r\n \r\n for ID in list_of_cand:\r\n qck_srch_element = browser.find_element_by_xpath('//input[@id=\"txtDatingQuickSearch_SearchByKeyword\"]')\r\n qck_srch_element.send_keys(ID)\r\n \r\n qck_srch_element = browser.find_element_by_xpath('//input[@class=\"DatingCSS_SearchButton\"]')\r\n qck_srch_element.click()\r\n \r\n #Wait for the results to load\r\n WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.XPATH , '//div[@class=\"ResultList\"]')))\r\n \r\n candidate_link = browser.find_element_by_xpath('//div[@class=\"ResultList\"]/div[1]//a[@class=\"LinkBold\"]')\r\n candidate_link.click()\r\n \r\n #switch to new window\r\n# browser.switch_to_window(browser.window_handles[1])\r\n WebDriverWait(browser, 10, 2).until(found_window(\"ID: \" + ID.strip()))\r\n \r\n# candidate_page = WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH , '//div[@class=\"PageMidBG\"]')))\r\n \r\n 
StoreCandidateData(browser, c, ID.strip())\r\n# print \"trying to close window\\n\"\r\n browser.close()\r\n# print \"Moving to next window\\n\"\r\n browser.switch_to_window(main_window)\r\n# browser.implicitly_wait(1)\r\n qck_srch_element = browser.find_element_by_xpath('//input[@id=\"txtDatingQuickSearch_SearchByKeyword\"]')\r\n qck_srch_element.clear()\r\n \r\n except NoSuchElementException:\r\n print (\"Unable to find out login forms\\n\")\r\n except TimeoutException:\r\n print (\"Timeout happened\\n\")\r\n except IndexError:\r\n pass\r\n \r\ndef ScrapLoadedPages(browser, conn):\r\n #Rotate through all the search result pages\r\n c = conn.cursor()\r\n \r\n while (True):\r\n try:\r\n \r\n #Wait for the results to load\r\n WebDriverWait(browser, 30).until(EC.visibility_of_element_located((By.XPATH, '//table[@class=\"dataTable no-footer\"]')))\r\n \r\n CollectTenCandidatesData(browser, c)\r\n \r\n print (\"Moving on to Next page\\n\")\r\n GoNextPage(browser)\r\n \r\n except NoSuchElementException as e:\r\n print (\"Seems like we are on last page, saving changes to database\\n\")\r\n print e\r\n break\r\n \r\n# conn.commit()\r\n# c.close()\r\n\r\ndef QuickSearchCollectList(browser, c, curr_candidate_index):\r\n \r\n while(True):\r\n try: \r\n curr_candidate_xpath = '(//div[@class=\"ResultBlock\"]/div/div[@class=\"Text\"][1])['+str(curr_candidate_index)+']'\r\n \r\n title = browser.find_element_by_xpath(curr_candidate_xpath).text\r\n title = title.strip()\r\n title = title.encode('ascii','ignore')\r\n title = title.replace('(', '')\r\n title = title.replace(')', '')\r\n title_list = title.split(' ')\r\n cand_id = title_list[-1]\r\n \r\n print cand_id\r\n \r\n c.execute(\"select exists (select PrimData.ProfId from PrimData where PrimData.ProfId = ?)\", (cand_id,))\r\n if (c.fetchone() == (0,)):\r\n #New candiate data found save it\r\n if (len(title_list) == 1):\r\n l_name = None\r\n f_name = None\r\n elif (len(title_list) == 2):\r\n l_name = title_list[-2]\r\n f_name = None\r\n elif (len(title_list) == 3):\r\n l_name = title_list[-2]\r\n f_name = title_list[-3]\r\n \r\n curr_candidate_xpath = '(//div[@class=\"ResultBlock\"]/div/div[@class=\"Text\"][2])['+str(curr_candidate_index)+']'\r\n address_str = browser.find_element_by_xpath(curr_candidate_xpath).text.strip().encode('ascii','ignore')\r\n \r\n address_tag = address_str.split('\\n')\r\n \r\n try:\r\n \r\n city_extract_new = address_tag[-1].split(',')\r\n country = city_extract_new[-1].strip().capitalize()\r\n state = None\r\n city = None\r\n if (len(city_extract_new) == 2):\r\n state = city_extract_new[-2].strip().capitalize()\r\n city = None\r\n elif(len(city_extract_new) == 3):\r\n state = city_extract_new[-2].strip().capitalize()\r\n city = city_extract_new[-3].strip().capitalize()\r\n \r\n except IndexError:\r\n pass\r\n \r\n print \"{}|{}|{}|{}|{}\\n\".format(cand_id, f_name, l_name, country, state, city)\r\n \r\n try:\r\n c.execute(\"insert into PrimData (ProfId, FName, LName) values (?, ?, ?)\", (cand_id, f_name, l_name))\r\n \r\n c.execute(\"insert into CurLocData (ProfId, Country, State, City) values (?, ?, ?, ?)\", (cand_id, country, state, city))\r\n \r\n c.execute(\"insert into Metdata (ProfId, ProfRef) values (?, ?)\", (cand_id, \"TAJ\"))\r\n \r\n except IntegrityError:\r\n print (\"Unable to update the database\\n\")\r\n \r\n curr_candidate_index = curr_candidate_index + 1\r\n \r\n except NoSuchElementException:\r\n print (\"Collected Data of all 10 Candidates\\n\")\r\n pass\r\n break\r\n \r\n return 
curr_candidate_index\r\n\r\ndef CollectAllQuickSearch(browser, conn):\r\n \r\n c = conn.cursor()\r\n try:\r\n dash_board_element = browser.find_element_by_xpath('//div[@onclick=\"CheckMyProfileStatus(1);\"]')\r\n dash_board_element.click()\r\n \r\n #Quick search\r\n qck_srch_element = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH , '//div[@id=\"divDatingQuickSearch_MainSearchBlock\"]')))\r\n \r\n qck_srch_element = browser.find_element_by_xpath('//select[@id=\"selDatingQuickSearch_AgeFrom\"]')\r\n select = Select(qck_srch_element)\r\n select.select_by_visible_text(age_from)\r\n \r\n qck_srch_element = browser.find_element_by_xpath('//select[@id=\"selDatingQuickSearch_AgeTo\"]')\r\n select = Select(qck_srch_element)\r\n select.select_by_visible_text(age_to)\r\n \r\n qck_srch_element = browser.find_element_by_xpath('//select[@id=\"selDatingQuickSearch_Caste\"]')\r\n select = Select(qck_srch_element)\r\n select.select_by_value(\"2\")\r\n \r\n qck_srch_element = browser.find_element_by_xpath('//input[@class=\"DatingCSS_SearchButton\"]')\r\n qck_srch_element.click()\r\n \r\n curr_candidate_index = 1\r\n while(True):\r\n #Wait for the results to load\r\n all_list = WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.XPATH , '//div[@class=\"ResultList\"]')))\r\n\r\n curr_candidate_index = QuickSearchCollectList(browser, c, curr_candidate_index)\r\n \r\n# curr_candidate_index = curr_candidate_index + 1\r\n \r\n print (\"Clicking the for more Updates\\n\")\r\n temp_elem = browser.find_element_by_xpath('//div[@id=\"divMoreUpdates\"]')\r\n temp_elem.click()\r\n browser.implicitly_wait(5)\r\n \r\n except NoSuchElementException:\r\n print (\"Unable to find out login forms\\n\")\r\n except TimeoutException:\r\n print (\"Timeout happened\\n\")\r\n \r\n finally:\r\n# browser.close()\r\n conn.commit()\r\n c.close()\r\n"
}
] | 5 |
zerolugithub/FriedRing | https://github.com/zerolugithub/FriedRing | 411991423185cad62911f226e08d8c3ca9e10ce0 | dad6594c440c0e37239ff934895d35a7d69826df | b3757b9b8ba033c4398ad6445ad1338e4b94cdc6 | refs/heads/master | 2020-12-30T17:51:38.389204 | 2017-03-31T01:32:48 | 2017-03-31T01:32:48 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5808581113815308,
"alphanum_fraction": 0.5907590985298157,
"avg_line_length": 22.33333396911621,
"blob_id": "47a137812b53b1268c800993262ca7993e55336d",
"content_id": "c8388cd6b09f6fc0a1e1df881ba16f93d862d8f9",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 909,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 39,
"path": "/build/lib/FriedRing/fr.py",
"repo_name": "zerolugithub/FriedRing",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding=utf-8\nimport getopt\nimport os\nimport time\n#import shutil\n#import subprocess\nimport sys\n#import optparse\n\nfrom mitmproxy import proxy\n\nfrom FriedRing import FriedRing\n\nVERSION='1.0.5'\ndef main():\n opts, args = getopt.getopt(sys.argv[1:], \"hp:w:\")\n strPort=8888\n fnamescript='__crisschan_TEMP'+str(time.time())\n for op, value in opts:\n if op == \"-p\":\n strPort = value\n elif op == \"-w\":\n fnamescript = value\n elif op == \"-h\":\n #usage()\n print '-p the proxy port\\r\\n-w the script_solution name'\n sys.exit()\n\n config = proxy.ProxyConfig(\n cadir=os.path.expanduser(\"~/.mitmproxy/\"),\n port=int(strPort)\n )\n server = proxy.ProxyServer(config)\n print 'the porxy port is '+str(strPort)\n m = FriedRing(server, fnamescript)\n m.run()\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.45872125029563904,
"alphanum_fraction": 0.6996553540229797,
"avg_line_length": 93.37378692626953,
"blob_id": "b93f8002afdf56510c8167317035cdb8c40d47f6",
"content_id": "3acc0c61f4a8f9c0dc259a0788431b204d0278ab",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19441,
"license_type": "permissive",
"max_line_length": 480,
"num_lines": 206,
"path": "/build/lib/FriedRing/__Conf/test_scripts/script.py",
"repo_name": "zerolugithub/FriedRing",
"src_encoding": "UTF-8",
"text": "import requests\n\n\ndef script():\n\n\n\n # start : 1467623525.76\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001507&t=1467623524806\",\n headers=headers)\n # end : 1467623525.8\n\n # insert the assert or other check point\n\n # start : 1467623525.76\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001501&t=1467623524815\",\n headers=headers)\n # end : 1467623525.8\n\n # insert the assert or other check point\n\n # start : 1467623525.76\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; 
BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001508&t=1467623524814&time=1467606424\",\n headers=headers)\n # end : 1467623525.8\n\n # insert the assert or other check point\n\n # start : 1467623526.2\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20004406&t=1467623525126\",\n headers=headers)\n # end : 1467623526.22\n\n # insert the assert or other check point\n\n # start : 1467623526.2\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001201&t=1467623525130\",\n headers=headers)\n # end : 1467623526.22\n\n # insert the assert or other check point\n\n # start : 1467623526.37\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": 
\"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=30000406&t=1467623525455\",\n headers=headers)\n # end : 1467623526.41\n\n # insert the assert or other check point\n\n # start : 1467623526.37\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=30000403&t=1467623525447\",\n headers=headers)\n # end : 1467623526.41\n\n # insert the assert or other check point\n\n # start : 1467623526.37\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001513&t=1467623525460\",\n 
headers=headers)\n # end : 1467623526.41\n\n # insert the assert or other check point\n\n # start : 1467623526.49\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=30000404&t=1467623525570\",\n headers=headers)\n # end : 1467623526.51\n\n # insert the assert or other check point\n\n # start : 1467623526.5\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=30000407&t=1467623525580\",\n headers=headers)\n # end : 1467623526.52\n\n # insert the assert or other check point\n\n # start : 1467623526.5\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; 
BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=20001516&t=1467623525604\",\n headers=headers)\n # end : 1467623526.52\n\n # insert the assert or other check point\n\n # start : 1467623526.62\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=300001175101&t=1467623525721\",\n headers=headers)\n # end : 1467623526.64\n\n # insert the assert or other check point\n\n # start : 1467623526.62\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": \"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=3000037201&t=1467623525729\",\n headers=headers)\n # end : 1467623526.64\n\n # insert the assert or other check point\n\n # start : 1467623527.69\n headers = {\"Host\": \"nsclick.baidu.com\", \"Proxy-Connection\": \"keep-alive\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.1; m2 note Build/LMY47D) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.81 Mobile Safari/537.36\",\n \"Accept\": \"image/webp,image/*,*/*;q=0.8\",\n \"Referer\": 
\"http://wapbaike.baidu.com/subview/7410/5040563.htm?fr=aladdin&ref=wise&ssid=0&from=844b&uid=0&pu=usm@2,sz@1320_1001,ta@iphone_2_5.1_3_537&bd_page_type=1&baiduid=D1D8246AC804B5D314977701F7E75E7D&tj=Xv_2_0_10_title\",\n \"Accept-Encoding\": \"gzip, deflate, sdch\", \"Accept-Language\": \"zh-CN,zh;q=0.8,en;q=0.6\",\n \"Cookie\": \"plus_cv=0::m:1-nav:250e8bac-hotword:a6eb63c2; plus_lsv=4388bdbef257d8ef; BAIDULOC=12960396.239553755_4834358.330481074_35_131_1467606415694; H5LOC=1; BAIDUID=D1D8246AC804B5D314977701F7E75E7D:FG=1\"}\n r = requests.get(\n \"http://nsclick.baidu.com/v.gif?pid=103&url=http%3A%2F%2Fwapbaike.baidu.com%2Fsubview%2F7410%2F5040563.htm%3Ffr%3Daladdin%26ref%3Dwise%26ssid%3D0%26from%3D844b%26uid%3D0%26pu%3Dusm%402%2Csz%401320_1001%2Cta%40iphone_2_5.1_3_537%26bd_page_type%3D1%26baiduid%3DD1D8246AC804B5D314977701F7E75E7D%26tj%3DXv_2_0_10_title&type=30000401&t=1467623526751&blockInfo={%22mobile-lemma-top-ad%22:{%22isValid%22:true},%22mobile-lemma-float-ad%22:{%22isValid%22:true}}&page=mobile-lemma\",\n headers=headers)\n # end : 1467623527.7\n\n # insert the assert or other check point\n\n\n\n # start : 1467623531.78\n"
},
{
"alpha_fraction": 0.6119980812072754,
"alphanum_fraction": 0.62070631980896,
"avg_line_length": 29.382352828979492,
"blob_id": "e7d246335ffc3d909d9debabe2fda8555a5725f2",
"content_id": "6f5a4478c98856b348f9aea130dfff8fd5efe296",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2067,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 68,
"path": "/build/lib/FriedRing/FriedRing.py",
"repo_name": "zerolugithub/FriedRing",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n#coding=utf-8\n'''\nauthor:Crisschan\ntime:2016-6-30\n'''\nfrom mitmproxy import controller, proxy,flow\nfrom mitmproxy.proxy import ProxyServer,ProxyConfig\nimport os\nimport sys\nimport pdb\nimport requests\nimport datetime\nfrom F2requests import F2requests\nfrom Cfg import Cfg\nclass FriedRing(controller.Master):\n #fscript =\n def __init__(self, server,fnamescript):\n\n curpath = os.path.abspath(os.curdir)\n fscriptsolutionpath = os.path.join(curpath,fnamescript)\n if not os.path.isdir(fscriptsolutionpath):\n os.makedirs(fscriptsolutionpath)\n else:\n fnamescript=fnamescript+str(datetime.datetime.now().microsecond)\n fscriptsolutionpath = os.path.join(curpath,fnamescript)\n os.makedirs(fscriptsolutionpath)\n\n Cfg(fscriptsolutionpath)\n self.fnamescript=str(fscriptsolutionpath)+'/test_scripts/script.py'\n print 'script solution path(include script files, config files and results:'+str(fscriptsolutionpath)\n\n controller.Master.__init__(self, server)\n self.f2r = F2requests(self.fnamescript)\n #def shutdown(self):\n # self.shutdown()\n def run(self):\n try:\n return controller.Master.run(self)\n except KeyboardInterrupt:\n self.shutdown()\n def handle_request(self, msg):\n #print msg\n req=msg.request\n print str(req.host)+str(req.path)\n self.f2r.F2Req(req)\n msg.reply()\n def handle_response(self, msgg):\n #print msg\n msgg.reply()\n res = msgg.response\n '''\n print res.status_code\n print res.headers\n print res.content+'\\n'\n print res.reason+'\\n'\n print res.timestamp_start+'\\n'\n print res.timestamp_end+'\\n'\n print '--------------------------------------\\n'\n '''\n'''if __name__ == '__main__':\n config = proxy.ProxyConfig(\n\t cadir = os.path.expanduser(\"~/.mitmproxy/\"),\n port=8888\n\t)\n server = proxy.ProxyServer(config)\n m = FriedRing(server)\n m.run()'''\n\n"
},
{
"alpha_fraction": 0.5775193572044373,
"alphanum_fraction": 0.5910852551460266,
"avg_line_length": 17.39285659790039,
"blob_id": "9a2d50e9e1993c472b3e4be6ae5ecfb1a3a16675",
"content_id": "073addf1488ef40aded505e8ff7cd3189f36feb9",
"detected_licenses": [
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 516,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 28,
"path": "/build/lib/FriedRing/__Conf/test_scripts/v_user.py",
"repo_name": "zerolugithub/FriedRing",
"src_encoding": "UTF-8",
"text": "\n#!/usr/bin/env python\n#coding=utf-8\n'''\nauthor:Crisschan\ntime:2016-7-5\nfrom:multimech\n'''\nimport script\nclass Transaction(object):\n def __init__(self):\n #self.custom_timers={}\n pass\n\n def run(self):\n #start_timer = time.time()\n script.script()\n #latency = time.time() - start_timer\n #user's transaction \n #self.custom_timers['Transaction_Custom']=latency\n\n\n\n\n\nif __name__ == '__main__':\n trans = Transaction()\n trans.run()\n #print trans.custom_timers\n"
}
] | 4 |
MariaAfanaseva/mini_jobninja | https://github.com/MariaAfanaseva/mini_jobninja | e4b481a851e70f5166c802b61df3eab6a11ea3d8 | 440e39ec636457c51e9683312c12b1d79f63cf67 | e59976eb65d69cac95463c1d2fea966bd5ab1430 | refs/heads/main | 2023-03-21T01:33:17.041558 | 2021-03-13T12:37:44 | 2021-03-13T12:37:44 | 343,819,847 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5976496934890747,
"alphanum_fraction": 0.6004476547241211,
"avg_line_length": 30.910715103149414,
"blob_id": "d6bca91fd461c99048916548ab68b769e873772a",
"content_id": "0d62d59f50d361006f0e0403d57bac365804c041",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1787,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 56,
"path": "/tests/test_model_methods.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom create_app import create_app\nfrom fill_db import UpdateDb\nfrom databases.sql_db import db\nfrom models.category import Category\nfrom models.vacancy import Vacancy\n\n\nclass ModelMethodsTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app(\"testing\")\n self.client = self.app.test_client()\n self.app_context = self.app.app_context()\n\n with self.app_context:\n update = UpdateDb(xml_file_path='data/test_feed.xml')\n update.recreate_db()\n\n def test_get_category_by_name(self):\n with self.app_context:\n category = Category.find_by_name('Medizin')\n self.assertEqual('Medizin', category.name)\n\n def test_get_jobs_by_category(self):\n with self.app_context:\n category = Category.find_by_name('Medizin')\n jobs = category.get_vacancies(3)\n self.assertEqual(3, len(list(jobs)))\n for job in jobs:\n category_names = [category.name for category in job.categories]\n self.assertIn('Medizin', category_names)\n\n def test_get_job_by_id(self):\n with self.app_context:\n job = Vacancy.find_by_id(3)\n self.assertEqual(3, job.id)\n\n def test_get_job_data(self):\n with self.app_context:\n job = Vacancy.find_by_id(3).dict()\n self.assertEqual(dict, type(job))\n self.assertIn('id', job)\n self.assertIn('title', job)\n self.assertIn('location', job)\n\n def tearDown(self):\n \"\"\"teardown all initialized variables.\"\"\"\n with self.app_context:\n db.session.remove()\n db.drop_all()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.6059369444847107,
"alphanum_fraction": 0.6096475124359131,
"avg_line_length": 36.95774459838867,
"blob_id": "e8e22efb473b185146c414766a9f5107a439bffa",
"content_id": "5be70e71f3275ed400c57fa4642ab9462eb977e3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2695,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 71,
"path": "/fill_db.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from xml.dom import minidom\nfrom databases.sql_db import db\nfrom models.category import Category\nfrom models.vacancy import Vacancy\nfrom create_app import create_app\nimport datetime\n\n\nclass UpdateDb:\n\n def __init__(self, xml_file_path):\n self.xml_file_path = xml_file_path\n\n @staticmethod\n def clear_db():\n db.drop_all()\n db.create_all()\n\n def _parse_xml(self):\n data = minidom.parse(self.xml_file_path)\n records = data.getElementsByTagName('record')\n return records\n\n def fill_categories(self):\n records = self._parse_xml()\n categories = set()\n\n for record in records:\n category_names = record.getElementsByTagName('category')[0].firstChild.data\n for name in category_names.split(', '):\n if name not in categories:\n categories.add(name)\n category = Category(name=name)\n category.save_to_db()\n\n def fill_vacancies(self):\n records = self._parse_xml()\n\n for record in records:\n title = record.getElementsByTagName('titel')[0].firstChild.data\n firm = record.getElementsByTagName('firma')[0].firstChild.data\n text = record.getElementsByTagName('volltext')[0].firstChild.data\n postcode = record.getElementsByTagName('plz_arbeitsort')[0].firstChild.data\n location = record.getElementsByTagName('arbeitsort')[0].firstChild.data\n from_date = record.getElementsByTagName('vondatum')[0].firstChild.data\n job_link = record.getElementsByTagName('stellenlink')[0].firstChild.data\n job_type = record.getElementsByTagName('jobtype')[0].firstChild.data\n category_names = record.getElementsByTagName('category')[0].firstChild.data\n\n day, month, year = from_date.split('-')\n from_date = datetime.date(int(year), int(month), int(day))\n vacancy = Vacancy(title=title, firm=firm, description=text,\n location_postcode=postcode, location=location,\n from_date=from_date, job_link=job_link, job_type=job_type)\n vacancy.save_to_db()\n for name in category_names.split(', '):\n category = Category.find_by_name(name=name)\n category.vacancies.append(vacancy)\n category.save_to_db()\n\n def recreate_db(self):\n self.clear_db()\n self.fill_categories()\n self.fill_vacancies()\n\n\nif __name__ == '__main__':\n app = create_app('development')\n with app.app_context():\n update = UpdateDb(xml_file_path='data/jobs_feed.xml')\n update.recreate_db()\n"
},
{
"alpha_fraction": 0.5789473652839661,
"alphanum_fraction": 0.5789473652839661,
"avg_line_length": 29.61111068725586,
"blob_id": "ab8ce78ae42d7e2dd4751d7fe949e63c2b258c24",
"content_id": "a3da93535e8fd3c90bad771bce531fa514fe8bde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/job_app/views.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, render_template, redirect, url_for\n\nfrom models.vacancy import Vacancy\n\njob_page = Blueprint('job_page', __name__,\n url_prefix='/jobs',\n static_folder='static',\n static_url_path='/',\n template_folder='templates')\n\n\n@job_page.route('/<int:job_id>')\ndef job(job_id):\n vacancy = Vacancy.find_by_id(job_id)\n if vacancy:\n return render_template('job.html', job=vacancy)\n else:\n return redirect(url_for('search_page.index'))\n"
},
{
"alpha_fraction": 0.7345277070999146,
"alphanum_fraction": 0.7345277070999146,
"avg_line_length": 24.58333396911621,
"blob_id": "f3cdc65c3138d1e3642de6f0e09c2a8223e9028f",
"content_id": "80dcc3174688382e299adf99419c0b2c690de26c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 614,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 24,
"path": "/create_app.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_cors import CORS\nfrom databases.sql_db import db\nfrom configs.app_configs.config import app_config\n\nfrom search_app.views import search_page\nfrom job_app.views import job_page\n\n\ndef create_app(config_name):\n flask_app = Flask(__name__)\n flask_app.config.from_object(app_config[config_name])\n flask_app.config.from_pyfile('configs/app_configs/config.py')\n\n db.init_app(flask_app)\n\n flask_app.secret_key = flask_app.config['SECRET']\n\n CORS(flask_app)\n\n flask_app.register_blueprint(search_page)\n flask_app.register_blueprint(job_page)\n\n return flask_app\n"
},
{
"alpha_fraction": 0.7874395847320557,
"alphanum_fraction": 0.7922705411911011,
"avg_line_length": 45,
"blob_id": "3e3915cc11488cdb58e6b82f86cf8124ef2bb4e9",
"content_id": "daa5b53a75cfe3d15c0b5be359fb1bf5b73c3a02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 9,
"path": "/tests/tests.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom tests.test_search_page import SearchPageTestCase\nfrom tests.test_model_methods import ModelMethodsTestCase\n\nif __name__ == \"__main__\":\n search = unittest.TestLoader().loadTestsFromModule(SearchPageTestCase)\n models = unittest.TestLoader().loadTestsFromModule(ModelMethodsTestCase)\n unittest.TextTestRunner(verbosity=2).run(search)\n unittest.TextTestRunner(verbosity=2).run(models)\n"
},
{
"alpha_fraction": 0.6808510422706604,
"alphanum_fraction": 0.7276595830917358,
"avg_line_length": 25.11111068725586,
"blob_id": "80901deff6d6f8a984c15ea1d37c18b05edbda02",
"content_id": "64092185946b7aac26d3992fd354b441bd5fd3df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 9,
"path": "/Dockerfile",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "FROM python:3\nADD . /code\nWORKDIR /code\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\nRUN python -m pip install --upgrade pip\nRUN pip install -r requirements.txt\nENTRYPOINT [\"python\"]\nCMD [\"app.py\", \"--host\", \"0.0.0.0\", \"5000\"]\n"
},
{
"alpha_fraction": 0.48730605840682983,
"alphanum_fraction": 0.4894217252731323,
"avg_line_length": 32.761905670166016,
"blob_id": "23b6e48e3619ffc816edc0d8fa107801a3f7ee3e",
"content_id": "a688d08edb8a255625953ae5e7788aa8a148b843",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1418,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 42,
"path": "/models/category.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from databases.sql_db import db\n\nvacancy_categories = db.Table('vacancy_categories',\n db.Column('vacancy_id',\n db.Integer,\n db.ForeignKey('vacancies.id'),\n primary_key=True),\n db.Column('category_id',\n db.Integer,\n db.ForeignKey('categories.id'),\n primary_key=True)\n )\n\n\nclass Category(db.Model):\n __tablename__ = \"categories\"\n\n id = db.Column(db.Integer(), primary_key=True)\n name = db.Column(db.String(128), nullable=False, unique=True)\n vacancies = db.relationship('Vacancy', secondary=vacancy_categories, lazy='dynamic',\n backref=db.backref('categories', lazy=True))\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return f'{self.name}'\n\n @classmethod\n def find_by_name(cls, name):\n return cls.query.filter_by(name=name).first()\n\n def get_vacancies(self, quantity):\n return self.vacancies.limit(quantity).all()\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()\n"
},
{
"alpha_fraction": 0.6692014932632446,
"alphanum_fraction": 0.7062737345695496,
"avg_line_length": 31.875,
"blob_id": "1b976800ca947e3392c0c6ec883a87606a45035c",
"content_id": "578fbaef4d66de51158a85722760f7838c2155dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1052,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 32,
"path": "/migrations/versions/c539d02f890a_rename_workplace_to_location.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "\"\"\"Rename workplace to location.\n\nRevision ID: c539d02f890a\nRevises:\nCreate Date: 2021-03-12 09:33:21.267981\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c539d02f890a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('vacancies', 'workplace', nullable=False, new_column_name='location')\n op.alter_column('vacancies', 'workplace_postcode', nullable=False, new_column_name='location_postcode')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('vacancies', sa.Column('workplace_postcode', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('vacancies', sa.Column('workplace', sa.VARCHAR(length=128), autoincrement=False, nullable=True))\n op.drop_column('vacancies', 'location_postcode')\n op.drop_column('vacancies', 'location')\n # ### end Alembic commands ###\n"
},
{
"alpha_fraction": 0.7046070694923401,
"alphanum_fraction": 0.7235772609710693,
"avg_line_length": 23.600000381469727,
"blob_id": "42353f73030c9667e5211d37004bd5b0447d6f6a",
"content_id": "7fedf8b3a7679518b1bc617485f71c8adc5406f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/app.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from flask import render_template, jsonify, request, redirect, url_for\nfrom create_app import create_app\nfrom models.category import Category\nfrom models.vacancy import Vacancy\n\napp = create_app('development')\n\n\[email protected](404)\ndef page_not_found(error):\n return redirect(url_for('search_page.index'))\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n"
},
{
"alpha_fraction": 0.662162184715271,
"alphanum_fraction": 0.6707616448402405,
"avg_line_length": 36,
"blob_id": "8be57ef27b18e31338ff577215bda214f06bb75d",
"content_id": "e88e2aad5c320bb7ddd28247d100ed50c922674c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 814,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 22,
"path": "/search_app/views.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, render_template, request, jsonify\n\nfrom models.vacancy import Vacancy\n\nsearch_page = Blueprint('search_page', __name__,\n static_folder='static/search/dist/',\n static_url_path='/')\n\n\n@search_page.route('/')\ndef index():\n return search_page.send_static_file('index.html')\n\n\n@search_page.route('/search')\ndef search():\n page = request.args.get('page', 1, type=int)\n keywords = request.args.getlist('keyword')\n locations = request.args.getlist('where')\n search_lst = Vacancy.search_vacancies(page, 10, keywords, locations)\n pages = [page for page in search_lst.iter_pages(left_edge=2, left_current=2, right_current=3, right_edge=2)]\n return jsonify(jobs=[vacancy.dict() for vacancy in search_lst.items], totalPages=pages)\n"
},
{
"alpha_fraction": 0.5218884348869324,
"alphanum_fraction": 0.5298283100128174,
"avg_line_length": 41.75229263305664,
"blob_id": "b1736ec689422502417e30479bbc61e4df41c959",
"content_id": "7110edbd92396e37debcb68e724cbb3883360690",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4660,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 109,
"path": "/tests/test_search_page.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "import unittest\nimport json\nfrom create_app import create_app\nfrom fill_db import UpdateDb\nfrom databases.sql_db import db\nfrom models.category import Category\n\n\nclass SearchPageTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app(\"testing\")\n self.client = self.app.test_client()\n self.test_data = 5\n self.app_context = self.app.app_context()\n\n with self.app_context:\n update = UpdateDb(xml_file_path='data/test_feed.xml')\n update.recreate_db()\n\n def test_get_jobs(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(list, type(data['totalPages']))\n self.assertEqual(self.test_data, len(data['jobs']))\n self.assertEqual(1, len(data['totalPages']))\n\n def test_get_jobs_with_location(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'where': 'Schwerin'})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(1, len(data['jobs']))\n self.assertEqual(1, len(data['totalPages']))\n\n def test_get_jobs_with_keyword(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'keyword': 'Oberarzt'})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(4, len(data['jobs']))\n self.assertEqual(1, len(data['totalPages']))\n\n def test_get_jobs_with_keyword_and_location(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'where': 'Schwerin',\n 'keyword': 'Oberarzt',\n 'page': 1})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(1, len(data['jobs']))\n self.assertEqual(1, len(data['totalPages']))\n\n def test_get_jobs_with_wrong_page(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'page': 2})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(0, len(data['jobs']))\n self.assertEqual(1, len(data['totalPages']))\n\n def test_get_jobs_with_wrong_location(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'where': 'dddddddd'})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(0, len(data['jobs']))\n self.assertEqual(0, len(data['totalPages']))\n\n def test_get_jobs_with_wrong_keyword(self):\n with self.app_context:\n res = self.client.get('/search',\n headers={\"Content-Type\": \"application/json\"},\n query_string={'keyword': 'aaaaaaa'})\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 200)\n self.assertEqual(list, type(data['jobs']))\n self.assertEqual(0, len(data['jobs']))\n self.assertEqual(0, len(data['totalPages']))\n\n def tearDown(self):\n \"\"\"teardown all initialized variables.\"\"\"\n with 
self.app_context:\n db.session.remove()\n db.drop_all()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
},
{
"alpha_fraction": 0.5259259343147278,
"alphanum_fraction": 0.7037037014961243,
"avg_line_length": 18.285715103149414,
"blob_id": "a5626c0a4d20a6663294b3eb82cb7050dad41213",
"content_id": "8a9b00f2bb6331a6985cd74af368dfb09d71992b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 7,
"path": "/requirements.txt",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "Flask>=1.1.2\nFlask-SQLAlchemy>=2.4.4\npython-dotenv>=0.15.0\npsycopg2>=2.8.6\nflask-migrate>=2.7.0\nflask-script>=2.0.6\nflask-cors>=3.0.10\n"
},
{
"alpha_fraction": 0.6635311245918274,
"alphanum_fraction": 0.6649782657623291,
"avg_line_length": 25.576923370361328,
"blob_id": "3bc06549f86e4bd78c5ed27c9af4571934a3cf66",
"content_id": "ac2d2094cca1a72a0a27af4ea7d13561cf6d86b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1382,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 52,
"path": "/configs/app_configs/config.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "import os\nfrom dotenv import load_dotenv\n\nload_dotenv('configs/env_configs/.env')\n\n\nclass Config(object):\n \"\"\"Parent configuration class.\"\"\"\n DEBUG = False\n SECRET = os.getenv('SECRET')\n user = os.getenv(\"POSTGRES_USER\")\n password = os.getenv(\"POSTGRES_PASSWORD\")\n hostname = os.getenv(\"POSTGRES_HOSTNAME\")\n port = os.getenv(\"POSTGRES_PORT\")\n database = os.getenv(\"APPLICATION_DB\")\n SQLALCHEMY_DATABASE_URI = (\n f\"postgresql+psycopg2://{user}:{password}@{hostname}:{port}/{database}\"\n )\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass DevelopmentConfig(Config):\n \"\"\"Configurations for Development.\"\"\"\n DEBUG = True\n PROPAGATE_EXCEPTIONS = True\n\n\nclass TestingConfig(Config):\n \"\"\"Configurations for Testing, with a separate test database.\"\"\"\n TESTING = True\n DEBUG = True\n user = os.getenv(\"POSTGRES_USER\")\n password = os.getenv(\"POSTGRES_PASSWORD\")\n hostname = os.getenv(\"POSTGRES_HOSTNAME\")\n port = os.getenv(\"POSTGRES_PORT\")\n database = os.getenv(\"TEST_DB\")\n SQLALCHEMY_DATABASE_URI = (\n f\"postgresql+psycopg2://{user}:{password}@{hostname}:{port}/{database}\"\n )\n\n\nclass ProductionConfig(Config):\n \"\"\"Configurations for Production.\"\"\"\n DEBUG = False\n TESTING = False\n\n\napp_config = {\n 'development': DevelopmentConfig,\n 'testing': TestingConfig,\n 'production': ProductionConfig,\n}\n"
},
{
"alpha_fraction": 0.7397260069847107,
"alphanum_fraction": 0.7397260069847107,
"avg_line_length": 18.909090042114258,
"blob_id": "a1a457c67503f60a423867e3e090bd0e84cda9be",
"content_id": "1192b357dee5e398af981251ee8e37f950bb1f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 22,
"path": "/manage.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\n\nfrom app import app\nfrom databases.sql_db import db\n\n\"\"\"\n\nTo create migrations folder: python manage.py db init\nTo make migrations: python manage.py db migrate\nTo update database: python manage.py db upgrade\n\n\"\"\"\n\nmigrate = Migrate(app, db)\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n manager.run()\n"
},
{
"alpha_fraction": 0.6021462678909302,
"alphanum_fraction": 0.6077106595039368,
"avg_line_length": 32.105262756347656,
"blob_id": "54614efcae62cbb8958e4155c17341fc4cf525b5",
"content_id": "7c0aa4f3870e205986fbe47cd283b7cdfe5392cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2516,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 76,
"path": "/models/vacancy.py",
"repo_name": "MariaAfanaseva/mini_jobninja",
"src_encoding": "UTF-8",
"text": "from databases.sql_db import db\n\n\nclass Vacancy(db.Model):\n __tablename__ = \"vacancies\"\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(255), nullable=False)\n firm = db.Column(db.String(128), nullable=False)\n description = db.Column(db.Text(), nullable=False)\n location_postcode = db.Column(db.Integer())\n location = db.Column(db.String(128))\n from_date = db.Column(db.Date())\n job_link = db.Column(db.String(128))\n job_type = db.Column(db.String(80))\n\n def __init__(self, title, firm, description,\n location_postcode, location,\n from_date, job_link, job_type):\n self.title = title\n self.firm = firm\n self.description = description\n self.location_postcode = location_postcode\n self.location = location\n self.from_date = from_date\n self.job_link = job_link\n self.job_type = job_type\n\n def __repr__(self):\n return f'{self.title}'\n\n def dict(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'location': self.location\n }\n\n @classmethod\n def find_by_id(cls, vacancy_id):\n return cls.query.filter_by(id=vacancy_id).first()\n\n @classmethod\n def _create_keywords_conditions(cls, keywords):\n conditions = []\n for keyword in keywords:\n conditions.append(cls.title.ilike(f'%{keyword}%'))\n conditions.append(cls.description.ilike(f'%{keyword}%'))\n conditions.append(cls.firm.ilike(f'%{keyword}%'))\n return conditions\n\n @classmethod\n def _create_location_conditions(cls, locations):\n conditions = []\n for location in locations:\n conditions.append(cls.location.ilike(f'%{location}%'))\n return conditions\n\n @classmethod\n def search_vacancies(cls, page, quantity, keywords, locations):\n data = cls.query\n if locations:\n locations_conditions = cls._create_location_conditions(locations)\n data = data.filter(db.and_(*locations_conditions))\n if keywords:\n keywords_conditions = cls._create_keywords_conditions(keywords)\n data = data.filter(db.or_(*keywords_conditions))\n return data.order_by(cls.from_date.desc()).paginate(page, quantity, error_out=False)\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n def delete_from_db(self):\n db.session.delete(self)\n db.session.commit()\n"
}
] | 15 |
jahil/stupify | https://github.com/jahil/stupify | 2b5b95ca9411ad451b489f2dc64965486696b0be | a4ecaa8f2c11f3d89231cda4900b5b3bc0c8d1de | 073d8e7261598be99a176728073ec857e5a6435d | refs/heads/master | 2021-01-01T05:35:36.983495 | 2012-08-05T12:25:57 | 2012-08-05T12:25:57 | 1,284,382 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5415617227554321,
"alphanum_fraction": 0.5659109950065613,
"avg_line_length": 21.331249237060547,
"blob_id": "3ec671aff28b87278f75455532ebe53b76e9d261",
"content_id": "0ca49e27af2a86aa2e8ea356931e933b09f5bb47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3573,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 160,
"path": "/tools/pong.c",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "/*\n * By Patrick Reynolds <[email protected]>\n * Distributed under GPL.\n *\n * Build instructions\n * gcc pong.c -o pong\n *\n * Usage:\n * pong 1.2.3.0 # as root!\n */\n\n#include <errno.h>\n#include <signal.h>\n#include <stdio.h>\n#include <string.h>\n#include <unistd.h>\n#include <arpa/inet.h>\n#include <netinet/in.h>\n#include <netinet/ip_icmp.h>\n#include <sys/socket.h>\n\n#define PONG_DEAD 0\n#define PONG_BROKEN 1\n#define PONG_OKAY 2\n\n#undef I_CARE_ABOUT_BAD_RETURN_PACKETS\n\nint id, sock;\nint datalen = 0;\nint nhosts = 256;\nint vec[256];\nint foo_set = 0;\nstruct sockaddr_in dest;\n\nvoid pong();\nvoid catch();\nint in_cksum(u_short *addr, int len);\nvoid print();\nvoid foo(int i) { foo_set = 1; }\n\nint main(int argc, char **argv) {\n id = getpid();\n if ((sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP)) < 0) {\n perror(\"pong: socket\");\n return -1;\n }\n if (argc != 2) {\n fprintf(stderr, \"Usage:\\n %s aaa.bbb.ccc.0\\n\\n\", argv[0]);\n return -1;\n }\n bzero(&dest, sizeof(dest));\n dest.sin_family = AF_INET;\n inet_aton(argv[1], &dest.sin_addr);\n pong();\n catch();\n print();\n return 0;\n}\n\nvoid pong() {\n unsigned char buf[sizeof(struct icmphdr)];\n struct icmphdr *p = (struct icmphdr *)buf;\n int ret, i;\n\n\tfprintf(stderr, \"Sending packets\");\n\n p->type = ICMP_ECHO;\n p->code = 0;\n p->checksum = 0;\n p->un.echo.sequence = 0;\n p->un.echo.id = id;\n\n p->checksum = in_cksum((u_short *)p, datalen + 8);\n\n dest.sin_addr.s_addr &= 0xffffff;\n for (i=1; i<=254; i++) {\n\t\tfputc('.', stderr);\n\t\tfflush(stderr);\n dest.sin_addr.s_addr &= 0xffffff;\n dest.sin_addr.s_addr |= (i<<24);\n ret = sendto(sock, (char *)buf, 8 + datalen, 0,\n (struct sockaddr *)&dest, sizeof(struct sockaddr_in));\n if (ret != 8 + datalen) {\n if (ret < 0)\n perror(\"pong: sendto\");\n printf(\"pong: wrote %d chars, ret=%d\\n\", 8+datalen, ret);\n }\n }\n\tfputc('\\n', stderr);\n}\n\nvoid catch() {\n int ret;\n char incoming[128];\n\tfprintf(stderr, \"Gathering responses\");\n bzero(vec, nhosts*sizeof(int));\n alarm(5);\n signal(SIGALRM, foo);\n signal(SIGINT, foo);\n while (!foo_set) {\n struct sockaddr_in from;\n\t\tfd_set rfds;\n\t\tstruct timeval tv = { 1, 0 };\n socklen_t fromlen = sizeof(from);\n\t\tFD_ZERO(&rfds);\n\t\tFD_SET(sock, &rfds);\n\t\tif (select(sock+1, &rfds, NULL, NULL, &tv) <= 0) continue;\n ret = recvfrom(sock, (char *)incoming, 128, 0, (struct sockaddr *)&from,\n &fromlen);\n#ifdef I_CARE_ABOUT_BAD_RETURN_PACKETS\n if (ret != datalen + 16 && (ret >= 0 || errno != EINTR)) {\n if (ret < 0)\n perror(\"pong: recvfrom\");\n else\n vec[from.sin_addr.s_addr >> 24] = PONG_BROKEN;\n printf(\"pong: read %d chars, ret=%d, host=%d\\n\", 16+datalen, ret,\n from.sin_addr.s_addr>>24);\n }\n else\n#endif\n\t\t{\n\t\t\tfputc('.', stderr);\n\t\t\tfflush(stderr);\n vec[from.sin_addr.s_addr >> 24] = PONG_OKAY;\n\t\t}\n }\n\tfputc('\\n', stderr);\n}\n\nint in_cksum(u_short *addr, int len) {\n register int nleft = len;\n register u_short *w = addr;\n register int sum = 0;\n u_short answer = 0;\n \n while (nleft > 1) {\n sum += *w++;\n nleft -= 2;\n }\n if (nleft == 1) {\n *(u_char *)(&answer) = *(u_char *)w ;\n sum += answer;\n }\n sum = (sum >> 16) + (sum & 0xffff);\n sum += (sum >> 16);\n answer = ~sum;\n return(answer);\n}\n\nvoid print() {\n int i, count = 0;\n for (i=0; i<=255; i++)\n if (vec[i]) {\n unsigned int ip = ntohl(dest.sin_addr.s_addr);\n count++;\n printf(\"%d.%d.%d.%d: %s\\n\", ip>>24, (ip>>16)&0xff, (ip>>8)&0xff, i,\n vec[i]==PONG_OKAY ? 
\"found\" : \"broken\");\n }\n printf(\"\\nTotal hosts: %d\\n\", count);\n}\n"
},
{
"alpha_fraction": 0.4887218177318573,
"alphanum_fraction": 0.5250626802444458,
"avg_line_length": 40.97368240356445,
"blob_id": "efd22b05c5e6da7c0b365070ec951d0c60191d8e",
"content_id": "dbb5db09428099359cf7a4d59ae243ee77cd0d21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1596,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 38,
"path": "/tools/ppassword",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# GRC's Perfect Passwords Generator\n# Mohsin Mirza <[email protected]>\n\nWGET=\"/usr/bin/wget\"\nFILE=`tempfile`\n$WGET -q --tries=10 --timeout=5 http://www.google.com -O $FILE &> /dev/null\n[ -z \"$1\" ] && echo -e \"\\E[31mGRC's\\E[37m Ultra High Security Password Generator v0.1\\r\\033[0m\" && echo -e '--------------------------------------------------\\n' && \n\necho -e '\\E[37mUsage: ppassword [\\E[31m-an alphanumeric\\E[37m ] [\\E[32m-as ascii\\E[37m ] [ -hex hexadecimal ]' && exit 100\n\ncase \"$1\" in\n\n -an)\n\t\tif [ ! -s $FILE ];then\n\t\techo \"ERROR: Please check you internet connectivity.\"\n\t\telse\n curl -s https://www.grc.com/passwords.htm | grep alpha | head -n1 | perl -wlne 'm/font>(.*)<\\/font/i && print $1' | sed -e \"s/°//g\" | cut -d\" \" -f14 | sed 's/size=2>//g'\n\t\tfi\n ;;\n -as)\n\t\tif [ ! -s $FILE ];then\n\t\techo \"ERROR: Please check you internet connectivity.\"\n\t\telse\n curl -s https://www.grc.com/passwords.htm | grep ASCII | head -n1 | perl -wlne 'm/font>(.*)<\\/font/i && print $1' | sed -e \"s/°//g\" | cut -d\" \" -f14 | sed 's/size=2>//g'\n\t\tfi\n ;;\n -hex)\n\t\tif [ ! -s $FILE ];then\n\t\techo \"ERROR: Please check you internet connectivity.\"\n\t\telse\n curl -s https://www.grc.com/passwords.htm | grep hexadecimal | head -n1 | perl -wlne 'm/font>(.*)<\\/font/i && print $1' | sed -e \"s/°//g\" | cut -d\" \" -f14 | sed 's/size=2>//g'\n\t\tfi\n ;;\n *)\n\t\techo -e '\\E[37mUsage: ppassword [\\E[31m-an alphanumeric\\E[37m ] [\\E[32m-as ascii\\E[37m ] [\\E[37m -hex hexadecimal ]'\n ;;\nesac\n\n"
},
{
"alpha_fraction": 0.47171565890312195,
"alphanum_fraction": 0.47822850942611694,
"avg_line_length": 46.98214340209961,
"blob_id": "7e342e94c173f5ac189dbea2f36d918c9c98b018",
"content_id": "91149c3ea5bd70ab5b58c0b0a89016c70dd5754a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 5374,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 112,
"path": "/scripts/mysql_backup",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Database Backup script.\n# Created By: Mohammed Salih\n# System Administrator\n# MBC Group / New Media Department\n# Date: 21/06/2007\n\n# Database credentials\nDB_USER=root\nDB_PASS=\"password\"\n\n\n# Get list of Databases except the pid file\n#DBS_LIST=$(ls /data/mysql|grep -v \"$hostname.pid\")\nDBS_LIST=$(ls /data/db|grep -v \"$hostname.pid\")\n# Log file\nBAKUP_LOG=/var/log/db-backup.log\n# Backup Base directory\nBASE_BAK_FLDR=/data/backup\n# bin-log files location\nBINLOG_FLDR=/data/logs/binlog/\n# Index file of bin-log\nINCR_INDEX=$BINLOG_FLDR/incr-log.index\n# Full backup file rotation threshold.\nRM_FLDR_DAYS=\"+14\"\n# Incremental backup files rotation threshold\nRM_INCR_DAYS=\"+7\"\n# From here, only edit if you know what you are doing.\nindex=0\n\n# Check if we can connect to the mysql server; otherwise die\n#if [ ! \"$(id -u -n)\" = \"mysql\" ]; then\n# echo -e \"Error:: $0 : Only user 'mysql' can run this script\"\n# exit 100\n#fi\n\nPING=$(mysqladmin ping -u $DB_USER $DB_PASS 2>/dev/null)\nif [ \"$PING\" != \"mysqld is alive\" ]; then\n echo \"Error:: Unable to connected to MySQL Server, exiting !!\"\n exit 101\nfi\n\n\ncase \"$1\" in\n full)\n # Full backup.\n # Flush logs prior to the backup. any changes recorded in\n # the bin-log after this will backed up in next incremental/full backup.\n mysql -u $DB_USER $DB_PASS -e \"FLUSH LOGS\"\n # Loop through the DB list and create table level backup,\n # applying appropriate option for MyISAM and InnoDB tables.\n for DB in $DBS_LIST; do\n DB_BKP_FLDR=$BASE_BAK_FLDR/full/$(date +%d-%m-%Y)/$DB\n [ ! -d $DB_BKP_FLDR ] && mkdir -p $DB_BKP_FLDR\n # Get the schema of database with the stored procedures.\n # This will be the first file in the database backup folder\n mysqldump -u $DB_USER $DB_PASS -R -d --single-transaction $DB | \\\n gzip -c > $DB_BKP_FLDR/000-DB_SCHEMA.sql.gz\n index=0\n #Get the tables and its type. Store it in an array.\n table_types=($(mysql -u $DB_USER $DB_PASS -e \"show table status from $DB\" | \\\n awk '{ if ($2 == \"MyISAM\" || $2 == \"InnoDB\") print $1,$2}'))\n table_type_count=${#table_types[@]}\n # Loop through the tables and apply the mysqldump option according to the table type\n # The table specific SQL files will not contain any create info for the table schema.\n # It is available in the curresponding SCHEMA file\n while [ \"$index\" -lt \"$table_type_count\" ]; do\n START=$(date +%s)\n TYPE=${table_types[$index + 1]}\n table=${table_types[$index]}\n echo -en \"$(date) : backup $DB : $table : $TYPE \"\n if [ \"$TYPE\" = \"MyISAM\" ]; then\n DUMP_OPT=\"-u $DB_USER $DB_PASS $DB --no-create-info --tables \"\n else\n DUMP_OPT=\"-u $DB_USER $DB_PASS $DB --no-create-info --single-transaction --tables\"\n fi\n mysqldump $DUMP_OPT $table |gzip -c > $DB_BKP_FLDR/$table.sql.gz\n index=$(($index + 2))\n echo -e \" - Total time : $(($(date +%s) - $START))\\n\"\n done\n done\n # Rotating old backup. according to the 'RM_FLDR_DAYS'\n if [ ! -z \"$RM_FLDR_DAYS\" ]; then\n echo -en \"$(date) : removing folder : \"\n find $BASE_BAK_FLDR/full/ -maxdepth 1 -mtime $RM_FLDR_DAYS -type d -exec rm -rf {} \\;\n echo\n fi\n ;;\n incr)\n # Incremental Backup\n # Flush log and backup the last bin-log file prior to the 'flush logs' command.\n if [ ! -z \"$RM_INCR_DAYS\" ]; then\n LST_FILE=$(tail -1 $INCR_INDEX)\n mysql -u $DB_USER $DB_PASS -e \"FLUSH LOGS\"\n [ ! 
-d $BASE_BAK_FLDR/incremental ] && mkdir -p $BASE_BAK_FLDR/incremental\n echo -e \"$(date) : Incremental backup : $LST_FILE : Started\"\n # Compress and store the bin-log file in the incremental folder\n gzip -c $LST_FILE > $BASE_BAK_FLDR/incremental/$(date +%d-%m-%Y).gz\n echo -e \"$(date) : Incremental backup : $LST_FILE : Finished\"\n echo -en \"$(date) : removing file : \"\n find $BASE_BAK_FLDR/incremental/ -maxdepth 1 -mtime $RM_INCR_DAYS -type f \\\n -exec rm -fv {} \\;\n echo\n fi\n ;;\n *)\n # Usage help.\n echo \"Usage:: $0 (full|incr)\"\n echo \"full: Full backup\"\n echo \"incr: Incremental backup\"\n ;;\nesac\n"
},
{
"alpha_fraction": 0.6127744317054749,
"alphanum_fraction": 0.6277444958686829,
"avg_line_length": 24.049999237060547,
"blob_id": "7332c67c19a8f656d6529e682d7dbe660e4da2a2",
"content_id": "1020aea92c0c3d2fa72ccdc8a44d9b8940adc2f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 40,
"path": "/tools/megaupload-dl",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n# Megaupload Premium Downloader for Linux 0.2\n# Mohsin Mirza <[email protected]>\n# ex: megaupload-dl filename\n# ex: cat filename | megaupload-dl\n\nFILE=\"\"\nif [ \"$1\" == \"\" ]; then\n FILE=\"/dev/stdin\"\nelse\n FILE=\"$1\"\n if [ ! -f $FILE ]; then\n echo \"$FILE : does not exists\"\n exit 1\n elif [ ! -r $FILE ]; then\n echo \"$FILE: can not read\"\n exit 2\n fi\nfi\n# Variables\n\nUSERNAME=YOUR_USERNAME\nPASS=YOUR_PASSWORD\nCOOKIEJAR=$HOME/.cookies\nif [ ! -d $COOKIEJAR ]; then\n mkdir $COOKIEJAR\nfi\n\n## Do not change anything below here ##\n\nwget --save-cookies $COOKIEJAR/.megaupload --post-data \"login=1&redir=1&username=$USERNAME&password=$PASS\" -O - http://www.megaupload.com/index.php?c=login > /dev/null\nexec 3<&0\nexec 0<$FILE\nwhile read lines\ndo\nlinks=$lines\nfilenames=`curl -s $lines | grep \"File name\" | perl -wlne'm/span>(.*)<\\/span/i && print $1' | cut -d \">\" -f2`\necho $filenames; curl -L --cookie $COOKIEJAR/.megaupload $links -# -C - -o \"$filenames\"\ndone\nexec 0<&3\n"
},
{
"alpha_fraction": 0.6809248328208923,
"alphanum_fraction": 0.7138728499412537,
"avg_line_length": 35.787235260009766,
"blob_id": "4920c47e9d44e84cfc71c0f1aecf4e7f656f3cef",
"content_id": "e7d16563d69d520db5a9f0022a624b725cb9cbd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1730,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 47,
"path": "/aws/user-data.example",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "#!/bin/bash -ex\n# Development AppServer user-data script for troyworks.com\n# vsftpd + MySQL Server Configuration\n# Mohsin Mirza <[email protected]\necho BEGIN\nexec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1\nsed -i 's/mnt/data/g' /etc/fstab\numount /mnt\nmkdir /data\nmount -a\nmkdir /data/lib\nservice mysql stop\nsed -i '46s/var\\//data\\//' /etc/mysql/my.cnf\nsed -i '30s/var\\//data\\//' /etc/apparmor.d/usr.sbin.mysqld\nsed -i '31s/var\\//data\\//' /etc/apparmor.d/usr.sbin.mysqld\n/etc/init.d/apparmor restart\nmv /var/lib/mysql /data/lib/\nservice mysql start\n/etc/init.d/apache2 stop\nsed -i '4s/var\\//data\\//' /etc/apache2/sites-available/default\nsed -i '9s/var\\//data\\//' /etc/apache2/sites-available/default\nmv /var/www/ /data/\n/etc/init.d/apache2 start\napt-get install -y mkpasswd vsftpd\necho \"local_umask=022\" >> /etc/vsftpd.conf\necho \"write_enable=YES\" >> /etc/vsftpd.conf\necho \"chroot_local_user=YES\" >> /etc/vsftpd.conf\necho \"pasv_enable=YES\" >> /etc/vsftpd.conf\necho \"pasv_min_port=65000\" >> /etc/vsftpd.conf\necho \"pasv_max_port=65534\" >> /etc/vsftpd.conf\nIP=`curl -s http://169.254.169.254/2009-04-04/meta-data/public-ipv4`\necho \"pasv_address=$IP\" >> /etc/vsftpd.conf\nservice vsftpd restart\nuseradd yasir -d /data/yasir -m -s /bin/bash -p `mkpasswd password`\nuseradd django -d /data/django -m -p `mkpasswd password`\ncp /etc/sudoers /etc/sudoers.bak\ncp /etc/sudoers /etc/sudoers.tmp\nchmod 0640 /etc/sudoers.tmp\necho \"yasir ALL=(ALL) NOPASSWD:ALL\" >> /etc/sudoers.tmp\nchmod 0440 /etc/sudoers.tmp\nmv /etc/sudoers.tmp /etc/sudoers\nuseradd ftpier -d /data/www -p `mkpasswd passowrd`\nchown -R ftpier.ftpier /data/www\necho \"mohsin\" >> /etc/ftpusers\necho \"ubuntu\" >> /etc/ftpusers\necho \"yasir\" >> /etc/ftpusers\necho END\n\n"
},
{
"alpha_fraction": 0.5726826786994934,
"alphanum_fraction": 0.5837851762771606,
"avg_line_length": 38.121212005615234,
"blob_id": "39debf13b99392ef8bac55ff3535f7071db5837a",
"content_id": "e3bad4175a297665cbec117379d253e2d578181c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Python",
"length_bytes": 7746,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 198,
"path": "/falkia/fabfile.py",
"repo_name": "jahil/stupify",
"src_encoding": "UTF-8",
"text": "from fabric.api import *\nfrom fabric.colors import red, green, yellow, blue, _wrap_with\nfrom fabric.utils import puts\nfrom cuisine import *\nfrom random import randint\nfrom fabric.context_managers import hide\nimport socket\nimport paramiko\n\nenv.hosts = [\"xxx.xxx.xxx.xxx\"]\nenv.user =\"root\"\nenv.password = \"password\"\ndbpasswd = \"dbpasswd\"\nport = randint(3307, 9999)\ngreen_bg = _wrap_with('42')\nred_bg = _wrap_with('41')\n\n\ndef uptime():\n run('uptime')\n\ndef create(account):\n \"\"\"create new falkia account\"\"\"\n with settings(hide('running', 'user'), warn_only=True):\n if is_host_up(env.host):\n print(green(\"bootstrapping falkia\"))\n run(\"git clone git://github.com/jahil/falkia.git /falkia/%s\" % account)\n print(green(\"creating database and user\"))\n run('mysqladmin -u %s -p%s create %s' % (env.user, dbpasswd, account))\n run('mysql -u %s -p%s -e \"grant all on %s.* to \\'%s\\'@\\'localhost\\' identified by \\'DBM@rk3T\\'\"' % (env.user, dbpasswd, account, account))\n global naqsh\n naqsh = account\n init()\n\ndef init():\n \"\"\"initialize account\"\"\"\n#\tprint(green('Configuring Nginx'))\n print(green(\"setup/configure nginx\"))\n file_write(\"/etc/nginx/sites-enabled/%s\" % naqsh, text_strip_margin(\n \"\"\"\n |server {\n |listen 80;\n |server_name jahil.falkia.com;\n |\n |access_log /var/log/nginx/jahil.access.log;\n |error_log /var/log/nginx/jahil.error.log;\n |location / {\n |proxy_pass http://127.0.0.1:7777/;\n |proxy_redirect off;\n |\n |proxy_set_header Host $host;\n |proxy_set_header X-Real-IP $remote_addr;\n |proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n |proxy_max_temp_file_size 0;\n |\n |client_max_body_size 10m;\n |client_body_buffer_size 128k;\n |\n |proxy_connect_timeout 90;\n |proxy_send_timeout 90;\n |proxy_read_timeout 90;\n |\n |proxy_buffer_size 4k;\n |proxy_buffers 4 32k;\n |proxy_busy_buffers_size 64k;\n |proxy_temp_file_write_size 64k;\n | }\n |}\n \"\"\"\n ))\n with settings(hide('running', 'user'), warn_only=True):\n if is_host_up(env.host):\n run('sed -i s/jahil/%s/g /etc/nginx/sites-enabled/%s' % (naqsh, naqsh))\n run('sed -i s/7777/%s/g /etc/nginx/sites-enabled/%s' % (port, naqsh))\n run('sed -i s/falkia/%s/g /falkia/%s/config/database.yml' % (naqsh, naqsh))\n run('sed -i s/3000/%s/g /falkia/%s/nbproject/project.properties' % (port, naqsh))\n print(green(\"database initialization\"))\n run('mysql -u %s -p%s %s < /falkia/%s/extra/init.sql' % (env.user, dbpasswd, naqsh, naqsh))\n print(green(\"starting falkia instance\"))\n run('cd /falkia/%s ; mongrel_rails start -p %s -a 127.0.0.1 -e production -P /tmp/%s.pid -d' % (naqsh, port, naqsh))\n run('/etc/init.d/nginx restart')\n print(red(\"ACCOUNT SETUP COMPLETED / ADD BELOW DNS RECORD TO YOUR DOMAIN\"))\n print(green_bg('%s.falkia.com IN A 66.220.0.170') % naqsh )\n\ndef backup(account):\n \"\"\"backup account and download\"\"\"\n with settings(hide('running', 'user'), warn_only=True):\n if is_host_up(env.host):\n run('mysqldump -u %s -p%s %s | gzip > /tmp/%s.sql.gz' % (env.user, dbpasswd, account, account))\n get('/tmp/%s.sql.gz' %account)\n run('rm -fr /tmp/%s.sql.gz' %account)\n\ndef accounts():\n \"\"\"list available accounts\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n accounts = run('ls /falkia')\n lists = accounts.split()\n print (green(lists))\n\ndef delete(account):\n \"\"\"delete account without backup\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if 
is_host_up(env.host):\n stop(account)\n print(yellow(\"[*] removing files\"))\n run('rm -fr /falkia/%s' % account)\n print(yellow(\"[*] dropping database\"))\n run('mysql -u %s -p%s -e \"drop database %s\"' % (env.user, dbpasswd, account))\n print(yellow(\"[*] dropping database user\"))\n run('mysql -u %s -p%s -e \"drop user %s@localhost\"' % (env.user, dbpasswd, account))\n print(yellow(\"[*] removing nginx configuration\"))\n run('rm -fr /etc/nginx/sites-enabled/%s' % account)\n run('/etc/init.d/nginx restart')\n print(red_bg(\"[*] %s account deleted\") % account)\n\ndef destroy(account):\n \"\"\"destroy account and download backup\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n stop(account)\n backup(account)\n delete(account)\n\ndef restart(account):\n \"\"\"restart falkia instance on account\"\"\"\n if is_host_up(env.host):\n stop(account)\n start(account)\n\ndef stop(account):\n \"\"\"stop falkia instance on account\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n run ('cd /falkia/%s ; mongrel_rails stop -P /tmp/%s.pid' % (account, account))\n print(yellow(\"* falkia instance stopped on %s\") % account)\n\ndef start(account):\n \"\"\"start falkia instance on account\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n nport = run('cat /etc/nginx/sites-enabled/%s | grep \":\" | cut -c 37-40' % account)\n run ('cd /falkia/%s ; mongrel_rails start -p %s -a 127.0.0.1 -e production -P /tmp/%s.pid -d' % (account, nport, account))\n print(green(\"* falkia instance started on %s\") % account)\n\ndef getport(account):\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n nport = run('cat /etc/nginx/sites-enabled/%s | grep \":\" | cut -c 37-40' % account)\n print (green(nport))\n\n\ndef pidof(account):\n \"\"\"show account process id \"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n pid = run('cat /tmp/%s.pid' % account)\n print(green(\"* %s pid is: %s\") % (account, pid))\n\ndef is_host_up(host):\n \"\"\"Verify the host computer is online before action\"\"\"\n print('Attempting connection to host: %s' % host)\n original_timeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(1)\n host_up = True\n try:\n paramiko.Transport((host, 22))\n except Exception, e:\n host_up = False\n print('%s down, %s' % (host, e))\n finally:\n socket.setdefaulttimeout(original_timeout)\n return host_up\n\ndef cmd(command):\n \"\"\"execute command on server\"\"\"\n with settings(warn_only=True):\n if is_host_up(env.host):\n run(command)\n\ndef setemail(account):\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n client = prompt(\"Enter client's email address:\")\n run('mysql -u %s -p%s %s -e \"UPDATE users SET email = ' %(env.user, dbpasswd, account) + \"'%s'\" % client + ' WHERE id = 1\"')\n print(green('email id for %s is now: %s' % (account, client)))\n\ndef email(account):\n \"\"\"get account admin's email id\"\"\"\n with settings(hide('everything', 'commands'), warn_only=True):\n if is_host_up(env.host):\n eid = run('mysql -u %s -p%s %s -Bse \"select email from users where username=\\'admin\\'\"' %(env.user, dbpasswd, account))\n print(red(eid))\n\ndef push(account):\n with settings(hide('running', 'user'), warn_only=True):\n if is_host_up(env.host):\n run(\"cd /falkia/%s ; git pull\" % account)\n"
}
] | 6 |
sameernegi17/C- | https://github.com/sameernegi17/C- | a5dcfb4655b21371801cd78eca05133bf9789fc6 | 7a156ef211d56d23ec0259fd35b47a78214d4324 | 4828969b2c2d68dae8cf2963574f84dd1796c783 | refs/heads/master | 2021-05-25T20:04:59.301342 | 2020-05-08T07:47:28 | 2020-05-08T07:47:28 | 253,902,012 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5761035084724426,
"alphanum_fraction": 0.5791476368904114,
"avg_line_length": 21.27118682861328,
"blob_id": "b53c5fcdc415f2639b5613434599dc2fee62e783",
"content_id": "cd5e4adbf822ece9237cbdf120f744d1f05ef8bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1318,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 59,
"path": "/firstNotRepeatingCharacter.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "\n/*\nGiven a string s consisting of small English letters, find and return the first instance of a non-repeating character in it. If there is no such character, return '_'.\nExample\n•\tFor s = \"abacabade\", the output should be\nfirstNotRepeatingCharacter(s) = 'c'.\nThere are 2 non-repeating characters in the string: 'c' and 'd'. Return c since it appears in the string first.\n•\tFor s = \"abacabaabacaba\", the output should be\nfirstNotRepeatingCharacter(s) = '_'.\nThere are no characters in this string that do not repeat.\n*/\n#include <iostream>\n#include <unordered_map>\n\nchar firstNotRepeatingCharacter(std::string s)\n{\n\nstd::unordered_map<int,int> count_map;\n\n\nfor(int i = 0; i < s.size(); i++)\n{\n if(count_map.find(s[i]) == count_map.end())\n {\n count_map[s[i]] = i;\n }\n else\n {\n count_map[s[i]] = s.size() + 1;\n }\n}\n\n auto min_index = s.size();\n {\n for(auto it = count_map.begin(); it !=count_map.end(); it++)\n {\n if(it->second < min_index)\n {\n min_index = it->second;\n }\n }\n }\n\n if (min_index == s.size())\n {\n return '_';\n }\n else\n {\n return s[min_index];\n }\n \n\n}\n\nint main()\n{\n std::cout << firstNotRepeatingCharacter(\"abacabaabacaba\") << std::endl;\n return 0;\n}"
},
{
"alpha_fraction": 0.5534693598747253,
"alphanum_fraction": 0.5730612277984619,
"avg_line_length": 18.774192810058594,
"blob_id": "69af5a253ff361fbc948e9f8e92bfa2d6651f7ea",
"content_id": "c16f6c8f67f81a9dcdd6c83ab90ae18ca2c23a50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1225,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 62,
"path": "/datatypes.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include<iostream>\n#include <cstdint>\n#include <limits>\n\nint main()\n{\n\n bool value = true;\n std::cout << value << std::endl;\n std::cout << sizeof(value) << std :: endl;\n\n\n char value_1 = 'a';\n std::cout << value_1 << std::endl;\n std::cout << sizeof(value_1) << std :: endl;\n \n\n short a = 1;\n int b = 10;\n long c = 15;\n\n std::cout << sizeof(a) << std::endl;\n std::cout << sizeof(b) << std::endl;\n std::cout << sizeof(c) << std::endl;\n\n std::int16_t acstd;\n std::int32_t bcstd;\n std::int64_t ccstd;\n\n std::cout << sizeof(acstd) << std::endl;\n std::cout << sizeof(bcstd) << std::endl;\n std::cout << sizeof(ccstd) << std::endl;\n\n std::cout << std::numeric_limits<short>::max() << std::endl;\n\n a = 32767;\n\n std::cout << a << std::endl;\n\n float f;\n double d;\n long double ld;\n\n std::cout << sizeof(f) << std::endl;\n std::cout << sizeof(d) << std::endl;\n std::cout << sizeof(ld) << std::endl;\n\n std::cout << std::numeric_limits<long double>::max() << std::endl;\n\n\n unsigned short us;\n\n std::cout << std::numeric_limits<unsigned short>::max() << std::endl;\n std::cout << std::numeric_limits<unsigned short>::min() << std::endl;\n\n int x = 10;\n const int ci = x;\n constexpr int cci = 10;\n\n\n return 0;\n}"
},
{
"alpha_fraction": 0.43157893419265747,
"alphanum_fraction": 0.46315789222717285,
"avg_line_length": 15.565217018127441,
"blob_id": "b184f0c5773d6f1e81440238ec3e396294515b66",
"content_id": "41b161f5dff7ffb8b29a178acca00e0ac3ba0254",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 380,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 23,
"path": "/ShapeArea.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include<iostream>\n\nint ShapeArea(int n)\n{\n int num = 1;\n int i = 0;\n int result = 1;\n while(num <= n)\n {\n result = result + 4 * i;\n ++num;\n ++i;\n }\n return result;\n}\n\nint main()\n{\n std::cout << (ShapeArea(2) == 5) << std::endl;\n std::cout << (ShapeArea(3) == 13) << std::endl;\n std::cout << (ShapeArea(4) == 25) << std::endl;\n\n}"
},
{
"alpha_fraction": 0.5265911221504211,
"alphanum_fraction": 0.5605928301811218,
"avg_line_length": 18.133333206176758,
"blob_id": "d823f393d9ec3dd97f0a0d17cdf0b3cbccb297fb",
"content_id": "b44c0d6582b05e7dd6c60f041418c6f8822a0a51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1147,
"license_type": "no_license",
"max_line_length": 239,
"num_lines": 60,
"path": "/matrixSumElement.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "/*\n\nGiven matrix, a rectangular matrix of integers, where each value represents the cost of the room, your task is to return the total sum of all rooms that are suitable for the CodeBots (ie: add up all the values that don't appear below a 0).\n\n\nmatrix = [[0, 1, 1, 2], \n [0, 5, 0, 0], \n [2, 0, 3, 3]]\n\n\nthe output should be\nmatrixElementsSum(matrix) = 9.\n\n*/\n\n#include<vector>\n#include <iostream>\n#include <unordered_map>\n\nint matrixElementsSum(std::vector<std::vector<int>> matrix) {\n\nint sum = 0;\nstd::unordered_map<int,bool> map;\n\nfor(int i =0;i<matrix[0].size(); i++)\n{\n map[i] = true;\n}\n\nfor(int i = 0; i < matrix.size();i++ )\n{ //00 01 02\n for(int j = 0 ; j < matrix[i].size(); j++)\n {\n if(matrix[i][j] == 0)\n {\n map[j] = false;\n }\n else if(map.find(j)->second)\n {\n sum = sum + matrix[i][j];\n }\n }\n}\n\nreturn sum;\n\n}\n\n\nint main()\n{\n\n std::vector<std::vector<int>> matrix;\n matrix.push_back({0,1,1,2});\n matrix.push_back({0,5,0,0});\n matrix.push_back({2,0,3,3});\n\n std::cout << matrixElementsSum(matrix) << std::endl;\n return 0;\n}"
},
{
"alpha_fraction": 0.3045822083950043,
"alphanum_fraction": 0.32345014810562134,
"avg_line_length": 12.285714149475098,
"blob_id": "437e392fa7fe14fdbce40e3a2970a8005d2539f7",
"content_id": "430f42e7f63cc5e7e612e1ecea4818770f9adc1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 28,
"path": "/loops.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint main()\n{ \n for(int i=0; i<0 ; i++)\n {\n std::cout << i << \" \" ;\n }\n std::cout << std::endl;\n\n int j =0;\n while(j<0)\n {\n std::cout << j << \" \" ;\n ++j;\n }\n\n std::cout << std::endl;\n int k =0;\n do\n {\n std::cout << k << \" \" ;\n ++k;\n } while (k<0);\n \n return 0;\n\n}"
},
{
"alpha_fraction": 0.27470818161964417,
"alphanum_fraction": 0.3346303403377533,
"avg_line_length": 18.17910385131836,
"blob_id": "d60a73214bfd72b51f93a8b8c79655ce7ea7faff",
"content_id": "e79b3f7d5770a368c051252eae2b7ddadb292ce3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1285,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 67,
"path": "/operators.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint main()\n{\n\n int a,b,c;\n // Example 1\n\n a=10; \n std::cout<< a++ << std::endl;\n std::cout<< a << std::endl;\n\n std::cout << \"=======================================\" << std::endl;\n // Example 2\n\n a = 20;\n b = 20;\n a = 90/b;\n std::cout << a << std::endl;\n\n std::cout << \"=======================================\" << std::endl;\n\n a=10;\n b = a++;\n c = a;\n std::cout<< a << \",\"<<b << \",\"<< c << std::endl;\n\n std::cout << \"=======================================\" << std::endl;\n\n int c1,c2;\na = -8;\nb = 3;\nc1 = --a + b;\nc2 = a-- + b;\nstd::cout<<\"c1=\"<<c1<<\", \"<<\"c2=\"<<c2<< std::endl;\nstd::cout<<a<< std::endl;\n\nstd::cout << \"=======================================\" << std::endl;\na = 10,b=11;\n\na==b?c=a:c=b;\n\nstd::cout << c << std::endl;\n\nstd::cout << \"=======================================\" << std::endl;\n\na = 1;// 0001\na = a << 1; // 0010\nstd::cout << a << \", \";\na = a << 1; //0100\nstd::cout << a << \", \";\na = a >> 1; // 0010\nstd::cout << a << std::endl;\n\nstd::cout << \"=======================================\" << std::endl;\n\na = 1; //0001\nb = 2; //0010\nc = a | b ; // 0011\nstd::cout << c << \", \";\nc = a & b; //0000\nstd::cout << c << \", \";\nc = ~a & b; // 1110 & 0010 = 0010\nstd::cout << c << std::endl;\n\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5379877090454102,
"alphanum_fraction": 0.5770020484924316,
"avg_line_length": 16.428571701049805,
"blob_id": "d27d461099a125dfc9043e400b8c7980928a8877",
"content_id": "4ae54cf14b95f931d8a116c4a2a85aee2de03a0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 487,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 28,
"path": "/adjacentElementsProduct.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <vector>\n\nint adjacementElementMaxProduct(std::vector<int> inputArray)\n{\n int max = -999999;\n int i = 0, j = 1;\n int multiply = 0;\n\n while(j < inputArray.size())\n {\n multiply = inputArray[i] * inputArray[j];\n if(max < multiply)\n max = multiply;\n ++i;\n ++j;\n }\n\n return max;\n\n}\n\nint main()\n{\n std::vector<int> Elements{-23,4,-3,8,-12};\n std::cout << (adjacementElementMaxProduct(Elements) == -12);\n return 0;\n}"
},
{
"alpha_fraction": 0.3970223367214203,
"alphanum_fraction": 0.40777501463890076,
"avg_line_length": 17.32575798034668,
"blob_id": "5ceb456b6699b709f99801eae2d4ee059c1a6dbb",
"content_id": "447b44d44dba20cf17f098eecc175bf62591f50c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2418,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 132,
"path": "/queue.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nclass queue\n{\n public:\n queue(int size)\n {\n capacity = size;\n arr = new int[size];\n }\n void enqueue(int value)\n {\n if(is_full())\n {\n std::cout << \"queue is full\" << std::endl;\n }\n else\n {\n back = (back + 1) % capacity;\n arr[back] = value;\n ++count;\n }\n \n }\n void dequeue()\n {\n if(!is_empty())\n {\n front = (front + 1) % capacity;\n --count;\n }\n else\n {\n std::cout << \"queue is empty\" << std::endl;\n }\n \n }\n\n int peek()\n {\n if(!is_empty())\n {\n return arr[front];\n }\n else\n {\n return -1;\n }\n \n }\n\n\n bool is_full()\n {\n return (size()== capacity);\n }\n\n bool is_empty()\n {\n return (size()== 0);\n }\n\n int size()\n {\n return count;\n }\n\n private:\n int front{0};\n int back{-1};\n int capacity{0};\n int count{0};\n int *arr;\n};\n\nint main()\n{\n queue q(5);\n std::cout << q.is_full() << std::endl;\n std::cout << q.is_empty() << std::endl;\n std::cout << q.size() << std::endl;\n std::cout << q.peek() << std::endl;\n\n q.enqueue(10);\n\n std::cout << \"+++++++++++++++++++++++\" << std::endl;\n\n std::cout << q.is_full() << std::endl;\n std::cout << q.is_empty() << std::endl;\n std::cout << q.size() << std::endl;\n std::cout << q.peek() << std::endl;\n\n std::cout << \"+++++++++++++++++++++++\" << std::endl;\n\n q.dequeue();\n\n q.dequeue();\n\n std::cout << \"+++++++++++++++++++++++\" << std::endl;\n\n q.enqueue(10);\n q.enqueue(20);\n q.enqueue(30);\n q.enqueue(40);\n q.enqueue(50);\n q.enqueue(60);\n\n std::cout << q.is_full() << std::endl;\n std::cout << q.is_empty() << std::endl;\n std::cout << q.size() << std::endl;\n std::cout << q.peek() << std::endl;\n\n q.dequeue();\n q.enqueue(70);\n std::cout << q.peek() << std::endl;\n q.dequeue();\n std::cout << q.peek() << std::endl;\n q.dequeue();\n std::cout << q.peek() << std::endl;\n q.dequeue();\n std::cout << q.peek() << std::endl;\n q.dequeue();\n std::cout << q.peek() << std::endl;\n q.dequeue();\n std::cout << q.is_empty() << std::endl;\n std::cout << q.peek() << std::endl;\n q.dequeue();\n\n\n\n return 0;\n}"
},
{
"alpha_fraction": 0.5248227119445801,
"alphanum_fraction": 0.5626477599143982,
"avg_line_length": 17.413043975830078,
"blob_id": "68ec150249d88078fa65e8b8c7134ca4aa60672e",
"content_id": "57ff9bc821c1fd6d571dffad922d25c07f9f3195",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 46,
"path": "/commonCharacterCount.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "/*\n Given two strings, find the number of common characters between them.\n Example\n For s1 = \"aabcc\" and s2 = \"adcaa\", the output should be\n commonCharacterCount(s1, s2) = 3.\n\n Strings have 3 common characters - 2 \"a\"s and 1 \"c\".\n*/\n#include<array>\n#include<iostream>\n\nint commonCharacterCount(std::string s1, std::string s2) {\n\n std::array<int, 26> freq_s1{0};\n std::array<int, 26> freq_s2{0};\n\n for(auto character : s1)\n {\n freq_s1[character - 'a']++;\n }\n \n for(auto character : s2)\n {\n freq_s2[character - 'a']++;\n }\n\n auto count = 0;\n for(int i = 0; i < freq_s1.size();i++)\n {\n count+= std::min(freq_s1[i],freq_s2[i]);\n }\n\n return count;\n}\n\n\n\nint main()\n{\n auto s1{\"aabccd\"};\n auto s2{\"adcaa\"};\n\n std::cout << commonCharacterCount(s1,s2);\n\n return 0;\n}"
},
{
"alpha_fraction": 0.5774999856948853,
"alphanum_fraction": 0.6287500262260437,
"avg_line_length": 35.25,
"blob_id": "195460290c8b827652b87db7404c00713189e998",
"content_id": "8ac7766c318880550cbf299bfb125dd5f026cfa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1600,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 44,
"path": "/almostStrictlyIncreasing.py",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "'''\nGiven a sequence of integers as an array, determine whether it is possible to obtain a strictly increasing sequence by removing no more than one element from the array.\n\nsequence = [1, 3, 2, 1], the output should be\nalmostIncreasingSequence(sequence) = false.\n\nsequence = [1, 3, 2], the output should be\nalmostIncreasingSequence(sequence) = true.\n\nsequence = [10, 1, 2, 3 ,4, 5], the output should be\nalmostIncreasingSequence(sequence) = true.\n\nsequence = [1, 2, 1, 2], the output should be\nalmostIncreasingSequence(sequence) = false.\n\nsequence = [1, 2, 5, 3 , 5], the output should be\nalmostIncreasingSequence(sequence) = false.\n'''\ndef find_bad_pair(sequence):\n for i in range(0,len(sequence)-1):\n if sequence[i] >= sequence[i+1]:\n return i\n return -1\n\n\ndef almostIncreasingSequence(sequence):\n j = find_bad_pair(sequence)\n if j == -1:\n return True\n print(sequence[-1:0])\n if find_bad_pair(sequence[j-1:j] + sequence[j+1:]) == -1:\n return True\n if find_bad_pair(sequence[j:j+1] + sequence[j+2:]) == -1:\n return True\n return False\n \n\n\nif __name__ == \"__main__\":\n print(\"The Test of [1, 3, 2, 1] is \" + str(almostIncreasingSequence([1, 3, 2, 1])))\n print(\"The Test of [1, 3, 2] is \" + str(almostIncreasingSequence([1, 3, 2]))) \n print(\"The Test of [10, 1, 2, 3 ,4, 5] is \" + str(almostIncreasingSequence([10, 1, 2, 3 ,4, 5]))) \n print(\"The Test of [1, 2, 1, 2] is \" + str(almostIncreasingSequence([1, 2, 1, 2]))) \n print(\"The Test of [1, 2, 5, 3 , 5] is \" + str(almostIncreasingSequence([1, 2, 5, 3 , 5]))) "
},
{
"alpha_fraction": 0.5677290558815002,
"alphanum_fraction": 0.5697211027145386,
"avg_line_length": 21.33333396911621,
"blob_id": "f4524b4fe0f2e3c79d92093a2e75c1891da6745f",
"content_id": "f17fee52a53fbb9ce4e719cac5e7a3101e59ee38",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1004,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 45,
"path": "/allLongeststring.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "/*\nGiven an array of strings, return another array containing all of its longest strings.\nExample\nFor inputArray = [\"aba\", \"aa\", \"ad\", \"vcd\", \"aba\"], \n\nthe output should be\nallLongestStrings(inputArray) = [\"aba\", \"vcd\", \"aba\"].\n*/\n\n#include <vector>\n#include <iostream>\n\nstd::vector<std::string> allLongestString(std::vector<std::string> input_array)\n{\n std::vector<std::string> output_array;\n auto current_size = 0;\n\n for(auto string : input_array)\n {\n if(string.size() > current_size)\n {\n output_array.clear();\n output_array.push_back(string);\n current_size = string.size();\n }\n else if (string.size() == current_size)\n {\n output_array.push_back(string);\n }\n }\n\n return output_array;\n\n}\n\nint main()\n{\n auto output = allLongestString({\"aba\", \"aa\", \"adbc\", \"vcd\", \"aba\"});\n for(auto element : output)\n {\n std::cout << \" \" << element ;\n }\n std::cout << std::endl;\n return 0;\n}"
},
{
"alpha_fraction": 0.6133671998977661,
"alphanum_fraction": 0.6353638172149658,
"avg_line_length": 25.288888931274414,
"blob_id": "ebbba95b49473e6d469691746566738d0afa7915",
"content_id": "b1af7807cc22c03ca36265b610487a9a350562b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1182,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 45,
"path": "/makeArrayConsecutive.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "/*\nSameer got statues of different sizes as a present from CodeMaster for his birthday, each statue having an non-negative integer size. Since he likes to make things perfect, \nhe wants to arrange them from smallest to largest so that each statue will be bigger than the previous one exactly by 1. He may need some additional statues to be able to\naccomplish that. Help him figure out the minimum number of additional statues needed.\n\nFor statues = [6, 2, 3, 8], the output should be\nmakeArrayConsecutive(statues) = 3.\n\nFor statues = [0, 3], the output should be\nmakeArrayConsecutive(statues) = 2.\n*/\n\n#include <iostream>\n#include <vector>\n\nint makeArrayConsecutive(std::vector<int> statues)\n{\n int value;\n\n int max = 0,min=99999;\n\n for(int i = 0; i< statues.size(); i++)\n {\n if(max<statues[i])\n {\n max = statues[i];\n }\n\n if(min >statues[i])\n {\n min = statues[i];\n }\n }\n\n value = (max - min + 1) - statues.size();\n return value;\n}\n\n\nint main()\n{\n std::cout << (makeArrayConsecutive({6,2,3,8}) == 3) << std::endl;\n std::cout << (makeArrayConsecutive({0,3}) == 2) << std::endl;\n return 0;\n}"
},
{
"alpha_fraction": 0.37700146436691284,
"alphanum_fraction": 0.38791847229003906,
"avg_line_length": 16.85714340209961,
"blob_id": "6673e3a6bc353edbf2e9277eb12d6c7bdfdf363b",
"content_id": "501b86dbf7b5c5e71150516a40f50921da5bbbc1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1374,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 77,
"path": "/stack.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nclass stack\n{\n public:\n stack(int size)\n {\n max_size = size;\n arr = new int[size];\n }\n\n void push(int value)\n {\n if(top_element < (max_size-1))\n {\n ++top_element;\n arr[top_element] = value;\n }\n else\n {\n std::cout << \"stack is full\" << std::endl;\n }\n \n }\n void pop()\n {\n if (top_element != -1)\n {\n --top_element;\n }\n else\n {\n std::cout << \"stack is empty\" << std::endl;\n } \n }\n int top()\n {\n if(top_element != -1)\n {\n return arr[top_element];\n }\n else\n {\n return -1;\n }\n \n }\n bool is_empty()\n {\n return top_element == -1;\n }\n private:\n int top_element{-1};\n int max_size{0};\n int *arr;\n};\n\nint main()\n{\n stack s(5);\n std::cout << s.is_empty() << std::endl;\n s.pop();\n s.push(1);\n std::cout << s.is_empty() << std::endl;\n std::cout << s.top() << std::endl;\n s.pop();\n std::cout << s.is_empty() << std::endl;\n std::cout << \"+++++++++++++++++++++++\" << std::endl;\n std::cout << s.top() << std::endl;\n s.push(1);\n s.push(2);\n s.push(3);\n s.push(4);\n s.push(5);\n s.push(6);\n std::cout << s.top() << std::endl;\n}"
},
{
"alpha_fraction": 0.4767801761627197,
"alphanum_fraction": 0.5201238393783569,
"avg_line_length": 14.428571701049805,
"blob_id": "2fbf42b91c062c031c46bb2de10329a00af12a29",
"content_id": "26f95527701a18a905fdc4f78c2f2148984541a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 323,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/Reverse_of_a_number.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint reverse_of_a_number(int num)\n{\n int reverse = 0;\n int temp = 0;\n while(num != 0)\n {\n temp = num % 10;\n reverse = reverse * 10 + temp;\n num = num / 10;\n }\n return reverse;\n}\n\n\nint main()\n{\n int number = 57898;\n std::cout << reverse_of_a_number(number);\n}"
},
{
"alpha_fraction": 0.40499457716941833,
"alphanum_fraction": 0.4158523380756378,
"avg_line_length": 17.795917510986328,
"blob_id": "4291568a425b82ee52027564455f5754c2b690ed",
"content_id": "ae667a6f0e393011899cf0f3ef7023cb3e5ebd0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 921,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 49,
"path": "/ConditionalStatements.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nint main(int argc, char const *argv[])\n{\n int a = 1;\n /*\n if(a==0)\n {\n std::cout << \"Do Something\" << std::endl;\n }\n else if (a == 1)\n {\n std::cout << \"We watch TV\" << std::endl;\n }\n else if (a == 2)\n {\n std::cout << \"We eat food\" << std::endl;\n }\n else if (a == 3)\n {\n std::cout << \"We play\" << std::endl;\n }\n else\n {\n std::cout << \"Don't Do Anything\" << std::endl;\n }\n */\n switch (a)\n {\n case 0:\n std::cout << \"Do Something\" << std::endl;\n break;\n case 1:\n std::cout << \"We watch TV\" << std::endl;\n // break;\n case 2:\n std::cout << \"We eat food\" << std::endl;\n // break;\n case 3:\n std::cout << \"We play\" << std::endl;\n // break;\n \n default:\n std::cout << \"Don't Do Anything\" << std::endl;\n break;\n }\n \n return 0;\n}\n"
},
{
"alpha_fraction": 0.4412698447704315,
"alphanum_fraction": 0.4761904776096344,
"avg_line_length": 17.920000076293945,
"blob_id": "dbe535fe815acd112639950486838b0ad47a2172",
"content_id": "bb009266cbecb257e4170c811ecc10c75093d0a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 945,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 50,
"path": "/firstDuplicateInArray.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <vector>\n#include <unordered_map>\n\nint firstDuplicate(std::vector<int> a) {\n\n std::unordered_map<int,int> count_map;\n\n for(int i=0;i<a.size();i++)\n {\n if(count_map.find(a[i]) == count_map.end())\n {\n count_map[a[i]] = 0;\n }\n else if(count_map[a[i]] == 0)\n {\n count_map[a[i]] = i;\n }\n }\n\n auto min_index = 999999;\n for(auto it = count_map.begin(); it != count_map.end() ; it++)\n {\n if(it->second < min_index && it->second != 0)\n {\n min_index = it->second;\n }\n }\n\n if(min_index == 999999)\n {\n return -1;\n }\n else\n {\n return a[min_index];\n }\n \n}\n\nint main()\n{\n std::vector<int> test_value{2, 1, 3, 5, 3, 2};\n\n std::vector<int> test_value2{2, 2};\n\n std::vector<int> test_value3{2, 4, 3, 5, 1};\n\n std::cout << firstDuplicate(test_value2) << std::endl;\n}"
},
{
"alpha_fraction": 0.4236540198326111,
"alphanum_fraction": 0.4245366156101227,
"avg_line_length": 19.962963104248047,
"blob_id": "83a281811aa6cd8aa14f0804dae400a02793b613",
"content_id": "3481fe1f70c897af0253ab5bbb014180ec4a0ecb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1133,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 54,
"path": "/balanced_paranthesis.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <stack>\n#include <map>\n\nstd::string isBalanced(std::string s)\n{\n std::string result{\"NO\"};\n std::stack<char> string_stack;\n std::map<char,char> paranthesis_map{{')','('},{']','['},{'}','{'}};\n\n for(auto character : s)\n {\n if(character == '{' || character == '(' || character == '[' )\n {\n string_stack.push(character);\n }\n else if(character == '}'|| character == ')' || character == ']')\n {\n if(string_stack.empty())\n {\n return result;\n }\n else\n {\n auto top_element = string_stack.top();\n if(top_element != paranthesis_map[character]) // paranthesis_map['}'] => {\n {\n return result;\n }\n string_stack.pop();\n }\n \n }\n }\n\n if(string_stack.empty())\n {\n result = \"YES\";\n }\n \n return result;\n}\n\n\nint main()\n{\n std::string s{\"(abc]\"};\n\n auto result = isBalanced(s);\n\n std::cout << result << std::endl;\n\n return 0;\n}\n\n"
},
{
"alpha_fraction": 0.44400396943092346,
"alphanum_fraction": 0.47373637557029724,
"avg_line_length": 14.78125,
"blob_id": "79274e0f83fe81b9f509178de4cc88e296d53fae",
"content_id": "4e79324cd9e2e007be10ff6073f98982aeb52af9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1009,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 64,
"path": "/Graph.cpp",
"repo_name": "sameernegi17/C-",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <vector>\n\nclass Graph\n{\nprivate:\n int vertix;\n std::vector<std::vector<int>> adjlist;\n\npublic:\n Graph(int v);\n void addEdge(int u, int v)\n {\n adjlist[u].push_back(v);\n // adjlist[v].push_back(u); // Comment For Directed Graph\n }\n void Print()\n {\n int i{0};\n for(auto element : adjlist)\n {\n std::cout << \"HEAD ->\" << i << \"->\";\n for(auto vertix : element)\n {\n std::cout << vertix << \" \";\n }\n ++i;\n std::cout << std::endl;\n }\n }\n};\n\nGraph::Graph(int v)\n{\n vertix = v;\n adjlist.resize(vertix);\n}\n\n\nint main()\n{\n /*\n Graph g(7);\n g.addEdge(0,1);\n g.addEdge(1,3);\n g.addEdge(1,2);\n g.addEdge(2,3);\n g.addEdge(2,4);\n g.addEdge(4,6);\n g.addEdge(6,5);\n g.Print();\n*/\n\nGraph g(5);\ng.addEdge(0,1);\ng.addEdge(1,2);\ng.addEdge(2,4);\ng.addEdge(4,2);\ng.addEdge(2,3);\ng.addEdge(3,0);\ng.Print();\n\n return 0;\n}"
}
] | 18 |
Herra-Dronich/PythonLessons | https://github.com/Herra-Dronich/PythonLessons | 6a593c12ee82cf9c0a8a1435337bad2be8b268ef | 963a7abea4f4608610e729349c411ad1fd9d316f | fffde7dd3e59ddd815616c30e803379e5fa7ee5b | refs/heads/master | 2019-07-18T02:23:25.631640 | 2015-01-30T11:56:09 | 2015-01-30T11:56:09 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5104529857635498,
"alphanum_fraction": 0.5209059119224548,
"avg_line_length": 22.4489803314209,
"blob_id": "c17c1a21fb4a03f4c32bb789ad71c134efb02a3c",
"content_id": "07f9784cb7128821c595efd5f1c701673ec0fb81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 49,
"path": "/pythonLessons/Advanced programming/Ülesanne 20 - BST/Ülesanne 20 - Binary Trees.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nÜlesanne 20 - Binary Search Trees\nJuhend: https://courses.cs.ttu.ee/w/images/e/e7/2014_Loeng_21_-_Binary_tree.pdf\n\"\"\"\n__author__ = \"Borka Martin Orlov\"\n__email__ = \"[email protected]\"\n\nfrom bst import *\n\ndef read_words(fail):\n \"\"\"\n Loe sõnad failist\n \"\"\"\n file_contents = fail.read()\n word = \"\"\n\n for char in file_contents:\n char = char.lower()\n if char.isalpha():\n word += char\n else:\n if word != \"\":\n yield word\n word = \"\"\n\ndef main():\n try:\n fail = open(\"hound.txt\", encoding=\"utf8\")\n except Exception as e:\n print(\"Error occured when opening fail:\", e)\n else:\n words = read_words(fail)\n tree = Tree(next(words))\n [tree.add(Tree(word)) for word in words]\n node, dist = tree.search(\"was\")\n if node:\n node.printTree(1)\n print('Distance from root:', dist)\n\n print(tree.maxDepth())\n \n outputFile = open(\"result.txt\", \"w\", encoding=\"utf8\")\n tree.printToFile(outputFile)\n outputFile.close()\n finally:\n fail.close()\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.6734693646430969,
"alphanum_fraction": 0.6734693646430969,
"avg_line_length": 23.75,
"blob_id": "b886ce0d1bdd5218a3b41aa1126ab53559cc45b1",
"content_id": "beda43bbbf0b719e688485367b4e7b0166470291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 98,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 4,
"path": "/exercise/Warrior.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "class Character(object):\n\tdef __init__(self, name, level):\n\t\tself.name = name\n\t\tself.level = level"
},
{
"alpha_fraction": 0.5881863832473755,
"alphanum_fraction": 0.6239600777626038,
"avg_line_length": 18.406780242919922,
"blob_id": "ac2cd01def16fc146bb7653fc0e0bf5b0a23611d",
"content_id": "52408b010700205ec4471217e69892bd790fa247",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1203,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 59,
"path": "/pythonLessons/Advanced programming/Ülesanne 8 -Testimine/Ülesanne 8 - testimine.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nÜlesanne 8 - Testimine\r\nJuhend: https://courses.cs.ttu.ee/w/images/8/87/2014_Loeng_8_-_Testing.pdf\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\nimport unittest\r\nfrom Roman_numerals import convert\r\n\r\nclass Testing(unittest.TestCase):\r\n\t\"\"\"\r\n\tKlass testimiseks\r\n\t\"\"\"\r\n\tdef test_1(self):\r\n\t\tself.assertEqual(convert(\"I\"), 1)\r\n\r\n\tdef test_2(self):\r\n\t\tself.assertEqual(convert(\"IV\"), 4)\r\n\r\n\tdef test_3(self):\r\n\t\tself.assertEqual(convert(\"VI\"), 6)\r\n\r\n\tdef test_4(self):\r\n\t\t# Failed: Test Cases missing\r\n\t\tself.assertEqual(convert(\"XC\"), 90)\r\n\r\n\tdef test_5(self):\r\n\t\t# Failed: Syntax faulty\r\n\t\tself.assertEqual(convert(\"CD\"), 399)\r\n\r\n\tdef test_6(self):\r\n\t\tself.assertEqual(convert(\"MMMMCMXCIX\"), 4999)\r\n\r\n\tdef test_7(self):\r\n\t\tself.assertEqual(convert(\"IIV\"), -1)\r\n\r\n\tdef test_8(self):\r\n\t\tself.assertEqual(convert(\"XXXXX\"), -1)\r\n\r\n\tdef test_9(self):\r\n\t\tself.assertEqual(convert(\"LLCDD\"), -1)\r\n\r\n\tdef test_10(self):\r\n\t\tself.assertEqual(convert(\"blah\"), -1)\r\n\r\n\tdef test_11(self):\r\n\t\t# Initially returned \"None\"\r\n\t\t# Case added\r\n\t\tself.assertEqual(convert(10), -1)\r\n\r\ndef main():\r\n\tunittest.main(verbosity=2)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()"
},
{
"alpha_fraction": 0.6206244230270386,
"alphanum_fraction": 0.634815514087677,
"avg_line_length": 22.627906799316406,
"blob_id": "9cad8f94a22378d2d9ab489d34ed5dd121b71d5f",
"content_id": "d193c32d7aa9da99899dd42050f820408efc00f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1061,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 43,
"path": "/pythonLessons/Advanced programming/Ülesanne 5 - Failid/unique_words.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nModules for exercise 5\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\ndef count_words(file_name, word_array):\r\n\t\"\"\"\r\n\tLoeb kokku sõnade arvu ja unikaalsete sõnade arvu.\r\n\tLisab unikaalsed sõnad listi\r\n\t\"\"\"\r\n\tword_count = 0\r\n\tbook = open(file_name, 'r')\r\n\tfor line in book:\r\n\t\tline = line.split()\r\n\t\tfor word in line:\r\n\t\t\tif word.isalpha():\r\n\t\t\t\tword_count = word_count + 1 \r\n\t\t\t\tif word not in word_array:\r\n\t\t\t\t\tword_array.append(word)\r\n\tprint('Tekstis', file_name, 'on kokku', word_count, 'sona.' )\r\n\t\r\n\tlength = len(word_array)\r\n\tprint('Kokku on', length, 'unikaalset sona')\r\n\r\n\treturn length\r\n\t\r\ndef compare_words(unique_1, unique_2):\r\n\t\"\"\"\r\n\tLeiab mitu unikaalset sõna on esimeses listis, mida teises pole\r\n\t\"\"\"\r\n\tuniques = 0\r\n\tfor word in unique_1:\r\n\t\tif word not in unique_2:\r\n\t\t\tuniques = uniques + 1\r\n\tprint ('Tekstis on ', uniques, 'sona mida teises pole.')\r\n\r\ndef words_in_both(unique_1, unique_2):\r\n\tuniques = 0\r\n\tfor word in unique_1:\r\n\t\tif word in unique_2:\r\n\t\t\tuniques = uniques + 1\r\n\tprint (uniques, 'sona esineb molemas tekstis')"
},
{
"alpha_fraction": 0.5286298394203186,
"alphanum_fraction": 0.5449897646903992,
"avg_line_length": 29.54838752746582,
"blob_id": "0ba8dbf5c07b2b612325fab0cec3e0986c4dbe72",
"content_id": "ddb8112f6ec23e072338771f4c489100a830c432",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 981,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 31,
"path": "/pythonLessons/Advanced programming/Ülesanne 10 - Lambda/Ülesanne 9 - Lambda.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nÜlesanne 9 - Lambda\r\nJuhend: https://courses.cs.ttu.ee/w/images/5/54/2014_Loeng_10_-_Lambda%2C_comprehensions_and_generator.pdf\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\ndef generator(filename):\r\n text_file = open(filename, 'r')\r\n\r\n for line in text_file:\r\n word = ''\r\n for char in line:\r\n if char.isalpha():\r\n word = word + char\r\n elif char in [' ', '!', '?', ',', '.', ':']:\r\n yield word\r\n word = ''\r\n text_file.close()\r\n\r\n\r\ndef main():\r\n list_of_words = [x for x in generator('HoundOfTheBaskervilles.txt')][3:400:4]\r\n print('Sõnade arv listis: ', len(list_of_words))\r\n average = sum(map(len, list_of_words))/len(list_of_words)\r\n print('Sõnade keskmine pikkus', average)\r\n list_of_words = list(filter(lambda x: len(x) > average, list_of_words))\r\n print(list(filter(lambda x: x.capitalize(), list_of_words)))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
},
{
"alpha_fraction": 0.6381215453147888,
"alphanum_fraction": 0.6381215453147888,
"avg_line_length": 16.200000762939453,
"blob_id": "1f7a3cbcc1c5e4654b7ac55b18562bf3762454fd",
"content_id": "ff72e6fa6bff1fadb33d55747492fb59c6fc0fdf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 20,
"path": "/documents-export-2015-01-30/pythonlessons5.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "from abc import ABCMeta, abstractmethod\r\n\r\nclass BaseClass(object):\r\n\t__metaclass__ = ABCMeta\r\n\r\n\t@abstractmethod # Decorators\r\n\tdef __init__(self):\r\n\t\td='lol'\r\n\t\tprint(locals())\r\n\t\t#print(globals())\r\n\r\nclass InClass(BaseClass):\r\n\tdef printHam(self):\r\n\t\tprint(\"Ham\")\r\n\tdef local(self):\r\n\t\tsuper(InClass, self).__init__()\r\n\r\ni = InClass()\r\ni.printHam()\r\ni.local()"
},
{
"alpha_fraction": 0.5737179517745972,
"alphanum_fraction": 0.5865384340286255,
"avg_line_length": 15.44444465637207,
"blob_id": "f36e4a0dad038fbc1324a877c19853ce8388bf9e",
"content_id": "5ef6600d3f4c1ba6b235681ba7ee1c8cedd8f1fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 18,
"path": "/documents-export-2015-01-30/pythonlesson4.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "class BaseClass(object):\r\n\tdef __init__(self):\r\n\t\tself.x = 10\r\n\tdef test(self):\r\n\t\tprint(\"ham\")\r\n\r\n\r\nclass InClass(BaseClass):\r\n\tdef __init__(self):\r\n\t\tsuper(InClass, self).__init__()\r\n\t\tsuper(InClass, self).test()\r\n\t\tself.x = 20\r\n\tdef test(self):\r\n\t\tprint(\"hammer\")\r\ni = InClass()\r\n\r\nprint(i.x)\r\nprint(i.test())"
},
{
"alpha_fraction": 0.5361595749855042,
"alphanum_fraction": 0.6059850454330444,
"avg_line_length": 12.862069129943848,
"blob_id": "9093dcc71e77315785744988e1decb84a3aa39c2",
"content_id": "2e5b740825dd09ff263f57e6f2f8173d87326319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 401,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 29,
"path": "/pythonLessons/lesson2.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "lst = [1, 2, 3, 5]\n\n# If gotcha\nif 1 in lst:\n\tpass\n\n# List sorting\nlst = [(1,2,3), (3,1,1), (7,65,3)]\nlst.sort(reverse = True, key=lambda x: x[1])\nprint lst\n\n# Lambda\nlamb = lambda x: x**2\nprint lamb(8)\n\n# List comprehension\nlst = [(x,y) for x in range(1,10) for y in range(1,20) if x==y]\nprint lst\n\ndef square(x):\n\treturn x**2\n\n\n# Generator\ndef generator():\n\tfor x in range(1, 10):\n\t\tyield x\n\nx = nex"
},
{
"alpha_fraction": 0.47345131635665894,
"alphanum_fraction": 0.5353982448577881,
"avg_line_length": 18.65217399597168,
"blob_id": "60ad005ea5ba4ef123be249c32e91045576f22d2",
"content_id": "d926829f4e467cf95b8a58635f83419834f1c80a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 453,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 23,
"path": "/pythonLessons/Advanced programming/Ülesanne 14 - Sympy/Ülesanne 14 - Sympy.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nÜlesanne 14 - Sympy\nJuhend: https://courses.cs.ttu.ee/w/images/e/ee/ITI0140_2014_Loeng_sympy1.pdf\n\"\"\"\n__author__ = \"Borka Martin Orlov\"\n__email__ = \"[email protected]\"\n\nimport sympy\n\ndef main():\n a, x = sympy.symbols('a x')\n \t\n f1 = x**3 + 4*x**2 + 100\n f2 = (7+1)*sympy.sin(x) + sympy.cos(x)\n\n S = sympy.Abs(sympy.integrate(f2-f1, (x,0,a)))\n \n print(S)\n print(S.subs(a, 20))\n print(S.evalf(subs={a: 20}))\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6154855489730835,
"alphanum_fraction": 0.6496062874794006,
"avg_line_length": 24.433332443237305,
"blob_id": "e3522c14fcc2d4dcb22db0f4a959faeab40d45dc",
"content_id": "2571093b8fd38347693082dc519723190b32fb3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 763,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 30,
"path": "/pythonLessons/Advanced programming/Ülesanne 17 - Graafid/Ülesanne 17 - Graafid.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nÜlesanne 17 - Graafid\nJuhend: https://courses.cs.ttu.ee/w/images/f/fc/2014_Loeng_Graafid.pdf\n\"\"\"\n__author__ = \"Borka Martin Orlov\"\n__email__ = \"[email protected]\"\n\nfrom matplotlib import pyplot\nimport networkx\n\ndef main():\n\t# Graph generation\n G = networkx.fast_gnp_random_graph(23, 0.142357, 142357)\n pos = networkx.spring_layout(G)\n\n # Shortest path\n path = networkx.shortest_path(G, source=1, target=7)\n path_edges = list(zip(path, path[1:]))\n\n # Drawing\n networkx.draw_networkx(G, pos)\n networkx.draw_networkx_nodes(G, pos, nodelist=path, node_color='g')\n networkx.draw_networkx_edges(G, pos, edgelist=path_edges, edge_color='g', width=10)\n\n # Axis off, unneccessary.\n pyplot.axis('on')\n pyplot.show()\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.5658682584762573,
"alphanum_fraction": 0.56886225938797,
"avg_line_length": 12,
"blob_id": "5b5633ceb3824ffbaa34218b3aae919969b654ae",
"content_id": "967cfc0b48380e3663eac751d7d6fb1ef0cbf178",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 24,
"path": "/documents-export-2015-01-30/test.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "def fun(a, *args, **kwargs):\r\n\tprint(kwargs['yes'])\r\n\tfor key, value in kwargs.items():\r\n\t\tprint(key, value)\r\n\tfor value in args:\r\n\t\tprint(value)\r\n\r\nfun('test','hello', yes = 'tere', no = 2)\r\n\r\nclass D(object):\r\n\tpass\r\n\r\nclass B(D):\r\n\tpass\r\nclass F(object):\r\n\tpass\r\n\r\nclass C(D, F):\r\n\tpass\r\n\r\nclass A(B, C):\r\n\tpass\r\n\r\nprint(A.__mro__)"
},
{
"alpha_fraction": 0.6796727776527405,
"alphanum_fraction": 0.696664571762085,
"avg_line_length": 29.576923370361328,
"blob_id": "532436f202694f12fd794dbaefd2851bdb9909f5",
"content_id": "1b9f63e398a9bfa1036c61327292431c58c68988",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1589,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 52,
"path": "/pythonScripts/dave/Dave_Tuple_HW.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy\n\nlistOfCrosses = []\n\nprint '\\nEnter field dimensions in format x,y'\nfieldDimensions = tuple(map(int,raw_input().split(',')))\nprint \"\\nField size is: %s\" % (fieldDimensions,)\n\nfieldWidth \t= \tfieldDimensions[1]\nfieldHeight = \tfieldDimensions[0]\n\nwhile True:\n\ti = raw_input(\"\\nNow enter cross locations in format x,y then Enter (or type 'done' then Enter to display field): \")\n\tif i == \"done\":\n\t\tbreak\n\tcrossLocation = tuple(map(int,i.split(',')))\n\tprint \"\\nYou placed a cross at location: %s\" % (crossLocation,)\n\n\tif crossLocation[0]<=fieldHeight and crossLocation[1]<=fieldWidth and crossLocation[0]>0 and crossLocation[1]>0:\n\t\tif crossLocation not in listOfCrosses:\n\t\t\tlistOfCrosses.append(crossLocation)\n\telse:\n\t\tprint \"--> Error: cross outside the field dimensions: %s - try again!\\n\" % (fieldDimensions,)\n\ndef displayCrosses(fieldDimensions,listOfCrosses):\n\tdisplay = numpy.zeros((fieldWidth,fieldHeight))\n\n\tfor cross in listOfCrosses:\n\n\t\txCoord = cross[1]-1\n\t\tyCoord = cross[0]-1\n\n\t\tdisplay [xCoord,yCoord] = -1\n\n\t\tfor y in range(-1,2):\n\t\t\tfor x in range(-1,2):\n\n\t\t\t\tif xCoord+x>=0 and yCoord+y>=0 and xCoord+x<fieldWidth and yCoord+y<fieldHeight and not(x==y==0):\n\t\t\t\t\t\n\t\t\t\t\t#print '\\nIncrementing value in position: ',[yCoord+y+1,xCoord+x+1]\n\n\t\t\t\t\ttupleToTest = (yCoord+y+1,xCoord+x+1)\n\t\t\t\t\tif tupleToTest not in listOfCrosses: #Prevents 'crosses' (minus 1 elements) from being incremented\n\t \t\t\t\t\n\t\t\t\t\t\tdisplay [xCoord+x,yCoord+y] += 1\n\tprint \"\\n\"\n\tprint display\n\tprint \"\\n\"\n\ndisplayCrosses(fieldDimensions,listOfCrosses)"
},
{
"alpha_fraction": 0.6548009514808655,
"alphanum_fraction": 0.6702576279640198,
"avg_line_length": 27.27397346496582,
"blob_id": "4a97dc263cd89d15a18dad23ab608506f0800e64",
"content_id": "845e3601de317a6408b5f450dcc0a7b2114f6f4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2172,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 73,
"path": "/pythonLessons/Advanced programming/Ülesanne 6 - Moodulid ja erindid/text_processing.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nKuues kodune töö\r\nMoodulid ja erindid\r\nJuhend: https://courses.cs.ttu.ee/w/images/5/5d/ITI0140_Loeng_6_-_Moodulid_ja_erindid.pdf\r\nMOODUL\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\ndef process_file(fail):\r\n\t\"\"\"\r\n\tVõtab argumendiks avatud faili ja tagastab teksti sõnadeks jaotamise tulemusena\r\n\ttekkiva järjendi (st kõik sõnad tekstist nende leidmise järjekorras, sh kordused)\r\n\t\"\"\"\r\n\tword_list = []\r\n\tfor line in fail:\r\n\t\tline = line.split()\r\n\t\tfor word in line:\r\n\t\t\tif word.isalpha():\r\n\t\t\t\tword_list.append(word)\r\n\r\n\treturn word_list\r\n\r\ndef count_words(list):\r\n\t\"\"\"\r\n\tvõtab argumendiks sõnade järjendi ja tagastab sõnastiku, kus võti on sõna ja\r\n\tväärtus on selle sõna esinemiste arv\r\n\t\"\"\"\r\n\tdct = {}\r\n\tfor word in list:\r\n\t\tif word in dct:\r\n\t\t\tdct[word] = dct[word] + 1\r\n\t\telse:\r\n\t\t\tdct[word] = 1\r\n\treturn dct\r\n\r\ndef getKey(item):\r\n\treturn item[1]\r\n\r\ndef find_top_words(dct, n):\r\n\t\"\"\"\r\n\tvõtab argumendiks count_words() tagastatava sõnastiku ja tagastab\r\n\tsõnastiku, kus võti on sõna pikkus ja väärtus on n sagedasemalt esinenud sõna vastava pikkusega ja\r\n\tesinemiste arvuga (st omakorda kuni n pikkused järjendid ennikutest või sõnastikud)\r\n\t\"\"\"\r\n\tdct = sorted(dct.items(), key=lambda dct: dct[1])\r\n\tsorted_dct = {}\r\n\tfor word in dct:\r\n\t\tif word[1] > n:\r\n\t\t\tsorted_dct.setdefault(word[1], []).append(word[0])\r\n\r\n\tsorted_dct = sorted(sorted_dct.items(), key=lambda sorted_dct: sorted_dct[0], reverse=True)\r\n\treturn sorted_dct\r\n\r\ndef print_top_words(dct, filename):\r\n\t\"\"\"\r\n\tvõtab vastu find_top_words() tagastatava sõnastiku ja trükib\r\n\ttulemustest viisakalt vormindatud tabeli faili, sõnade pikkuse järgi kasvavalt, sõnade esinemise sageduse\r\n\tjärgi kahanevalt, sama sageduse korral tähestiku järjekorras.\r\n\t\"\"\"\r\n\tfail = open(filename, 'w')\r\n\tstring = '|{0:6}|{1:12}|{2:7}|\\n'.format('Count', 'Word', 'Length')\r\n\tfail.write(string)\r\n\tfor words in dct:\r\n\t\tword_list = sorted(words[1], key=str.lower)\r\n\t\tfor word in word_list:\r\n\t\t\t#fail.write(str(words[0])+ '|'+ word+ '|'+ str(len(word)) + '\\n')\r\n\t\t\tstring = '|{0:6}|{1:12}|{2:7}|\\n'.format(words[0], word, len(word))\r\n\t\t\tfail.write(string)\r\n\r\n\tfail.close()\r\n\r\n\treturn 0"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6775068044662476,
"avg_line_length": 27.461538314819336,
"blob_id": "97fe520b9cc0a6da673dd2727abb0b17ca1866f3",
"content_id": "39b551c32600eb31ba8b6fe0c3ddf172f75c1475",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 13,
"path": "/exercise/Archer.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "from CharecterClass import Character\n\nclass Archer(Character):\n\tdef __init__(self, name, level):\n\t\tself.archeryLevel = 80\n\t\tsuper(Archer, self).__init__(name, level)\n\n\tdef sayFunction(self):\n\t\tprint 'I am Archer and my name is: ' + self.name + ' my level is: ' + str(self.level) + ' archery level is: ' + str(self.archeryLevel)\n\n\na = Archer('Artur', 50)\na.sayFunction()"
},
{
"alpha_fraction": 0.5045416951179504,
"alphanum_fraction": 0.5301403999328613,
"avg_line_length": 23.219999313354492,
"blob_id": "2164168b2c63c5c4318b449873f15f6189fa4618",
"content_id": "6aea779870b1c57d722e25fd989bbf083611565b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1212,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 50,
"path": "/pythonLessons/Advanced programming/Ülesanne 15 - Matplotlib/Ülesanne 15 - Matplotlib.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nÜlesanne 14 - Sympy\nJuhend: https://courses.cs.ttu.ee/w/images/3/32/ITI0140_2014_Loeng_matplotlib.pdf\n\"\"\"\n__author__ = \"Borka Martin Orlov\"\n__email__ = \"[email protected]\"\n\nfrom matplotlib import pyplot\ndef get_data(file):\n \"\"\"\n Read data from CSV file\n \"\"\"\n data = {}\n data['years'] = []\n data['births'] = []\n data['deaths'] = []\n data['accretion'] = []\n\n for row in file:\n splitRow = row.split(';')\n data['years'].append( int(splitRow[0][1:-1]) )\n data['births'].append( int(splitRow[1]) )\n data['deaths'].append( int(splitRow[2]) )\n data['accretion'].append( int(splitRow[3]) )\n\n return data\n\ndef main():\n try:\n # Get data from CSV\n file = open(\"RV030sm.csv\")\n data = get_data(file)\n\n # Plot the data\n fig, ax1 = pyplot.subplots()\n ax1.set_xlabel(\"Aasta\")\n ax1.set_ylabel(\"Inimeste arv\")\n\n ax1.plot(data['years'], data['births'], \"g\")\n ax1.plot(data['years'], data['deaths'], \"r\")\n ax2 = ax1.twinx()\n ax2.plot(data['years'], data['accretion'], \"b\")\n ax2.set_ylabel(\"accretion\")\n pyplot.show()\n\n finally:\n file.close()\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6455172300338745,
"alphanum_fraction": 0.6579310297966003,
"avg_line_length": 22.233333587646484,
"blob_id": "c70703b5dc1411821c7427e2da9e8714037cb466",
"content_id": "23299e1dc1758484aae44d7212aa3e6f4c2da0b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 731,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 30,
"path": "/pythonLessons/Advanced programming/Ülesanne 6 - Moodulid ja erindid/Ülesanne 6 - moodulid.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nKuues kodune töö\r\nMoodulid ja erindid\r\nJuhend: https://courses.cs.ttu.ee/w/images/5/5d/ITI0140_Loeng_6_-_Moodulid_ja_erindid.pdf\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\nimport text_processing\r\n\r\ndef main():\r\n\t# Ava fail\r\n\ttry:\r\n\t\ttekst = open('The Hound of the Baskervilles.txt', 'r')\r\n\texcept:\r\n\t\tprint('Ilmnes viga')\r\n\t\texit()\r\n\t# Tekita sõnade list\r\n\tword_list = text_processing.process_file(tekst)\r\n\t# Loe korduvad sõnad kokku\r\n\tdct = text_processing.count_words(word_list)\r\n\t# Sordi sõnastik\r\n\tsorted_dct = text_processing.find_top_words(dct, 7)\r\n\t# Väljasta tulemus\r\n\ttext_processing.print_top_words(sorted_dct, 'ex6_output.txt')\r\n\t# Sulge fail\r\n\ttekst.close()\r\n\r\nif __name__ == '__main__':\r\n\tmain()"
},
{
"alpha_fraction": 0.6199460625648499,
"alphanum_fraction": 0.6199460625648499,
"avg_line_length": 17.600000381469727,
"blob_id": "06791a15382fffdb4296d6beca8c1205df512bd2",
"content_id": "bc65b2444f6f99a6a396b7fcc5dcc0892c3928f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 20,
"path": "/exercise/lesson4-classes.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "class Character(object):\n\tdef __init__(self):\n\t\tself.name = 'Borka'\n\t\tsuper(Character, self).__init__()\n\n\tdef add(self, a, b):\n\t\treturn a + b\n\nclass Test(object):\n\tdef __init__(self):\n\t\tself.name = 'Martin'\n\t\tsuper(Test, self).__init__()\n\nclass Human(Character, Test):\n\tdef __init__(self):\n\t\tsuper(Human, self).__init__()\n\nprint Human.mro()\nAndy = Human()\nprint Andy.name"
},
{
"alpha_fraction": 0.5857885479927063,
"alphanum_fraction": 0.6334488987922668,
"avg_line_length": 11.55434799194336,
"blob_id": "cadb7d34139a2f0b162c9a180a111d565dd1750f",
"content_id": "97f3e92bded3a5d71e5a7a7ebe6e5ac0f84ea4e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1154,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 92,
"path": "/pythonLessons/lesson.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "from modules import module as md\n\n# Single Comment\n\n\"\"\"\n\"\"\"\n\n'''\n\tMultiline comment\n'''\n\n\ninteger = 2\nif integer == 2:\n\tprint \"hey\"\nelif integer != 2:\n\tprint \"no\"\nelse:\n\tprint \"nope\"\n\nwhile integer < 5:\n\tprint integer\n\tinteger = integer + 1\n\nlst = [1, 2, \"Hey\"]\n\nfor item in lst:\n\tprint item\n\nfor index, item in enumerate(lst):\n\tprint index, item\n\ndef add(a,b):\n\tif a == 0 & b == 0:\n\t\treturn 0\n\telse:\n\t\treturn a + b\nprint add(3,4)\n\nprint md.add(10,10)\n\ntry:\n\tprint \"Borka\" / 2\nexcept NameError:\n\tprint \"Nope\"\nexcept:\n\tprint \"No\"\n\nfail = open('book.txt', 'r')\n\nfor line in fail:\n\tline = line.replace('\\n', '')\n\tprint line\n\nfail.close()\n\nfail = open('book.txt', 'a')\nfail.write('Hello')\nfail.close()\n\nclass className:\n\tdef __init__(self,a,b):\n\t\tself.a = a\n\t\tself.b = b\n\n\tdef add(self, a, b):\n\t\treturn a+b\n\n\tdef __del__(self):\n\t\tpass\n\nobj = className(4,5)\nprint obj.add(10, 10)\nprint obj.a\n\nboolean = True\nboolean2 = False\n\n'''\nFunction, accepts 2 parameters. 1 tuple, a list up tuples. \nThose tuples are (x,y) coordinates\nFirst tuple is a playing field size.\nThe list is a set of coordinates.\n\nfield((5,5),[(1,1), (1,3)])\n\n11211\n1X2X1\n11211\n00000\n00000\n\"\"\""
},
{
"alpha_fraction": 0.5774818658828735,
"alphanum_fraction": 0.5932203531265259,
"avg_line_length": 21.324323654174805,
"blob_id": "c4541e86e911a8808632af0ca45cbec684ed5200",
"content_id": "88112db93dcd1f11abe310b8718ef86419c7a0b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 826,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 37,
"path": "/pythonLessons/Advanced programming/Ülesanne 16 - Otsing/Tund16gen.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nModule to generate lists and random numbers.\n\"\"\"\n\nimport random\n\ndef __gimme_a_generator(max_number, seed):\n \"\"\"\n Generator to use for generating random numbers.\n \"\"\"\n r = random.Random(seed)\n while True:\n yield r.randrange(1, max_number)\n \ndef __gimme_a_list(generator, size):\n \"\"\"\n Returns a generated list to use for searching.\n \"\"\"\n return [next(generator) for _ in range(size)]\n\ndef gimme_my_input(size, seed):\n \"\"\"\n Returns a tuple consisting of the generated list and the generator to use.\n \"\"\"\n gen = __gimme_a_generator(2 * size, seed)\n lst = __gimme_a_list(gen, size)\n return (lst, gen)\n \n\ndef __main():\n inp = gimme_my_input(10, \"123456\")\n print(inp[0])\n print(next(inp[1]))\n print(next(inp[1]))\n\nif __name__ == \"__main__\":\n __main()\n"
},
{
"alpha_fraction": 0.5883575677871704,
"alphanum_fraction": 0.5945945978164673,
"avg_line_length": 21,
"blob_id": "39dfe3047dc7a374544332bc0dc4af81dae61d26",
"content_id": "ea13fa48dd6706f7d0f649c1244f4c8b1cad74da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 21,
"path": "/documents-export-2015-01-30/pythonlesson3.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "class Character(object):\r\n def __init__(self,name):\r\n self.health=100\r\n self.name=name\r\n def printName(self):\r\n print(self.name)\r\n\r\nclass Blacksmith(Character):\r\n def __init__(self,name,forgeName):\r\n super(Blacksmith,self).__init__(name)\r\n self.forge=Forge(forgeName)\r\n\r\n\r\nclass Forge:\r\n def __init__(self,forgeName):\r\n self.name=forgeName\r\n \r\n\r\nbs=Blacksmith(\"Bob\",\"Rick\\'s Forge\")\r\nbs.printName()\r\nprint(bs.forge.name)"
},
{
"alpha_fraction": 0.3881952464580536,
"alphanum_fraction": 0.5402951240539551,
"avg_line_length": 29.535715103149414,
"blob_id": "5f3c6816ef9c5abbdd3c03c4de7fcb384840dea6",
"content_id": "49aed1fabb1d2aa5e51c61a0ab327f863da7dc41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 883,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 28,
"path": "/pythonLessons/Advanced programming/Ülesanne 11 - RegEx/Ülesanne 11 - RegEx.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nÜlesanne 11 - RegEx\r\nJuhend: https://courses.cs.ttu.ee/w/images/0/07/2014_Loeng_11_-_Regular_expressions.pdf\r\n\r\nÜlesanded lehelt https://cs.uwaterloo.ca/~dtompkin/teaching/08a/lab7/\r\n1-5, 9-12, valida 3 13-19 seast.\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\ndef main():\r\n\texcercise_1 = '^[10]*'\r\n\texcercise_2 = '^[10]*0'\r\n\texcercise_3 = '^(00|01|10|11)*$'\r\n\texcercise_4 = '^[01]*(0110|1001)[01]*$'\r\n\texcercise_5 = '^([01]*)((0110[01]*1001)|(1001[01]*0110)|(011001)|(100110))([01]*)$'\r\n\r\n\texcercise_9 = '^pick[\\s-]{0,1}(up)\\s(truck)$'\r\n\texcercise_10 = '([A-Za-z0-9,]*\\s){2,3}[A-Za-z0-9]*[.!?]?$'\r\n\texcercise_11 = '^(cat)\\s([a-zA-Z]*\\s){0,2}(hat)$'\r\n\texcercise_12 = '([01]?[0-9]|2[0-3]):[0-5][0-9]'\r\n\r\n\texcercise_13 = '[ACGT]*(ATG)([ACGT]{3}){1,}(TAA|TAG|TGA)[ACGT]*'\r\n\texcercise_16 = '(1|01*0)+'\r\n\texcercise_17 = '^0?(10)*1?$'\r\n\r\nif __name__ == '__main__':\r\n\tmain()"
},
{
"alpha_fraction": 0.6226685643196106,
"alphanum_fraction": 0.6377331614494324,
"avg_line_length": 22.491228103637695,
"blob_id": "fbcd03d4b13d12a0349b384c6dba6a028206acd7",
"content_id": "0ff500c9c64ad7ea8e64f314aa0598e5dbe7b53f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1405,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 57,
"path": "/pythonLessons/Advanced programming/Ülesanne 12 - Image Processing/Ül 12 - Image processing.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nÜlesanne 12 - Image Processing\r\nJuhend: https://courses.cs.ttu.ee/w/images/d/d4/2014_Loeng_12_-_Image_processing.pdf\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\nfrom PIL import Image, ImageDraw\r\n\r\ndef get_word_list(filename):\r\n\t\"\"\"\r\n\tGenereerib antud failist sõnade järjendi\r\n\t\"\"\"\r\n\tfile_pointer = open(filename, 'r')\r\n\tword_list = []\r\n\r\n\tfor line in file_pointer:\r\n\t\twords = line.split()\r\n\t\tfor word in words:\r\n\t\t\tif word.isalpha():\r\n\t\t\t\tword_list.append(word)\r\n\treturn word_list\r\n\r\ndef pair_frequency(word_list):\r\n\t\"\"\"\r\n\tgenereerib sisendiks antud sõnede järjendist (nt ['Tere', 'banaan', ...]) kahemõõtmelise\r\n\ttähepaaride esinemissageduse maatriksi (nt 'te'*1, 'er'*1, 're'*1, 'ba'*1, 'an'*2, 'na'*1, 'aa'*1)\r\n\t\"\"\"\r\n\tpairs = {}\r\n\tfor word in word_list:\r\n\t\tfor index, char in enumerate(word):\r\n\t\t\tif index != len(word) - 1:\r\n\t\t\t\tpair = char + word[index + 1]\r\n\t\t\t\tif pair in pairs:\r\n\t\t\t\t\tpairs[pair] = pairs[pair] + 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tpairs[pair] = 1\r\n\treturn pairs\r\n\r\ndef create_heat_map(word_matrix):\r\n\t\"\"\"\r\n\tgenereerib kahemõõtmelisest maatriksist soojuskaardi.\r\n\tKasuta loodud funktsioone andes sisendiks sõned\r\n\tBaskerville'ide failist.\r\n\t\"\"\"\r\n\treturn 0\r\n\r\n\r\n\r\ndef main():\r\n\tword_list = get_word_list('The Hound of the Baskervilles.txt')\r\n\tword_matrix = pair_frequency(word_list)\r\n\tprint(word_matrix)\r\n\t#create_heat_map(word_matrix)\r\n\r\nif __name__ == '__main__':\r\n\tmain()"
},
{
"alpha_fraction": 0.5347925424575806,
"alphanum_fraction": 0.5583142638206482,
"avg_line_length": 26.339284896850586,
"blob_id": "94f145e234745e4448eb2878eba2f5c65addff5f",
"content_id": "76ec34f2c5c035a6a67327b1f761609b4a9cca8c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3080,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 112,
"path": "/pythonLessons/Advanced programming/Ülesanne 18 - Laiuti otsing/Ülesanne 18 - Laiuti otsing.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\nÜlesanne 18 - Laiuti otsing\nJuhend: https://courses.cs.ttu.ee/w/images/5/52/2014_Loeng_19_-_BFS.pdf\n\"\"\"\n__author__ = \"Borka Martin Orlov\"\n__email__ = \"[email protected]\"\n\nimport networkx\nimport random\n\ndef bfs1(source, target, graph):\n\t\"\"\"\n\tKasutatakse järjekorda (queue) järgmise tipu valikuks ja ei jäta meelde\n\ttippe, mis on juba läbi käidud või mida on juba nähtud. \n\t\"\"\"\n\n queue = [source]\n nodesSearched = 0\n \n while True:\n nodesSearched += 1\n node = queue.pop(0)\n if node == target: break\n queue += graph[node]\n\n return nodesSearched\n\ndef bfs2(source, target, graph):\n\t\"\"\"\n\tKasutatakse järjekorda järgmise tipu valikuks ja hulka (set), et jätta\n\tmeelde tipud, mis on juba läbi käidud.\n\t\"\"\"\n queue = [source]\n visited = set()\n nodesSearched = 0\n \n while queue != []:\n nodesSearched += 1\n node = queue.pop(0)\n while queue != [] and node in visited:\n node = queue.pop(0)\n if node not in visited:\n queue += graph[node]\n\n visited.add(node)\n if target in visited: break\n\n return nodesSearched\n\ndef bfs3(source, target, graph):\n\t\"\"\"\n\tKasutatakse järjekorda järgmise tipu valikuks ja hulka, et jätta meelde\n\ttipud, mida on juba nähtud ja mis on järjekorda mingil hetkel lisatud.\n\tIga otsingualgoritm peaks tagastama läbivaadatud tippude arvu.\n\t\"\"\"\n queue = [source]\n seen = set()\n nodesSearched = 0\n \n while queue != []:\n nodesSearched += 1\n node = queue.pop(0)\n while queue != [] and node in seen:\n node = queue.pop(0)\n if node not in seen:\n queue += graph[node]\n\n seen.add(node)\n seen |= set(graph[node])\n if target in seen: break\n\n return nodesSearched\n\ndef main():\n random.seed()\n print(\"| algorithm \\t| total nodes \\t| nodes searched\")\n\n for i in range(3, 10):\n print()\n G = networkx.fast_gnp_random_graph(2**i, random.uniform(0.0, 0.3), 142443)\n graph = networkx.to_dict_of_lists(G)\n tests1 = []\n tests2 = []\n tests3 = []\n\n for n in range(100):\n while True:\n source = random.randrange(0, len(graph))\n target = random.randrange(0, len(graph))\n if source != target: break\n\n # Test for path\n try:\n test = networkx.shortest_path(G, source, target)\n path_exists = True\n except networkx.NetworkXNoPath:\n path_exists = False\n\n if path_exists:\n tests1.append(bfs1(source, target, graph))\n else: # Path doesn't exist\n tests1.append(len(graph))\n\n tests2.append(bfs2(source, target, graph))\n tests3.append(bfs3(source, target, graph))\n\n print(\"| {0} \\t\\t| {1} \\t\\t| {2}\".format(\"bfs1\", 2**i, sum(tests1)))\n print(\"| {0} \\t\\t| {1} \\t\\t| {2}\".format(\"bfs2\", 2**i, sum(tests2)))\n print(\"| {0} \\t\\t| {1} \\t\\t| {2}\".format(\"bfs3\", 2**i, sum(tests3)))\n\nif __name__ == \"__main__\":\n main()"
},
{
"alpha_fraction": 0.7310606241226196,
"alphanum_fraction": 0.7452651262283325,
"avg_line_length": 35.08771896362305,
"blob_id": "13750570a8f92e2aaa58c0375272bbd745c9bbcb",
"content_id": "e7b7b1cccb2e3af00f7972a0970acf35503beca4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2152,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 57,
"path": "/pythonLessons/Advanced programming/Ülesanne 5 - Failid/Ülesanne 5 - Unikaalsed sõnad.py",
"repo_name": "Herra-Dronich/PythonLessons",
"src_encoding": "UTF-8",
"text": "\"\"\"\r\nTekstitöötluse ülesande alus on kaks A. C. Doyle'i raamatut, mis on vabalt kättesaadavad Project\r\nGutenberg kaudu:\r\n1)A Study in Scarlet (.txt)\r\n2)The Hound of the Baskervilles (.txt)\r\n\r\nTekstid on vaja jaotada sõnadeks. Sõnaks loeme antud juhul kõik järjest asetsevad tähed, mille kohta Pythoni isalpha()\r\nfunktsioon vastab tõeselt. Sõnade ümber asetsevad muud sümbolid. Sõnadeks jaotamine ei peaks olema tõstutundlik.\r\n\r\nLeida:\r\n1)kui palju on sõnu kokku tekstis 1\r\n2)kui palju on sõnu kokku tekstis 2\r\n3)kui palju on unikaalseid sõnu tekstis 1\r\n4)kui palju on unikaalseid sõnu tekstis 2\r\n5)kui palju on unikaalseid sõnu tekstis 1 ja tekstis 2 kokku\r\n6)kui palju on unikaalseid sõnu tekstis 1, mida tekstis 2 pole\r\n7)kui palju on unikaalseid sõnu tekstis 2, mida tekstis 1 pole\r\n8)kui palju on unikaalseid sõnu, mis esinevad mõlemas tekstis\r\n\r\n\r\n\"\"\"\r\n__author__ = \"Borka Martin Orlov\"\r\n__email__ = \"[email protected]\"\r\n\r\nimport unique_words\r\n\r\n# Unikaalsed sõnad mõlema teksti kohta\r\nunique_words_scarlet = []\r\nunique_words_baskervilles = []\r\n\r\ndef main():\r\n\tglobal unique_words_baskervilles, unique_words_scarlet\r\n\r\n\t# Ülesanded 1-4\r\n\t# Leida sõnade kogusumma ja unikaalsete sõnade arv mõlemas tekstis ja lisab nad listi\r\n\t# Tagastab unikaalsete sõnade arvu järgneva ülesande jaoks\r\n\twordcount_1 = unique_words.count_words('A Study in Scarlet.txt', unique_words_scarlet)\r\n\twordcount_2 = unique_words.count_words('The Hound of the Baskervilles.txt', unique_words_baskervilles)\r\n\r\n\t# Ülesanne 5\r\n\t# Unikaalseid sõnu kahe teksti peale kokku \r\n\tprint('Unikaalseid sonu on kahe teksti peale', wordcount_1 + wordcount_2)\r\n\r\n\t# Ülesanded 6-7\r\n\t# Unikaalseid sõnu mida teises tekstis pole\r\n\tprint('A Study in Scarlet:', end=' ')\r\n\tunique_words.compare_words(unique_words_scarlet, unique_words_baskervilles)\r\n\tprint('The Hound of the Baskervilles:', end=' ')\r\n\tunique_words.compare_words(unique_words_baskervilles, unique_words_scarlet)\r\n\r\n\t# Ülesanne 8\r\n\t# Unikaalsete sõnad mis esinevad mõlemas tekstis\r\n\tunique_words.words_in_both(unique_words_scarlet, unique_words_baskervilles)\r\n\t\r\n# Beginning\r\nif __name__ == '__main__':\r\n\tmain()"
}
] | 24 |
cjlyth/sous-vide-pi | https://github.com/cjlyth/sous-vide-pi | ba379339fab0f6e1885ec80c517955cd58b0bc74 | bc65437a3dba6fedab8f693843383c6f62c6e0bb | f7aacd07a74adc4f002da76ae2334f5070227656 | refs/heads/master | 2021-01-12T13:52:26.519038 | 2016-09-25T18:27:00 | 2016-09-25T18:27:00 | 69,071,582 | 0 | 1 | null | 2016-09-24T01:17:24 | 2016-09-24T23:00:30 | 2016-09-25T18:27:00 | Java | [
{
"alpha_fraction": 0.5782312750816345,
"alphanum_fraction": 0.5938775539398193,
"avg_line_length": 24.807018280029297,
"blob_id": "395b92a65d2398be9adba168ba5376a78655fcb0",
"content_id": "6678138a6ac8cebe561cc14c507af66bff918713",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1470,
"license_type": "permissive",
"max_line_length": 87,
"num_lines": 57,
"path": "/src/temp_control.py",
"repo_name": "cjlyth/sous-vide-pi",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\nimport time\nimport serial\nimport requests\nimport json\nfrom decimal import Decimal\n\nser = serial.Serial(\n port='/dev/ttyACM0',\n baudrate = 1200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\ncounter=0\nlogsUrl='https://sousvide.lyth.io/api/logs'\nheaders = {'Content-Type': 'application/json'}\n\nser.write('0')\n\ndef getConfig():\n resp = requests.get(\"https://sousvide.lyth.io/api/configuration\")\n if resp.status_code == 200 :\n return resp.json();\n else :\n return {'running': False}\n\ndef sendLog(temp):\n payload = json.dumps({'temperature': temp})\n resp = requests.post(logsUrl, headers = headers, data = payload)\n if resp.status_code == 200 :\n return resp.json();\n else :\n return {'temperature':0, 'timestamp':0}\n\nwhile True:\n config = getConfig()\n if config['running'] :\n minTemp = config['temperature'] - 5\n maxTemp = config['temperature']\n sTemp = ser.readline()\n dTemp = Decimal(sTemp)\n print('read temp: {}').format(dTemp)\n sendLog(sTemp)\n print('current temp: {}, desired range: {} - {}').format(dTemp,minTemp,maxTemp)\n if dTemp > 0:\n if dTemp < maxTemp:\n ser.write('1')\n else:\n ser.write('0')\n # print('is running: {}').format(config['running'])\n else:\n ser.write('0')\n time.sleep(60)"
}
] | 1 |
jnothman/pickleback | https://github.com/jnothman/pickleback | 3ca6efe6b6e7e51d4a2a7e4bb8aba4b7d4846d3b | 7f132a36596c7aa9daac2fa869c7dd24f171c88f | c981778b89b586b544db8a67ab26bc4e7a0bb58f | refs/heads/master | 2021-05-04T19:03:14.298691 | 2017-10-15T12:31:21 | 2017-10-15T12:31:21 | 106,520,160 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8461538553237915,
"alphanum_fraction": 0.8615384697914124,
"avg_line_length": 8.285714149475098,
"blob_id": "5834316a89ba5a173ffd1eb720ad8495594d724a",
"content_id": "7ff8a7c5d9ef9049fc3c26578aeb7a36cedb04f0",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 65,
"license_type": "permissive",
"max_line_length": 16,
"num_lines": 7,
"path": "/test_requirements.txt",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "setuptools\npytest\nflake8\npytest-cov\npython-coveralls\n\nmatplotlib\n"
},
{
"alpha_fraction": 0.6826589703559875,
"alphanum_fraction": 0.6867052316665649,
"avg_line_length": 31.037036895751953,
"blob_id": "d206a45924d855c944d96e570db9f4c014e56f3c",
"content_id": "7552c71275ca63e17f0df31ebadc8c92b8639213",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1730,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 54,
"path": "/README.rst",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "Matplotlib pickle backend : pickleback\n======================================\n\nMatplotlib backend defining export to pickle (extension `.pkl` or `.pickle`).\n\n|version| |licence| |py-versions|\n\n|issues| |build| |docs| |coverage|\n\n\nInteractive usage::\n\n >>> import pickleback\n >>> pickleback.register()\n >>> my_figure.savefig('/path/to/output.pkl') # doctest: +SKIP\n\nScript-based usage, assuming my script uses ``savefig`` to the path specified\nby ``-o``::\n\n $ python -m pickleback /path/to/myscript.py -o /path/to/output.pkl\n\nor::\n\n $ python -m pickleback -m my_module.command -o /path/to/output.pkl\n\n\n\n.. |py-versions| image:: https://img.shields.io/pypi/pyversions/pickleback.svg\n :alt: Python versions supported\n\n.. |version| image:: https://badge.fury.io/py/pickleback.svg\n :alt: Latest version on PyPi\n :target: https://badge.fury.io/py/pickleback\n\n.. |build| image:: https://travis-ci.org/jnothman/pickleback.svg?branch=master\n :alt: Travis CI build status\n :scale: 100%\n :target: https://travis-ci.org/jnothman/pickleback\n\n.. |issues| image:: https://img.shields.io/github/issues/jnothman/pickleback.svg\n :alt: Issue tracker\n :target: https://github.com/jnothman/pickleback\n\n.. |coverage| image:: https://coveralls.io/repos/github/jnothman/pickleback/badge.svg\n :alt: Test coverage\n :target: https://coveralls.io/github/jnothman/pickleback\n\n.. |docs| image:: https://readthedocs.org/projects/pickleback/badge/?version=latest\n :alt: Documentation Status\n :scale: 100%\n :target: https://pickleback.readthedocs.io/en/latest/?badge=latest\n\n.. |licence| image:: https://img.shields.io/badge/Licence-BSD-blue.svg\n :target: https://opensource.org/licenses/BSD-3-Clause\n"
},
{
"alpha_fraction": 0.5249221324920654,
"alphanum_fraction": 0.5311526656150818,
"avg_line_length": 48.38461685180664,
"blob_id": "95c80132e28e47b83c5e203a8be55130a5a2d7e8",
"content_id": "b79c40211708a53ac54964961a2f0b6ea871d73d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1284,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 26,
"path": "/test_pickleback.py",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "import pickle\nimport subprocess\n\n\ndef test_with_script(tmpdir):\n tmpdir.join('plot.py').write('import sys\\n'\n 'import matplotlib.pyplot as plt\\n'\n 'import matplotlib\\n'\n 'matplotlib.use(\"Agg\")\\n'\n 'plt.scatter([5, 6, 7], [8, 9, 10])\\n'\n 'plt.title(\"Hello world\")\\n'\n 'plt.savefig(sys.argv[1])\\n'\n )\n script_path = str(tmpdir.join('plot.py'))\n subprocess.check_call(['python', script_path,\n str(tmpdir.join('plot.raw'))])\n subprocess.check_call(['python', '-m', 'pickleback', script_path,\n str(tmpdir.join('plot.pkl'))])\n fig = pickle.load(open(str(tmpdir.join('plot.pkl')), 'rb'))\n # FIXME: fig.canvas comes back None. I've not yet understood why/how\n from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n fig.canvas = FigureCanvas(fig)\n fig.savefig(str(tmpdir.join('plot-via-pkl.raw')))\n expected_bytes = tmpdir.join('plot.raw').read(mode='rb')\n actual_bytes = tmpdir.join('plot-via-pkl.raw').read(mode='rb')\n assert expected_bytes == actual_bytes\n"
},
{
"alpha_fraction": 0.6010165214538574,
"alphanum_fraction": 0.6010165214538574,
"avg_line_length": 27.445783615112305,
"blob_id": "6890420ea24509dc9541a6b3ac6ba0bf20c61290",
"content_id": "16c368b002b8b8b71cfd85c063394ffa0d74dcf5",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2361,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 83,
"path": "/pickleback/backend_pkl.py",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport pickle\n\nfrom matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase\nfrom matplotlib.figure import Figure\n\n\n########################################################################\n#\n# The following functions and classes are for pylab and implement\n# window/figure managers, etc...\n#\n########################################################################\n\ndef new_figure_manager(num, *args, **kwargs):\n \"\"\"\n Create a new figure manager instance\n \"\"\"\n # if a main-level app must be created, this (and\n # new_figure_manager_given_figure) is the usual place to\n # do it -- see backend_wx, backend_wxagg and backend_tkagg for\n # examples. Not all GUIs require explicit instantiation of a\n # main-level app (egg backend_gtk, backend_gtkagg) for pylab\n FigureClass = kwargs.pop('FigureClass', Figure)\n thisFig = FigureClass(*args, **kwargs)\n return new_figure_manager_given_figure(num, thisFig)\n\n\ndef new_figure_manager_given_figure(num, figure):\n \"\"\"\n Create a new figure manager instance for the given figure.\n \"\"\"\n canvas = FigureCanvasPickle(figure)\n manager = FigureManagerPickle(canvas, num)\n return manager\n\n\nclass FigureCanvasPickle(FigureCanvasBase):\n \"\"\"\n The canvas the figure renders into. Not applicable.\n\n Attributes\n ----------\n figure : `matplotlib.figure.Figure`\n A high-level Figure instance\n\n \"\"\"\n\n def draw(self):\n pass\n\n filetypes = {}\n filetypes['pkl'] = 'Python pickle format'\n filetypes['pickle'] = 'Python pickle format'\n\n def print_pkl(self, filename, *args, **kwargs):\n pickle.dump(self.figure, open(filename, 'wb'))\n\n print_pickle = print_pkl\n\n def get_default_filetype(self):\n return 'pkl'\n\n\nclass FigureManagerPickle(FigureManagerBase):\n \"\"\"\n Wrap everything up into a window for the pylab interface\n\n For non interactive backends, the base class does all the work\n \"\"\"\n pass\n\n########################################################################\n#\n# Now just provide the standard names that backend.__init__ is expecting\n#\n########################################################################\n\n\nFigureCanvas = FigureCanvasPickle\nFigureManager = FigureManagerPickle\n"
},
{
"alpha_fraction": 0.5321375131607056,
"alphanum_fraction": 0.5426008701324463,
"avg_line_length": 23.77777862548828,
"blob_id": "fd1bcdbc00599cb0d0b6b451a723ce5b45f0ea55",
"content_id": "cbb63477468a5bf2a3dbd3138e7a9988854e0a5e",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 669,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 27,
"path": "/pickleback/__main__.py",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "from __future__ import print_function\nimport sys\nimport runpy\n\nfrom pickleback import register\n\n\ndef main():\n if len(sys.argv) == 1:\n print('Usage: {prog} /path/to/python-script.py [script-args...]\\n'\n 'Usage: {prog} -m path.to.module [script-args...]\\n'\n '\\n'\n 'Loads matplotlib backend for pkl extension before running'\n 'script')\n sys.exit(1)\n\n register()\n del sys.argv[0]\n if sys.argv[0] == '-m':\n del sys.argv[0]\n runpy.run_module(sys.argv[0], run_name='__main__')\n else:\n runpy.run_path(sys.argv[0], run_name='__main__')\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.5925925970077515,
"avg_line_length": 48.5,
"blob_id": "78210d3693ba8d5c26d5d607fcdc440cd2d89f82",
"content_id": "50d723262fd965cf2d10d9f6a3f6ef4c5166fe24",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 6,
"path": "/pickleback/__init__.py",
"repo_name": "jnothman/pickleback",
"src_encoding": "UTF-8",
"text": "def register():\n from matplotlib.backend_bases import register_backend\n for ext in ['pkl', 'pickle']:\n register_backend(ext, 'pickleback.backend_pkl',\n 'Python pickle format, to be used with caution on a '\n 'single matplotlib version')\n"
}
] | 6 |
efficientc/python_encrypt | https://github.com/efficientc/python_encrypt | 066998ef03efdb00d948ec77ce4731177c3e1e00 | a15a712cf5baa8a0c42624be44ce70980a355b97 | e2f10e819f0d5fb469f527607da9403fce2b5100 | refs/heads/master | 2022-04-26T20:14:55.649891 | 2020-04-21T18:34:58 | 2020-04-21T18:34:58 | 255,650,365 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.4610532820224762,
"alphanum_fraction": 0.4780826270580292,
"avg_line_length": 40.279998779296875,
"blob_id": "c5448fc4ecae00c5ce5907a165872166915b79b2",
"content_id": "b88f294100f1c0adc8b5e908267a5e329696f480",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3171,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 75,
"path": "/decrypt_all.py",
"repo_name": "efficientc/python_encrypt",
"src_encoding": "UTF-8",
"text": "__author__ = 'EfficientC'\r\n# this is a script that will decrypt a message back into the original string enter before it was passed through encryptall.py\r\n# 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+,-./:;?@[\\\\]^_`{|}~\r\n# 56789abcdefghij01234klmnopqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'()*+,-\\\\]^_`{|}\r\n# ~defg 56789ab_`{|}chij01234klmno()*+,-pqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'\\\\]^\r\nimport string\r\n\r\n# create list of all characters to use:\r\nfull_list = []\r\nfull_list_string = '~defg 56789ab_`{|}chij01234klmno()*+,-pqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'\\\\]^'\r\nfor char in full_list_string:\r\n full_list.append(char)\r\n\r\n# ========================================================================================================\r\n# define the decryption function\r\ndef decrypt(message):\r\n # this array will hold the index number of all string chars and then be converted to new index numbers\r\n encrypted_list = []\r\n converted_chars = []\r\n decrypted_letters = []\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign values to letters/char index\r\n values = dict()\r\n for index, letter in enumerate(full_list):\r\n values[letter] = index + 1\r\n\r\n # assign letters to values\r\n letter_list = dict()\r\n for index, letter in enumerate(full_list):\r\n letter_list[index + 1] = letter\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # concatenates the encrypted string back into a list\r\n for i in message:\r\n encrypted_list.append(i)\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign values to letters\r\n for i in encrypted_list:\r\n converted_chars.append(values[i])\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign the correct numeric value to each list item\r\n for n, i in enumerate(converted_chars):\r\n # for those below 45, add 45\r\n if int(i) < 46:\r\n converted_chars[n] = int(i) + 46\r\n else:\r\n # subtract 45\r\n converted_chars[n] = int(i) - 46\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign letters to values\r\n for i in converted_chars:\r\n # add alpha char to array\r\n decrypted_letters.append(letter_list[i])\r\n\r\n # print(encrypted_list)\r\n # print(converted_chars)\r\n # print(decrypted_letters)\r\n\r\n # function that concatenates a list into a string\r\n def listtostring(list):\r\n string = ''\r\n for i in list:\r\n string += i\r\n return string\r\n\r\n decrypted_msg = listtostring(decrypted_letters)\r\n print('The decrypted message is: ' + decrypted_msg)\r\n\r\n# ========================================================================================================\r\n\r\n# prompt user for message to decrypt\r\ndecrypt(input('Enter an encrypted message to decrypt: '))\r\n"
},
{
"alpha_fraction": 0.47715404629707336,
"alphanum_fraction": 0.49575719237327576,
"avg_line_length": 42.40579605102539,
"blob_id": "642377e014e28d209fd7318cb8524b6077978c46",
"content_id": "9489e6c54becede75feb0f5052635dc35b4a56cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3064,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 69,
"path": "/encrypt_all.py",
"repo_name": "efficientc/python_encrypt",
"src_encoding": "UTF-8",
"text": "__author__ = 'EfficientC'\r\n# this is a script that will encrypt all characters a message into an illegible string of characters\r\n# 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\\'()*+,-./:;?@[\\\\]^_`{|}~\r\n# 56789abcdefghij01234klmnopqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'()*+,-\\\\]^_`{|}\r\n# ~defg 56789ab_`{|}chij01234klmno()*+,-pqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'\\\\]^\r\nimport string\r\n\r\n# this array will hold the index number of all string chars and then be converted to new index numbers\r\nmsg_index_array = []\r\nconverted_chars = []\r\n\r\n# ========================================================================================================\r\n# define the encryption function\r\ndef encrypt(message):\r\n # create list of all characters to use:\r\n full_list = []\r\n full_list_string = '~defg 56789ab_`{|}chij01234klmno()*+,-pqwxyzABCDErstuvFGHI$PQR./:;?@[STUVWXYZ!\"#%JKLMNO&\\'\\\\]^'\r\n for char in full_list_string:\r\n full_list.append(char)\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign values to letters/char index\r\n values = dict()\r\n for index, letter in enumerate(full_list):\r\n values[letter] = index + 1\r\n\r\n # assign letters to values\r\n letter_list = dict()\r\n for index, letter in enumerate(full_list):\r\n letter_list[index + 1] = letter\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # append an index number for each char in the message passed (only lowercase with spaces)\r\n for char in message:\r\n # add numeric value to new array\r\n msg_index_array.append(values[char])\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # change new index array values for encryption process\r\n for n, i in enumerate(msg_index_array):\r\n # to prevent an index from going above 92, the index no. is subtracted by 46 to start at 1 again\r\n if i > 46:\r\n msg_index_array[n] = i - 46\r\n else:\r\n # each index increases by 46\r\n msg_index_array[n] = i + 46\r\n\r\n # ----------------------------------------------------------------------------------------------------\r\n # assign values to letters\r\n for n, i in enumerate(msg_index_array):\r\n # if i == 91:\r\n # converted_chars.append('~')\r\n # else:\r\n converted_chars.append(letter_list[i])\r\n\r\n# ----------------------------------------------------------------------------------------------------\r\n # function that concatenates a list into a string\r\n def listtostring(list):\r\n string = ''\r\n for i in list:\r\n string += i\r\n return string\r\n\r\n # create and print newly encrypted string\r\n cryptostring = listtostring(converted_chars)\r\n print('Here is your encrypted message: ' + cryptostring)\r\n\r\n# prompt user for message to encrypt\r\nencrypt(input('Type a message to encrypt: '))\r\n"
}
] | 2 |
1515806183/flask-toys | https://github.com/1515806183/flask-toys | 6f7e7ca3cdaf0d44a3023ff252f2eed47b502b42 | ab3ec41eb1b9ca0b3fec7593b4b79ad744243f59 | fc0afdf5ac13ea83e1beb9ab650fadc5791cd036 | refs/heads/master | 2020-12-02T02:37:17.875986 | 2020-01-06T07:26:29 | 2020-01-06T07:26:29 | 230,860,403 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.762499988079071,
"avg_line_length": 38.5,
"blob_id": "4bf50d3b217164632de69fdf0a08b8a3160052b0",
"content_id": "2a2334ab8ccea1a39807a108180f01c319fb8cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 2,
"path": "/Dockerfile",
"repo_name": "1515806183/flask-toys",
"src_encoding": "UTF-8",
"text": "FROM flask-toys:v1\nRUN pip install baidu-aip -i https://pypi.douban.com/simple\n\n"
},
{
"alpha_fraction": 0.6124030947685242,
"alphanum_fraction": 0.6511628031730652,
"avg_line_length": 19.421052932739258,
"blob_id": "a8226820130ea08f7027f4644c7a40bc6e3d63a2",
"content_id": "708668f4f85cb0af6c53271bbbfaf0f522cc7963",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 19,
"path": "/s1.py",
"repo_name": "1515806183/flask-toys",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom aip import AipSpeech\n\nAPP_ID = '18139977'\nAPI_KEY = 'G8LLOmt8fivpBX0RPQNzQKwc'\nSECRET_KEY = 'x2LbbQFHepdaI3fnxsk7LBbEYh9KlH7S'\n\nclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\nresult = client.synthesis('小哥哥来玩呀', 'zh', 1, {\n 'vol': 5,\n})\n\nprint(result)\n\n# 识别正确返回语音二进制 错误则返回dict 参照下面错误码\nif not isinstance(result, dict):\n with open('audio.mp3', 'wb') as f:\n f.write(result)"
},
{
"alpha_fraction": 0.5989717245101929,
"alphanum_fraction": 0.6452442407608032,
"avg_line_length": 14.600000381469727,
"blob_id": "ee22740fc86aa1c90ce8834925d52dac9d85da71",
"content_id": "2f0b24a52e8f31265d8b818e847ec673c67e27f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 25,
"path": "/s4.py",
"repo_name": "1515806183/flask-toys",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom aip import AipNlp\nimport os\n\nAPP_ID = '18139977'\nAPI_KEY = 'G8LLOmt8fivpBX0RPQNzQKwc'\nSECRET_KEY = 'x2LbbQFHepdaI3fnxsk7LBbEYh9KlH7S'\n\nclient = AipNlp(APP_ID, API_KEY, SECRET_KEY)\n\ntext1 = \"你姓什么\"\n\ntext2 = \"你贵姓\"\n\n\"\"\" 调用短文本相似度 \"\"\"\nclient.simnet(text1, text2)\n\n\"\"\" 如果有可选参数 \"\"\"\noptions = {}\noptions[\"model\"] = \"CNN\"\n\n\"\"\" 带参数调用短文本相似度 \"\"\"\nret = client.simnet(text1, text2, options)\n\nprint(ret)"
},
{
"alpha_fraction": 0.5309478044509888,
"alphanum_fraction": 0.5715667605400085,
"avg_line_length": 20.081632614135742,
"blob_id": "4f7575ccc7a56344f49085b90d8c987455799992",
"content_id": "01c1b13eeeade00762da101455818d7a4eeee9ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1090,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 49,
"path": "/s3.py",
"repo_name": "1515806183/flask-toys",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom aip import AipSpeech\nimport time, os\n\nAPP_ID = '18139977'\nAPI_KEY = 'G8LLOmt8fivpBX0RPQNzQKwc'\nSECRET_KEY = 'x2LbbQFHepdaI3fnxsk7LBbEYh9KlH7S'\n\nclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\ndef text2audio(text):\n filename = f\"{time.time()}.mp3\"\n result = client.synthesis(text, 'zh', 1, {\n 'vol': 5,\n 'spd': 3,\n 'pit': 7,\n \"per\": 4\n })\n\n if not isinstance(result, dict):\n with open(filename, 'wb') as f:\n f.write(result)\n\n return filename\n\n# print(text2audio('今天风和日历'))\n\n\ndef audio2text(filename):\n # 读取文件\n def get_file_content(filename):\n os.system(f\"ffmpeg -y -i {filename} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {filename}.pcm\")\n with open(f\"{filename}.pcm\", 'rb') as fp:\n return fp.read()\n\n # 识别本地文件\n ret = client.asr(get_file_content(filename), 'pcm', 16000, {\n 'dev_pid': 1536,\n })\n\n return ret['result'][0]\n\ntext = audio2text('1.m4a')\nprint(text)\n\nif text == '你的名字叫什么':\n text = '我的名字叫peach'\n\nfilename = text2audio(text)\n\n"
},
{
"alpha_fraction": 0.5782178044319153,
"alphanum_fraction": 0.6435643434524536,
"avg_line_length": 22,
"blob_id": "2e33c8b7ec01ff3ecc88cc9bc7f8dd305f7078bf",
"content_id": "165cb5b644b59f588f94b9b2636af3684dd40118",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 22,
"path": "/s2.py",
"repo_name": "1515806183/flask-toys",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom aip import AipSpeech\nimport os\n\nAPP_ID = '18139977'\nAPI_KEY = 'G8LLOmt8fivpBX0RPQNzQKwc'\nSECRET_KEY = 'x2LbbQFHepdaI3fnxsk7LBbEYh9KlH7S'\n\nclient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n\n# 读取文件\ndef get_file_content(filePath):\n os.system(f\"ffmpeg -y -i {filePath} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {filePath}.pcm\")\n with open(f\"{filePath}.pcm\", 'rb') as fp:\n return fp.read()\n\n# 识别本地文件\nret = client.asr(get_file_content('1.m4a'), 'pcm', 16000, {\n 'dev_pid': 1536,\n})\n\nprint(ret)"
}
] | 5 |
Caiasaura/saura-bot | https://github.com/Caiasaura/saura-bot | 13b3c821911990e190ca5cd55432a231791b41d3 | be2cc781eabb377bfe9be0910c7939e67d7d0b15 | b7301b96099f4c60e92e63a09e0a9d80b485fedd | refs/heads/main | 2023-06-29T10:50:38.877421 | 2021-08-01T02:30:10 | 2021-08-01T02:30:10 | 391,505,003 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6693877577781677,
"alphanum_fraction": 0.6952381134033203,
"avg_line_length": 19.05714225769043,
"blob_id": "a0cd1e0b9824b40978d66a318020f83699ecdd7c",
"content_id": "857c3c585f434b226530989b5cc723bc971bc1dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 35,
"path": "/saura-bot.py",
"repo_name": "Caiasaura/saura-bot",
"src_encoding": "UTF-8",
"text": "import discord\r\n\r\nTOKEN = open(\"token.txt\",\"r\").readline()\r\n\r\nintents = discord.Intents.default()\r\nintents.members = True\r\n\r\n\r\nclient = discord.Client(intents = intents)\r\n\r\[email protected]\r\nasync def on_ready():\r\n\tprint('We have logged on under the name {0.user}'.format(client))\r\n\r\[email protected]\r\nasync def on_message(message):\r\n\tif message.author == client.user:\r\n\t\treturn\r\n\r\n\tif message.content.startswith(‘.hello’):\r\n\t\tawait message.channel.send(‘Hello!’)\r\n\r\[email protected]\r\n\r\nasync def on_member_join(member):\r\n\tprint('got a thing')\r\n\tchannel = client.get_channel([862127249889296385])\r\n\tawait channel.send('Welcome to the server!')\r\n\r\[email protected]\r\nasync def on_member_remove(member):\r\n\r\n\tprint('lost a thing')\r\n\r\nclient.run(TOKEN)"
}
] | 1 |
iakirago/AiLearning-Theory-Applying | https://github.com/iakirago/AiLearning-Theory-Applying | 6ac5d6dcbc90a0ec68242bd72fffaa960abc4b69 | 86f8bdc562127a0fae60bf33f5067e2db3486769 | d9c3b987f89dfa5cbf9c9a0f22856bba3cf3afd4 | refs/heads/master | 2023-09-04T21:14:25.765843 | 2021-10-27T04:38:13 | 2021-10-27T04:38:13 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4512578547000885,
"alphanum_fraction": 0.6983752846717834,
"avg_line_length": 122.12903594970703,
"blob_id": "37f1b614bf6585fb6704d2c6151092df31965ee6",
"content_id": "8ed9347c766b1fe5f3e2bce3d9deaae243846f99",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4358,
"license_type": "permissive",
"max_line_length": 405,
"num_lines": 31,
"path": "/机器学习竞赛实战_优胜解决方案/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# README\n\n相关数据集可在[全部数据集下载地址.txt](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%85%A8%E9%83%A8%E6%95%B0%E6%8D%AE%E9%9B%86%E4%B8%8B%E8%BD%BD%E5%9C%B0%E5%9D%80.txt)下载\n\n### 难度由上至下递增\n\n- [信用卡欺诈检测](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E4%BF%A1%E7%94%A8%E5%8D%A1%E6%AC%BA%E8%AF%88%E6%A3%80%E6%B5%8B)\n - INFO: 逻辑回归、分类任务、预测0/1\n- [工业化工生产预测](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%B7%A5%E4%B8%9A%E5%8C%96%E5%B7%A5%E7%94%9F%E4%BA%A7%E9%A2%84%E6%B5%8B)\n - INFO: xgboost、细粒度处理、冠军方案\n- [智慧城市-道路通行时间预测](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%99%BA%E6%85%A7%E5%9F%8E%E5%B8%82-%E9%81%93%E8%B7%AF%E9%80%9A%E8%A1%8C%E6%97%B6%E9%97%B4%E9%A2%84%E6%B5%8B)\n - INFO: 时间分片、特征交叉、预测道路通行时间\n- [建筑能源利用率预测](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BB%BA%E7%AD%91%E8%83%BD%E6%BA%90%E5%88%A9%E7%94%A8%E7%8E%87%E9%A2%84%E6%B5%8B)\n - INFO: 模型流程、多模型比较及可视化、预测得分\n- [快手短视频用户活跃度分析](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BF%AB%E6%89%8B%E7%9F%AD%E8%A7%86%E9%A2%91%E7%94%A8%E6%88%B7%E6%B4%BB%E8%B7%83%E5%BA%A6%E5%88%86%E6%9E%90)\n - INFO: RNN网络、时间序数据、冠军方案\n- [Indoor Location & Navigation](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/Indoor%20Location%20%26%20Navigation)\n - INFO: 回归+分类、后处理、博主Top2%/19th\n- [ACM SIGSPATIAL 2021 GISCUP-预估到达时间](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP)\n - INFO:时空域模型、DCN-蒸馏+WD+LGB、博主Top1%/7th\n\n\n\n### 以及实战(练手)的小项目\n\n- [基于相似度的酒店推荐系统](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%AE%9E%E6%88%98%E5%B0%8F%E9%A1%B9%E7%9B%AE/%E5%9F%BA%E4%BA%8E%E7%9B%B8%E4%BC%BC%E5%BA%A6%E7%9A%84%E9%85%92%E5%BA%97%E6%8E%A8%E8%8D%90%E7%B3%BB%E7%BB%9F)\n - INFO: 文本处理、词权重、根据描述信息推荐\n- [常用特征构建方法](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%AE%9E%E6%88%98%E5%B0%8F%E9%A1%B9%E7%9B%AE/%E5%B8%B8%E7%94%A8%E7%89%B9%E5%BE%81%E6%9E%84%E5%BB%BA%E6%96%B9%E6%B3%95)\n - INFO: 图像特征、常用特征处理方法、文本特征处理方法\n- 
[特征工程建模](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%AE%9E%E6%88%98%E5%B0%8F%E9%A1%B9%E7%9B%AE/%E7%89%B9%E5%BE%81%E5%B7%A5%E7%A8%8B%E5%BB%BA%E6%A8%A1)\n - INFO: 模型解释工具、银行违约、疾病引起原因"
},
{
"alpha_fraction": 0.6298637390136719,
"alphanum_fraction": 0.6425795555114746,
"avg_line_length": 37.17558670043945,
"blob_id": "c7d3db03a2018f529e7ee8c78ee26e8994e51765",
"content_id": "32f08b27b07607ea4393bd0b33a3a9c46da51bd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 43324,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 1065,
"path": "/NLP通用框架BERT项目实战/第二章——BERT源码解读与应用实例/BERT源码工作流解读.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### BERT源码工作流解读\n\n#### 数据读取模块\n\n处理MRPC数据的类\n\n~~~python\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"] # 是否是二分类\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3]) # 相关的test_a和b怎么切分\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n~~~\n\n\n\n读取训练数据代码:\n\n~~~python\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) # 得到需要迭代的次数,len(train_examples)计算出多少数据量 除以 我们设置的train_batch_size,再乘上epochs次数。\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) # 在刚开始时,让学习率偏小,经过warmup的百分比后,再还原回原始的学习率\n~~~\n\n\n\n#### 数据预处理模块\n\n~~~python\n# 衔接上一个\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n\n# ctrl点击file_based_xxx函数跳转\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file) # TFRecord读取数据块,在bert中要求数据是TFRecord的形式。\n \n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples))) # for循环变量取数据\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer) # ctrl点击convert_xxx跳转\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {} # 构建标签0, 1\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a) # ctrl点击tokenize,对第一句话分词\n tokens_b = None\n if example.text_b: # 第二句话分词\n tokens_b = tokenizer.tokenize(example.text_b)\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\" # 保留3个特殊字符\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # 如果太长就截断的操作\n else: # 没有b的时候保留两个字符\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n \n 
# The convention in BERT is:\n # (a) For sequence pairs: # 将下面一对话,CLS开始,SEP断点,变成type_ids的0/1形式,0表示前一句,1表示后一句\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text): # 词切片,将一个词切片成多个小段,让表达的含义更丰富\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n~~~\n\n\n\n#### tfrecord制作\n\n~~~~python\n# 延续上面的convert_single_example模块\n # 开始构建,创建两个列表来承接\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\") # 第一个词是CLS\n segment_ids.append(0) # 第一个的编码也肯定是0\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0) # 遍历获取,a(第一句话)都是0\n tokens.append(\"[SEP]\") # 遍历完增加个SEP连接符/断电\n segment_ids.append(0) # tokens添加完SEP后,ids也添加对应的0\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1) # b和a一样,唯一不同的是添加的是1\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n \n input_ids = tokenizer.convert_tokens_to_ids(tokens) # 转成ID的映射,就是vocab语料库索引\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length. 保证输入的长度是一样的,多退少补\n while len(input_ids) < max_seq_length: # PAD的长度取决于设置的最大长度,小于全补0\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\") # 打印结果,这时候预处理的部分大致完成\n ...\n return feature\n~~~~\n\n> 将数据制作成tfcord的形式,以便除了速度更快\n\n\n\n返回原先的convert_single_example\n\n~~~python\n for (ex_index, example) in enumerate(examples): # 不断遍历处理数据\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer) # ctrl点击convert_xxx跳\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict() # 下面执行格式处理,处理成模型所需的格式\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features)) # 最后转换成tf的数据格式\n writer.write(tf_example.SerializeToString())\n writer.close()\n~~~\n\n\n\n#### Embedding层的作用\n\n~~~python\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel( # ctrl点击BertModel跳转\n config=bert_config, # 配置\n is_training=is_training,\n input_ids=input_ids, # 特征\n input_mask=input_mask, # 特征0/1\n token_type_ids=segment_ids, # 特征维度表示第一句话还是第二句\n use_one_hot_embeddings=use_one_hot_embeddings)\n 。。。\n\nclass BertModel(object):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 
0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n\n label_embeddings = tf.get_variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None: # 如果没设置mask,默认都是1\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None: # 没设置就默认一句话\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids. 词的embeddings\n (self.embedding_output, self.embedding_table) = embedding_lookup( # ctrl点击embedding_lookup跳转\n input_ids=input_ids, # 词\n vocab_size=config.vocab_size, # 语料库\n embedding_size=config.hidden_size, # 编码映射成多少维\n initializer_range=config.initializer_range, # 初始化范围\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n~~~\n\n\n\n~~~python\ndef embedding_lookup(input_ids,\n vocab_size,\n embedding_size=128,\n initializer_range=0.02,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=False):\n \"\"\"Looks up words embeddings for id tensor.\n\n Args:\n input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n ids.\n vocab_size: int. Size of the embedding vocabulary.\n embedding_size: int. Width of the word embeddings.\n initializer_range: float. Embedding initialization range.\n word_embedding_name: string. Name of the embedding table.\n use_one_hot_embeddings: bool. If True, use one-hot method for word\n embeddings. 
If False, use `tf.gather()`.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, embedding_size].\n \"\"\"\n # This function assumes that the input is of shape [batch_size, seq_length,\n # num_inputs].\n #\n # If the input is a 2D tensor of shape [batch_size, seq_length], we\n # reshape to [batch_size, seq_length, 1].\n if input_ids.shape.ndims == 2:\n input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n embedding_table = tf.get_variable( # 词映射矩阵\n name=word_embedding_name, # 词向量\n shape=[vocab_size, embedding_size], # 获取语料库大表vovab.txt\n initializer=create_initializer(initializer_range))\n\n flat_input_ids = tf.reshape(input_ids, [-1])\n if use_one_hot_embeddings:\n one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) # 查出所有词做one_hot\n output = tf.matmul(one_hot_input_ids, embedding_table) # 运算一个batch里所有的映射结果\n else:\n output = tf.gather(embedding_table, flat_input_ids)\n\n input_shape = get_shape_list(input_ids)\n\n output = tf.reshape(output,\n input_shape[0:-1] + [input_shape[-1] * embedding_size]) # 制作返回结果\n return (output, embedding_table) # 返回,词变成了向量\n~~~\n\n> 给数据做Embedding,再加入位置编码\n\n\n\n#### 位置编码\n\n~~~python\nclass BertModel(object):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n\n label_embeddings = tf.get_variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n\t...\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor( # 制作位置编码,ctrl点击embedding_postprocessor\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n~~~\n\n\n\n~~~python\ndef embedding_postprocessor(input_tensor,\n use_token_type=False,\n token_type_ids=None,\n token_type_vocab_size=16,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=0.02,\n max_position_embeddings=512,\n dropout_prob=0.1):\n \"\"\"Performs various post-processing on a word embedding tensor.\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length,\n embedding_size].\n use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n Must be specified if `use_token_type` is True.\n token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n token_type_embedding_name: string. The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. 
Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type: # 判断是第一句还是第二句,再做相应处理\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings: # 判断是否要做位置编码信息\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n # perform a slice.\n position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n [seq_length, -1]) # 如果位置编码给的过大,为了加速只需取出部分\n num_dims = len(output.shape.as_list())\n\n # Only the last two dimensions are relevant (`seq_length` and `width`), so\n # we broadcast among the first dimensions, which is typically just\n # the batch size.\n position_broadcast_shape = []\n for _ in range(num_dims - 2):\n position_broadcast_shape.append(1)\n position_broadcast_shape.extend([seq_length, width])\n position_embeddings = tf.reshape(position_embeddings,\n position_broadcast_shape)\n output += position_embeddings\n\n output = layer_norm_and_dropout(output, dropout_prob)\n return output\n~~~\n\n> 给数据加入位置编码\n\n\n\n#### mask机制\n\n~~~python\nclass BertModel(object):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n\n label_embeddings = tf.get_variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask) # 创建mask矩阵\n # 比如一个矩阵:[45,54,85,...,0,0,0]\n # [12,31,11,...,0,0,0]\n # [91,51,18,...,12,21,0]\n # 后面长度不足的都补0,mask后,有信息的变1,无信息的变0\n # [1,1,1,...,0,0,0]\n # [1,1,1,...,0,0,0]\n # [1,1,1,...,1,1,0]\n # 不管要知道二维的,还要知道三维的,如开头这句话This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # 把里面的维度再分一个维度,如左上角的45\n # [1,1,1,...,0,0,0] , 这里的1是指45能看到的信息是那些,有的则为1,并与其计算,为0则不与其进行计算\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model( # Ctrl点击跳转transformer_model\n input_tensor=self.embedding_output, # 3种embedding\n attention_mask=attention_mask, # 上面的需不需要计算的0,1,1则是要计算\n hidden_size=config.hidden_size, # 特征结果\n num_hidden_layers=config.num_hidden_layers, # Transformer中的隐层神经元个数\n num_attention_heads=config.num_attention_heads, # 多头机制,在bert的图解中有讲解\n intermediate_size=config.intermediate_size, # 全连接层神经元个数\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n~~~\n\n> 对数据进行mask,此时数据部分已加工完,开始做QKV计算\n\n\n\n#### 构建QKV矩阵\n\n~~~python\n# 通过上面的点击函数跳转到transformer_model\ndef transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https://arxiv.org/abs/1706.03762\n\n Also see:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. The non-linear activation function to apply\n to the output of the intermediate/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. 
Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0: # 判断是否能整除,否则后面会报错\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers): # 遍历层数,这层结果是下一层的输入\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer( # Ctrl点击attention_layer跳转\n from_tensor=layer_input,\n to_tensor=layer_input, # from和to都是self_tensor,即自己和自己本句的关联\n attention_mask=attention_mask, # 0/1\n num_attention_heads=num_attention_heads, # 多头参数\n size_per_head=attention_head_size, # 头大小\n attention_probs_dropout_prob=attention_probs_dropout_prob, # 丢弃\n initializer_range=initializer_range, # 初始化位置\n do_return_2d_tensor=True, # 是否返回2D特征\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n ...\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n Args:\n from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n \"\"\"\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n # We don't assume that `from_tensor` is a mask (although it could be). 
We\n # don't actually care if we attend *from* padding tokens (only *to* padding)\n # tokens so we create a tensor of all ones.\n #\n # `broadcast_ones` = [batch_size, from_seq_length, 1]\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n # Here we broadcast along two dimensions to create the mask.\n mask = broadcast_ones * to_mask\n\n return mask\n\n\ndef attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n This is an implementation of multi-headed attention based on \"Attention\n is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n this is self-attention. Each timestep in `from_tensor` attends to the\n corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n\n This function first projects `from_tensor` into a \"query\" tensor and\n `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n of tensors of length `num_attention_heads`, where each tensor is of shape\n [batch_size, seq_length, size_per_head].\n\n Then, the query and key tensors are dot-producted and scaled. These are\n softmaxed to obtain attention probabilities. The value tensors are then\n interpolated by these probabilities, then concatenated back to a single\n tensor and returned.\n\n In practice, the multi-headed attention are done with transposes and\n reshapes rather than actual separate tensors.\n\n Args:\n from_tensor: float Tensor of shape [batch_size, from_seq_length,\n from_width].\n to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n attention_mask: (optional) int32 Tensor of shape [batch_size,\n from_seq_length, to_seq_length]. The values should be 1 or 0. The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. 
(If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H] 加速内积计算\n query_layer = transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H] 加速内积计算\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n~~~\n\n> 此时完成QKV的计算,接下来消除维度影响、softmax\n\n\n\n#### 完成Transformer模块构建\n\n~~~python\n# 衔接上面的\ndef attention_layer(from_tensor,\n to_tensor,\n attention_mask=None,\n num_attention_heads=1,\n size_per_head=512,\n query_act=None,\n key_act=None,\n value_act=None,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n do_return_2d_tensor=False,\n batch_size=None,\n from_seq_length=None,\n to_seq_length=None):\n ...\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head))) # 消除维度对结果的影响\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - 
tf.cast(attention_mask, tf.float32)) * -10000.0 # mask为1时结果为0,mask为0时结果为非常大的负数\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder # 把上面的值加入原始得分里相当于mask为1不变,mask为0则变成很大的负数\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores) # 再softmax时,非常大的负数则无限接近于0,就相当于不考虑\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor: # 返回结果前判断维度是否一样,因为连接了很多层,会不断输入输出\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer\n~~~\n\n> 上面处理完后,还有残差连接,防止训练结果比不训练的更差\n\n\n\n#### 训练BERT模型\n\n~~~python\n# 回到transformer_model\ndef transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"): # 残差连接\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"): # 残差连接完,数据维度会增大,需要变回一直的维度\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n \n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output\n~~~\n\n> 最终,所有的结果已处理完成并输出向量,这样BertModel模块已经讲完,modeling.py的部分也完成了,我们再回到run_classifier.py\n\n~~~python\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, 
num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n ...\n # 前面的modeling.BertModel已经看过了,最终我们得到了QKV计算后的softmax层和残差连接后的结果\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value # 获取输出向量的维度hidden_size\n\n output_weights = tf.get_variable( # 构造全连接层,二分类的权重参数\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable( # 构造偏置b\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"): # 常规的loss function\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True) # 结果乘上权重\n logits = tf.nn.bias_add(logits, output_bias) # 再加上偏置项\n probabilities = tf.nn.softmax(logits, axis=-1) # 加上softmax层\n log_probs = tf.nn.log_softmax(logits, axis=-1) # 取log后的softmax,方便计算交叉熵损失\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) # 计算每个样本的损失\n loss = tf.reduce_mean(per_example_loss) # 求均值得到整体损失\n\n return (loss, per_example_loss, logits, probabilities) # 返回结果\n~~~\n\n"
},
{
"alpha_fraction": 0.45478442311286926,
"alphanum_fraction": 0.7486855983734131,
"avg_line_length": 14.5819673538208,
"blob_id": "c848251062ffaebb19e56b1064816bf9e41bd6fb",
"content_id": "bb823ac2aaebf44dfa3b204633b6a6b0d434a0d1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3102,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 122,
"path": "/机器学习算法原理及推导/李航——统计学习方法/4.朴素贝叶斯.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 4.朴素贝叶斯\n\n### 知识树\n\nKnowledge tree\n\n\n\n> P(y|x),P给定x的条件下,y的概率。如:P(y=我招女孩子喜欢的概率|我是学生)\n\n\n\n### 一个小故事\n\nA story\n\n1. 女朋友和妈妈掉河里,路人拿出3颗豆,两颗红豆1颗绿豆。如果我抽中红豆救女朋友,抽中绿豆救妈妈。\n2. 我和路人各种抽一颗,路人发现自己抽中的是绿豆,他想用剩下的那颗跟我换,我换不换?换豆和女朋友活下去的概率一样吗?\n\n\n\n**直觉来讲**:\n\n换不换豆我抽中红豆的概率应该都是1/3。这时路人跟我说他的是绿豆,排除一颗,我抽中红豆的概率是1/2。换不换概率都是1/2\n\n**条件概率**:\n\nP(A|B)表示在B发生的条件下,发生A的概率。\n\n\n\n计算:设A表示我抽中的是红豆,B表示路人抽中的是绿豆\n\n\n\n结论:如果要救女朋友,最好和路人交换(2/3)。如果要救妈,最好不要换。\n\n\n\n### 直观理解\n\nIntuitive understanding\n\n假设有一个手写数据集,里面有100条记录,分别是0-10。\n\n此时小红写了个数字X,怎么判断是数字几?\n\n朴素贝叶斯工作原理:\n\nP(Y = 0|X) = ?, P(Y = 1|X)=? ......, P(Y = 10|X) = ?\n\n找到概率最高的,就是对应的数字。\n\n\n\n### 数学理解\n\nMathmetical\n\n上面的数字判别公式修改为P(Y=Ck|X=x)。\n\n\n\n\n\n\n\n\n\n> 朴素贝叶斯的“朴素”原因是因为这里假设它们都是相互独立的。\n\n\n\n\n\n\n\n\n\n### 参数估计\n\nMathematical understanding\n\n\n\n\n\n> 其中I(yi = Ck) 这里的是指示函数,如果yi属于当前类别,则计1,否则0\n\n\n\n\n\n\n\n### 举个例子\n\nExample\n\n试由下表的训练数据学习一个朴素贝叶斯分类器,并确定x=(2,S)T的类标签记y。表中X(1),X(2)为特征,取值的集合分别为A1={1,2,3},A2={S,M,L},Y为类标记,Y∈C = {1,-1}。\n\n\n\n对于给定的计算:\n\n\n\n\n\n\n\n\n\n### 总结\n\nSummarization\n\n1. 条件概率公式:,表示在已发生事件B的情况下,事件A发生的概率。\n2. 使用条件概率公式逐步导出最后参数估计的步骤需牢记。\n3. 后续会遇到很多类似的推导过程,一般都是先各种替换变复杂最后简化。\n\n另,公式存在一点点问题,如公式的分母可能为0。\n\n"
},
{
"alpha_fraction": 0.7202346324920654,
"alphanum_fraction": 0.7337243556976318,
"avg_line_length": 28.39655113220215,
"blob_id": "f766591dcc696c285277163f3a8c1d6218a56cd7",
"content_id": "8a42c5cf420f299f511356bb228bb7146264578d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1911,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 58,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/UnivariateLinearRegression.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom linear_regression import LinearRegression\ndata = pd.read_csv('../data/world-happiness-report-2017.csv') # 导入数据\n# 得到训练和测试数据,以8:2切分\ntrain_data = data.sample(frac=0.8)\ntest_data = data.drop(train_data.index)\n\ninput_param_name = 'Economy..GDP.per.Capita.' # 特征features\noutput_param_name = 'Happiness.Score' # 标签label\n\nx_train = train_data[[input_param_name]].values # 构建数据\ny_train = train_data[[output_param_name]].values\n\nx_test = test_data[[input_param_name]].values\ny_test = test_data[[output_param_name]].values\n\n# 可视化展示 run, 可以看到训练数据和预测数据的分布\nplt.scatter(x_train, y_train, label='Train data')\nplt.scatter(x_test, y_test, label='Test data')\nplt.xlabel(input_param_name)\nplt.ylabel(output_param_name)\nplt.title('Happy')\nplt.legend()\nplt.show()\n\n# 训练线性回归模型\nnum_iterations = 500 # 迭代次数\nlearning_rate = 0.01 # 学习率\n\nlinear_regression = LinearRegression(x_train, y_train) # 初始化模型\n(theta, cost_history) = linear_regression.train(learning_rate, num_iterations)\n\nprint('开始时的损失:', cost_history[0])\nprint('训练后的损失:', cost_history[-1])\n\nplt.plot(range(num_iterations), cost_history)\nplt.xlabel('Iteration')\nplt.ylabel('Cost')\nplt.title('GD')\nplt.show()\n\n# 测试线性回归模型\npredictions_num = 100 # 预测100个\n# 拿最大和最小值画一条线\nx_predictions = np.linspace(x_train.min(), x_train.max(), predictions_num).reshape(predictions_num, 1)\ny_predictions = linear_regression.predict(x_predictions)\n\nplt.scatter(x_train, y_train, label='Train data')\nplt.scatter(x_test, y_test, label='Test data')\nplt.plot(x_predictions, y_predictions, 'r', label='Prediction')\nplt.xlabel(input_param_name)\nplt.ylabel(output_param_name)\nplt.title('Happy')\nplt.legend()\nplt.show()\n"
},
{
"alpha_fraction": 0.49095216393470764,
"alphanum_fraction": 0.6598448753356934,
"avg_line_length": 17.200000762939453,
"blob_id": "6603bba8dd46b80e84c30153587001a7a3201f42",
"content_id": "12559b82e9e505b1254e06821da88727ee6f171e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8547,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 255,
"path": "/机器学习算法原理及推导/李航——统计学习方法/5.决策树——每次选一边.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 5.决策树——每次选一边\n\nDecision tree\n\n\n\n### 知识树\n\nKnowledge tree\n\n\n\n\n\n### 一个小故事\n\nA story\n\n挑苹果:\n\n\n\n> 根据这些特征,如颜色是否是红色、硬度是否是硬、香味是否是香,如果全部满足绝对是好苹果,或者红色+硬但是无味也是好苹果,从上图可以看出来,只要做足够的循环判断即可得到结果。\n\n如下图:\n\n\n\n> 一步步走下来,就能挑到好苹果。这就是决策树\n\n1. 最顶端的叫根节点,所有样本的预测都是从根节点开始。\n2. 每一个圆形节点表示判断,每个节点只对样本的某个属性进行判断。\n3. 圆形节点是标记节点,走到圆形节点表示判断结束,将圆形节点中的标签作为对应的预测结果。\n\n如何构建决策树:\n\n1. 构建的决策树按顺序对每个特征进行判断(低效)\n2. 每个判断节点都尽可能让一半进入A分支,另一半进入B分支(高效)\n\n引入新的知识,信息熵\n\n\n\n### 信息熵\n\nInformation entropy\n\n1. 每走一步,我们都在确定苹果的好坏。\n2. 在根节点时,我们对苹果的好坏一无所知。\n3. 经过对颜色的判断后,如果是红色,我们明白好坏的概率是1/2。虽然还包含了1/2的不确定性。\n4. 如果苹果红色的前提下又硬,我们100%确定它是好苹果。此时不确定性坍塌为0。\n5. 这是一个减少不确定性的过程。\n\n从整体来讲,我们希望决策树每走一步,不确定性都下降的快一些,让我们的判断步数无限小。\n\n**什么是信息的不确定性?**\n\n就是信息熵\n\n在信息论与概率统计中,熵(entropy)是表示随机变量不确定性的度量,设X是一个取有限个值的离散随机变量,其概率分布为\n\n\n\n则随机变量X的熵定义为\n\n\n\n> 面试可能会问到这个公式,还有交叉熵、相对熵\n\n熵越大,则随机变量的不确定性越大。其中0 ≤ H(P) ≤ log n\n\n\n\n### 举例计算\n\nExample\n\n假设投色子,6个的概率分别是1/6,计算如下:\n\n\n\n> 其中6个1/6(log左边的六分之一)加起来就是1\n\n\n\n> \n\n则最终=log6\n\n这也解释了为什么上面H(P) ≤ log n\n\n另外,均由分布的时候,熵最大,因为所有可能都是一样的,如上面的6个面都是1/6。\n\n\n\n如果有1个坏苹果和9个好苹果时,我们可以认为大部分都是坏苹果。内部并不混乱,确定性很大,熵很小。\n\n\n\n### 信息增益\n\nInformation gain\n\n表示得知特征X的信息而使得类Y的信息的不确定性减少的程度。\n\n特征A对训练集D的信息增益g(D,A),定义为集合D的经验熵H(D)与特征A给定条件下D的经验条件熵H(D|A)之差,即:g(D, A) = H(D) - H(D|A)\n\n当前的信息熵等于划分完(如划分成两个)的信息熵之和。\n\n\n\n**信息增益算法**\n\n输入:训练数据集D和特征A\n\n输出:特征A对训练数据集D的信息\n\n1. 计算数据集D的经验熵H(D)\n\n \n\n2. 计算特征A对数据集D的经验条件熵H(D|A)\n\n \n\n \n\n3. 计算信息增益\n\n \n\n\n\n### 举个例子\n\nExample\n\n是否信贷\n\n| ID | 年龄 | 有工作 | 有自己房子 | 信贷情况 | 类别 |\n| ---- | ---- | ------ | ---------- | -------- | ---- |\n| 1 | 青年 | 否 | 否 | 一般 | 否 |\n| 2 | 青年 | 否 | 否 | 好 | 否 |\n| 3 | 青年 | 是 | 否 | 好 | 是 |\n| 4 | 青年 | 是 | 是 | 一般 | 是 |\n| 5 | 青年 | 否 | 否 | 一般 | 否 |\n| 6 | 中年 | 否 | 否 | 一般 | 否 |\n| 7 | 中年 | 否 | 否 | 好 | 否 |\n| 8 | 中年 | 是 | 是 | 好 | 是 |\n| 9 | 中年 | 否 | 是 | 非常好 | 是 |\n| 10 | 中年 | 否 | 是 | 非常好 | 是 |\n| 11 | 老年 | 否 | 是 | 非常好 | 是 |\n| 12 | 老年 | 否 | 是 | 好 | 是 |\n| 13 | 老年 | 是 | 否 | 好 | 是 |\n| 14 | 老年 | 是 | 否 | 非常好 | 是 |\n| 15 | 老年 | 否 | 否 | 一般 | 否 |\n\n\n\n对上表所给的训练数据集D,根据信息增益准则选择最优特征。首先计算经验熵H(D)\n\n\n\n> 计算类别:一共15个类别,9个是,6个否\n\n然后计算各特征对数据集D的信息增益,分别以A1,A2,A3,A4表示年龄、有工作、有自己房子和信贷情况4个特征,则\n\n1. 首先计算年龄\n\n > H(D)=0.971上面计算了,H(D1)青年,H(D2)中年,H(D3)老年\n\n2. 计算有工作\n\n \n\n > H(D)=0.971,H(D1)是有工作,H(D2)是无工作\n\n3. 计算有无房子\n\n \n\n4. 计算信贷情况\n\n \n\n有无房子是作为信贷的第一个划分,下降的最快\n\n\n\n### 信息增益比\n\nInformation gain ratio\n\n**信息增益比:**\n\n如果以信息增益为划分依据,存在偏向选择取值较多的特征,信息增益是对这一问题进行矫正。\n\n**举例**:\n\n如上面的例子,后面加入了身份证这个特征,身份证又是唯一的,算法对样本画了个15叉树,一层就搞定了全部的分类。\n\n这样会造成一个问题,划分会倾向于特征取值数目较多的,即分的更快。\n\n但在预测集上就出现很大的问题了,即预测集的身份证肯定也是唯一的。\n\n**定义:**\n\n特征A对训练数据集D的信息增益比定义为其信息增益g(D,A)与训练数据集D关于特征A的经验熵H(D)之比:\n\n\n\n**计算**:\n\n如上面的年龄,有3个类(青年、中年、老年),\n\n\n\n信息增益比和信息增益的区别就是除以\n\n\n\n### 决策树的构建\n\nBuild the decision tree\n\nID3算法:\n\n- 输入:训练数据集D,特征A,阈值ε ;\n- 输出:决策树T\n 1. 若D中所有实例属于同一类,则T为单节点数,并将类作为该节点的类标记,返回T;\n 2. 若A = Ø,则T为单节点树,并将D中实例数最大的类作为该节点的类标记,返回T;\n 3. 否则,按算法计算A中各特征对D的信息增益,选择信息增益最大的特征Ag;\n 4. 如果Ag的信息增益小于阈值ε,则置T为单节点树,并将D中实例数最大的类作为该节点的类标记,返回T;\n 5. 否则,对Ag的每一个可能值ai,依将D分割为若干非空子集Di,将Di中实例最大的类作为标记,构建子节点,由节点及其子节点构成树T,返回T;\n 6. 对第i个子节点,以Di为训练集,以A - {Ag}为特征集,递归地调用1~5步,得到树Ti,返回Ti。\n\nC4.5算法,大体相同,只不过计算的是信息增益比,而不是信息增益。我们通常也是用C4.5作为决策树的算法,其区别也就在于多了个分母。\n\n\n\n### 总结\n\nSummarization\n\n1. 决策树的核心思想:以树结构为基础,每个节点对某特征进行判断,进入分支,直到到达叶节点。\n\n2. 决策树构造的核心思想:让信息熵快速下降,从而达到最少的判断次数获得标签。\n\n3. 判断信息熵下降速度的方法:信息增益。\n\n4. 构建决策树算法:ID3(使用信息增益)、C4.5(使用使用信息增益比)。\n\n5. 信息增益会导致节点偏向选取取值角度的特征的问题。\n\n > 关于第5点的补充,统计学习和西瓜书都是给的这个解释,但还有另一种解释,就是信息增益导致大数问题——>概率是否准确的问题。\n\n"
},
{
"alpha_fraction": 0.5068027377128601,
"alphanum_fraction": 0.7247990369796753,
"avg_line_length": 19.7243595123291,
"blob_id": "b88e63b7e6f3e8a242bbb169d373d9b7383af0e7",
"content_id": "abc40ec6cb51430111cc4c0ecf3fb525ea7c3140",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6314,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 156,
"path": "/机器学习算法原理及推导/其它/第一章——线性回归原理.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 第一章——线性回归原理\n\n### 线性回归概述\n\n#### 例子:\n\n- 数据:工资和年龄(两个特征)\n- 目标:预测银行会贷款给我多少钱(标签)\n- 考虑:工资和年龄都会影响最终银行贷款的结果,那么它们各自有多大的影响被?(参数)\n\n| 工资 X1 | 年龄 X2 | 额度 Y |\n| ------- | ------- | ------ |\n| 4000 | 25 | 20000 |\n| 8000 | 30 | 70000 |\n| 7500 | 33 | 50000 |\n\n其中工资、年龄是特征,用来预测额度,而我们不可能直接拿工资 × 年龄,因为明显工资更重要些,那么可能建成的方程是 Y = (X1 × θ1) × (X2 × θ1),其中θ就是各种特征的权重,那么最终我们要求解的就是各种的θ。\n\n而线性回归就说得到每个数据最终的预测Y(具体的值),除了回归还有分类,分类是离散型的0/1等固定值的分类。\n\n### 通俗理解\n\n- X1,X2就是我们的两个特征工资和年龄,Y是银行最终会借给我们额度\n- 找到最合适的一条线,来拟合我们的数据点\n\n\n\n> 红色的点是数据,即前面的特征等\n\n当前的数据是线性的,也就是数据不能映射在同一个平面。那么 Y = (X1 × θ1) × (X2 × θ1)就不能覆盖所有的点进行计算。怎么样解决这个问题,或者说如果我们能尽可能的满足绝大多数数据点,是否就可以了呢。\n\n\n\n### 误差\n\n#### 误差项公式\n\n接着上面的问题,什么样的平面才是最合理最满足的呢\n\n- 假设 θ1是工资的参数, θ2是年龄的参数\n- 拟合的平面:h θ(x) = θ0 + θ1X1 + θ2X2\n - θ0是偏置项,不管θ1和θ2等什么变化,θ0的变化会影响平面向上或者向下浮动,对结果做微调\n - 上面的方程可能无法形成矩阵相乘的形式,因为θ0没有X0,我们可以添加一个不影响整体的X0,以达到矩阵相乘的效果\n- 整合:\n\n- 真实值和预测值之间肯定要存在差异的(用ε来表示该误差)\n\n- 对于每个样本:\n\n > y表示真实值,(第二项)表示预测值,ε表示误差值,即预测值和真实值之间有一个误差项,其中 i 表示每个样本之间都有自己的真实值、预测值、误差项\n\n误差项越小,代表预测的越准确。\n\n#### 独立同分布的意义\n\n- 误差 ε(i) 是独立且具有相同的分布,并服从均值为0方差为θ平方的高斯分布\n\n > 我们拆开上面的话\n\n - 独立:小明和小红一起来贷款,他们没关系\n - 同分布:他们都是去同一个银行\n - 高斯分布:银行可能会多给,也可能会少给,但绝大多数情况下这个浮动不会太大,极小情况下浮动会比较大,符合正常情况\n\n \n\n 现实中也很难有绝对的高斯分布,大多数是近似高斯分布,也就是我们算法推导的时候也很难得到一个完全正确的答案,只有最接近的答案,也就是存在误差。\n\n#### 似然函数的作用\n\n- 预测值与误差:(1)\n\n > y是真实值、x是预测值、ε误差值,现在我们要求的就是θ,它应该怎么求解\n\n- 由于误差服从高斯分布:(2)\n\n > 高斯分布的公式,这里我们要求的是θ,所以把θ移动到左边,变成y - θX = ε,即演变成\n\n- 将(1)式带入(2)式:(3)\n\n > 这里我们希望左边的x和θ组合完后,和真实值y越解决越好,即成为y的可能性越大越好\n\n- 似然函数:\n\n 解释:为什么引入,什么样的参数跟我们的数据组合后恰好是真实值\n\n- 对数似然:\n\n 解释:乘法难解,加法就容易了,对数里乘法可以转换成加法\n\n - 展开化简:\n\n - 目标:让似然函数(对数变换后也一样)越大越好\n\n (最小二乘法)\n\n#### 参数求解\n\n- 目标函数:\n- 求偏导:\n- 偏导等于0的最优解:\n\n\n\n### 梯度下降\n\n#### 通俗理解\n\n- 引入:当我们得到了一个目标函数后,如何求解?(并不一定可解,线性回归可以当做是一个特例)\n- 常规套路:机器学习的套路就是我们交给机器一堆数据,然后告诉它什么样的学习方式是对的(目标函数),然后让它朝着这个方向去做\n- 如何优化:一步步的完成迭代。\n\n#### 参数更新方法\n\n- 目标函数:\n\n > θ0和θ1分别得出方向,最终找到综合的结果。\n\n- 寻找山谷的最低点,也就是我们的目标函数终点\n\n- 下山分多步走(更新参数)\n\n 1. 找到最合适的方向\n 2. 每次走一小步\n 3. 按照方向和步伐更新参数\n\n\n\n梯度下降,目标函数:\n\n- 批量梯度下降:\n\n (容易得到最优解,但由于每次考虑所有样本,速度很慢)\n\n- 随机梯度下降:\n\n (每次找到一个样本,迭代速度快,但不一定每次都朝着收敛的方向)\n\n- 小批量梯度下降发:\n\n > 简化成代码即 θ = θ - α×(1/n) × ( (残差×数据)矩阵 )\n >\n > 残差=\n\n (每次更新选择一小部分数据来算)\n\n\n\n#### 学习率(步长)\n\n> 上面小批量梯度公式里的α\n\n- 学习率(步长):对结果会产生巨大的影响,一般小一些\n- 如何选择:从小的开始,知道不能再小\n- 批处理数:32、64、128都可以,很多时候还要考虑资源和时间\n\n\n\n"
},
{
"alpha_fraction": 0.6766612529754639,
"alphanum_fraction": 0.6855753660202026,
"avg_line_length": 29.09756088256836,
"blob_id": "a67f1473303d0712cf441461235ba4f758551c81",
"content_id": "f836ad822fdf269f442bf6ed5215a6695b683970",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1274,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 41,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/prepare_for_training.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "\"\"\"Prepares the dataset for training\"\"\"\n\nimport numpy as np\nfrom .normalize import normalize\nfrom .generate_polynomials import generate_polynomials\nfrom .generate_sinusoids import generate_sinusoids\n\n\ndef prepare_for_training(data, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):\n # 计算样本总数\n num_examples = data.shape[0]\n\n data_processed = np.copy(data)\n\n # 预处理\n features_mean = 0\n features_deviation = 0\n data_normalized = data_processed\n if normalize_data:\n (\n data_normalized,\n features_mean,\n features_deviation\n ) = normalize(data_processed)\n\n data_processed = data_normalized\n\n # 特征变换sinusoidal\n if sinusoid_degree > 0:\n sinusoids = generate_sinusoids(data_normalized, sinusoid_degree)\n data_processed = np.concatenate((data_processed, sinusoids), axis=1)\n\n # 特征变换polynomial\n if polynomial_degree > 0:\n polynomials = generate_polynomials(data_normalized, polynomial_degree)\n data_processed = np.concatenate((data_processed, polynomials), axis=1)\n\n # 加一列1\n data_processed = np.hstack((np.ones((num_examples, 1)), data_processed))\n\n return data_processed, features_mean, features_deviation\n"
},
{
"alpha_fraction": 0.5797722935676575,
"alphanum_fraction": 0.5931364297866821,
"avg_line_length": 59.61000061035156,
"blob_id": "bd31f4cda401d13e38e53853ffe451d04a2a30af",
"content_id": "c947d2355b2d14530ba8f60542227bf27dcb7e01",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24272,
"license_type": "permissive",
"max_line_length": 170,
"num_lines": 400,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/2_cross_fea_order_id_level.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport networkx as nx\nimport os\nimport gc\nimport warnings\nfrom utils import parallel_apply_fea,add_features_in_group\nfrom functools import partial\nwarnings.filterwarnings(\"ignore\")\n\ndef last_k_cross_time_interval(gr, periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n gr_['t_i_v'] = gr_['cross_time'].diff()\n gr_['t_i_v'] = gr_['t_i_v']\n gr_['t_i_v'] = gr_['t_i_v'].fillna(0)\n gr_ = gr_.drop_duplicates().reset_index(drop = True)\n\n # cross time变化\n features = {}\n for period in periods:\n if period > 10e5:\n period_name = 'zsl_cross_time_interval_all'\n gr_period = gr_.copy()\n else:\n period_name = 'zsl_cross_time_interval_last_{}_'.format(period)\n gr_period = gr_.iloc[:period]\n features = add_features_in_group(features, gr_period, 't_i_v',\n ['mean','max', 'min', 'std','sum'],\n period_name)\n return features\n\n# last k cross id time trend\ndef last_cross_time_features(gr,periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n features = {}\n for period in periods:\n if period > 10e5:\n period_name = 'zsl_all_'\n gr_period = gr_.copy()\n else:\n period_name = 'zsl_last_{}_'.format(period)\n gr_period = gr_.iloc[:period]\n features = add_features_in_group(features, gr_period, 'cross_time',\n ['max', 'sum', 'mean','min','std'],\n period_name)\n return features\n\n\n# last k cross id time trend\ndef trend_in_last_k_cross_id_time(gr, periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n features = {}\n for period in periods:\n gr_period = gr_.iloc[:period]\n features = add_trend_feature(features, gr_period,\n 'cross_time', 'zsl_{}_period_trend_'.format(period)\n )\n return features\n# trend feature\ndef add_trend_feature(features, gr, feature_name, prefix):\n y = gr[feature_name].values\n try:\n x = np.arange(0, len(y)).reshape(-1, 1)\n lr = LinearRegression()\n lr.fit(x, y)\n trend = lr.coef_[0]\n except:\n trend = np.nan\n features['{}{}'.format(prefix, feature_name)] = trend\n return features\n\ndef slice_id_change(x):\n hour = x * 5 / 60\n hour = np.floor(hour)\n hour += 8\n if hour >= 24:\n hour = hour - 24\n return hour\nif __name__ == '__main__':\n nrows = None\n root_path = '../data/giscup_2021/'\n read_idkey = np.load(root_path + 'id_key_to_connected_allday.npy', allow_pickle=True).item()\n read_grapheb = np.load(root_path + 'graph_embeddings_retp1_directed.npy', allow_pickle=True).item()\n read_grapheb_retp = np.load(root_path + 'graph_embeddings_retp05_directed.npy', allow_pickle=True).item()\n for i in read_grapheb:\n read_grapheb[i] = list(read_grapheb[i]) + list(read_grapheb_retp[i])\n del read_grapheb_retp\n head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']\n embedding_k = 256\n fill_list = [0] * embedding_k\n df = []\n #######################################nextlinks #######################################\n nextlinks = pd.read_csv(root_path+'nextlinks.txt', sep=' ', header=None)\n nextlinks.columns=['from_id', 'to_id']\n nextlinks['to_id'] = nextlinks['to_id'].astype('str')\n nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(\",\"))\n nextlinks = pd.DataFrame({'from_id':nextlinks.from_id.repeat(nextlinks.to_id.str.len()),\n 'to_id':np.concatenate(nextlinks.to_id.values)})\n from_id_weight = nextlinks['from_id'].value_counts()\n from_id_weight = 
from_id_weight.to_frame()\n from_id_weight['index'] = from_id_weight.index\n\n from_id_weight.columns=['weight', 'from_id']\n nextlinks = pd.merge(nextlinks,from_id_weight, 'left', on=['from_id'])\n nextlinks = nextlinks.sort_values(by='weight',ascending=False)\n G = nx.DiGraph()\n from_id = nextlinks['from_id'].astype(str).to_list()\n to_id = nextlinks['to_id'].to_list()\n weight = nextlinks['weight'].to_list()\n edge_tuple = list(zip(from_id, to_id,weight))\n print('adding')\n G.add_weighted_edges_from(edge_tuple)\n\n dc = nx.algorithms.centrality.degree_centrality(G)\n dc = sorted(dc.items(), key=lambda d: d[1],reverse=True)\n dc = dc[:50000]\n dc = [str(i[0]) for i in dc ]\n #######################################cross #######################################\n for name in os.listdir(root_path+'train/'):\n data_time = name.split('.')[0]\n if data_time=='20200803':\n continue\n train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)\n print(\"开始处理\", data_time)\n train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n train_head['order_id'] = train_head['order_id'].astype(str)\n train_head['ata'] = train_head['ata'].astype(float)\n train_head['distance'] = train_head['distance'].astype(float)\n train_head['simple_eta'] = train_head['simple_eta'].astype(float)\n train_head['driver_id'] = train_head['driver_id'].astype(int)\n train_head['slice_id'] = train_head['slice_id'].astype(int)\n # 处理corss数据\n data_cross = train[[2]]\n data_cross['index'] = train_head.index\n data_cross['order_id'] = train_head['order_id']\n data_cross_split = data_cross[2].str.split(' ', expand=True).stack().to_frame()\n data_cross_split = data_cross_split.reset_index(level=1, drop=True).rename(columns={0: 'cross_info'})\n data_cross_split = data_cross[['index', 'order_id']].join(data_cross_split)\n data_cross_split[['cross_id', 'cross_time']] = data_cross_split['cross_info'].str.split(':', 2, expand=True)\n data_cross_split['cross_time'] = data_cross_split['cross_time'].astype(float)\n tmp_cross_id = data_cross_split['cross_id'].str.split('_', expand=True)\n tmp_cross_id.columns=['cross_id_in','cross_id_out']\n data_cross_split = pd.concat([data_cross_split,tmp_cross_id],axis=1).drop(['cross_id','cross_info'],axis=1)\n data_cross_split['date_time'] = data_time\n data_cross_split = data_cross_split.drop('index',axis=1).reset_index(drop=True)\n print('preprocess finish!')\n print('start feature engineering')\n feature = train_head[['order_id', 'distance']]\n ###################static fea#############################################\n data_cross_split['zsl_cross_id_isnull'] =0\n data_cross_split.loc[data_cross_split['cross_id_in'].isnull(),'zsl_cross_id_isnull'] = 1\n data_cross_split.loc[data_cross_split['cross_id_in'].isnull(),'cross_id_in'] = '-1'\n data_cross_split.loc[data_cross_split['cross_id_out'].isnull(),'cross_id_out'] = '-1'\n #######################order cross_id count###############################\n df = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossid_agg = df['cross_id_in'].agg({'zsl_order_cross_id_in_count': 'count'})\n tmp_crossid_agg['zsl_order_cross_id_in_count_bins'] = 0\n tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=5)&(tmp_crossid_agg['zsl_order_cross_id_in_count']<10),'zsl_order_cross_id_in_count_bins']=1\n 
tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=10)&(tmp_crossid_agg['zsl_order_cross_id_in_count']<20),'zsl_order_cross_id_in_count_bins']=2\n tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count']>=20),'zsl_order_cross_id_in_count_bins']=3\n feature = feature.merge(tmp_crossid_agg,on='order_id',how='left')\n print('order cross_id count finish!')\n #######################order cross id & distance###############################\n feature['zsl_order_cross_is_highspeed'] = 0\n feature.loc[(feature['distance']>90000)&(feature['zsl_order_cross_id_in_count']<30),'zsl_order_cross_is_highspeed'] = 1\n print('order cross id & distance finish!')\n #######################order cross id & nextlinks centry###############################\n tmp = data_cross_split[data_cross_split['cross_id_in'].isin(dc)]\n tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['cross_id_in'].agg({'zsl_order_cross_id_in_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count,on='order_id',how='left')\n feature['zsl_order_cross_id_in_centry_count'] = feature['zsl_order_cross_id_in_centry_count'].fillna(0)\n tmp = data_cross_split[data_cross_split['cross_id_out'].isin(dc)]\n tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['cross_id_out'].agg({'zsl_order_cross_id_out_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')\n feature['zsl_order_cross_id_out_centry_count'] = feature['zsl_order_cross_id_out_centry_count'].fillna(0)\n print('order cross_id & nextlinks centry finish!')\n #######################order cross_time sum mean max min var std###############################\n tmp_linktime_agg = df['cross_time'].agg({'zsl_order_cross_time_sum': 'sum','zsl_order_cross_time_mean': 'mean',\n 'zsl_order_cross_time_max': 'max','zsl_order_cross_time_min': 'min',\n 'zsl_order_cross_time_var': 'var'})\n feature = feature.merge(tmp_linktime_agg,on='order_id',how='left')\n print('order cross_time sum mean max min var std finish!')\n #######################order distance/link_id_count###############################\n feature['zsl_distance_div_cross_id_count'] = feature['distance']*10/feature['zsl_order_cross_id_in_count']\n feature = feature.drop('distance', axis=1)\n print('order distance div link_id_count finish!')\n ###################trend fea#############################################\n ###################trend cross time#####################################\n groupby = data_cross_split.groupby(['order_id'])\n func = partial(trend_in_last_k_cross_id_time, periods=[2, 5, 10, 20,100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_cross_time_features, periods=[2, 5, 10, 20,100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_k_cross_time_interval, periods=[2, 5, 10, 20, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n print('trend cross time finish!')\n ####################nextlinks graph embedding#######################\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_idkey)\n 
data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna(0)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_grapheb)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna('0')\n def replace_list(x):\n if isinstance(x, str):\n x = fill_list\n return x\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].apply(replace_list)\n cross_id_in_col = ['zsl_cross_id_in_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(cross_id_in_col, ['mean'] * len(cross_id_in_col)))\n cross_id_in_array = np.array(data_cross_split.pop('cross_id_in').to_list())\n cross_id_in_array = pd.DataFrame(cross_id_in_array, columns=agg_col, dtype=np.float16)\n data_cross_split = pd.concat([data_cross_split, cross_id_in_array], axis=1)\n tmp = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossidin_agg = tmp.agg(agg_col)\n feature = feature.merge(tmp_crossidin_agg, on='order_id', how='left')\n print('trend cross_id_in eb finish!')\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_idkey)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna(0)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_grapheb)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna('0')\n def replace_list(x):\n if isinstance(x, str):\n x = fill_list\n return x\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].apply(replace_list)\n cross_id_out_col = ['zsl_cross_id_out_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(cross_id_out_col, ['mean'] * len(cross_id_out_col)))\n cross_id_out_array = np.array(data_cross_split.pop('cross_id_out').to_list())\n cross_id_out_array = pd.DataFrame(cross_id_out_array, columns=agg_col, dtype=np.float16)\n data_cross_split = pd.concat([data_cross_split, cross_id_out_array], axis=1)\n tmp = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossidout_agg = tmp.agg(agg_col)\n feature = feature.merge(tmp_crossidout_agg, on='order_id', how='left')\n print('trend cross_id_out eb finish!')\n multipy_df = []\n multipy_col = []\n for col1, col2 in zip(cross_id_in_col, cross_id_out_col):\n tmp = feature[col1] * feature[col2]\n multipy_df.append(tmp)\n multipy_col.append(col1 + '_mul_' + col2)\n multipy_df = pd.concat(multipy_df, axis=1)\n multipy_df.columns = multipy_col\n feature = pd.concat([feature, multipy_df], axis=1)\n print('trend cross_id_out eb multipy finish!')\n feature.to_csv(root_path + 'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time), index=False)\n del train\n gc.collect()\n\n test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)\n test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n test_head['order_id'] = test_head['order_id'].astype(str)\n test_head['ata'] = test_head['ata'].astype(float)\n test_head['distance'] = test_head['distance'].astype(float)\n test_head['simple_eta'] = test_head['simple_eta'].astype(float)\n test_head['driver_id'] = test_head['driver_id'].astype(int)\n test_head['slice_id'] = test_head['slice_id'].astype(int)\n # 处理corss数据\n data_cross = test[[2]]\n 
data_cross['index'] = test_head.index\n data_cross['order_id'] = test_head['order_id']\n data_cross_split = data_cross[2].str.split(' ', expand=True).stack().to_frame()\n data_cross_split = data_cross_split.reset_index(level=1, drop=True).rename(columns={0: 'cross_info'})\n data_cross_split = data_cross[['index', 'order_id']].join(data_cross_split)\n data_cross_split[['cross_id', 'cross_time']] = data_cross_split['cross_info'].str.split(':', 2, expand=True)\n data_cross_split['cross_time'] = data_cross_split['cross_time'].astype(float)\n tmp_cross_id = data_cross_split['cross_id'].str.split('_', expand=True)\n tmp_cross_id.columns = ['cross_id_in', 'cross_id_out']\n data_cross_split = pd.concat([data_cross_split, tmp_cross_id], axis=1).drop(['cross_id', 'cross_info'], axis=1)\n data_cross_split['date_time'] = '20200901'\n data_cross_split = data_cross_split.drop('index', axis=1).reset_index(drop=True)\n print('preprocess finish!')\n print('start feature engineering')\n feature = test_head[['order_id', 'distance']]\n ###################static fea#############################################\n data_cross_split['zsl_cross_id_isnull'] = 0\n data_cross_split.loc[data_cross_split['cross_id_in'].isnull(), 'zsl_cross_id_isnull'] = 1\n data_cross_split.loc[data_cross_split['cross_id_in'].isnull(), 'cross_id_in'] = '-1'\n data_cross_split.loc[data_cross_split['cross_id_out'].isnull(), 'cross_id_out'] = '-1'\n #######################order cross_id count###############################\n df = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossid_agg = df['cross_id_in'].agg({'zsl_order_cross_id_in_count': 'count'})\n tmp_crossid_agg['zsl_order_cross_id_in_count_bins'] = 0\n tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 5) & (\n tmp_crossid_agg['zsl_order_cross_id_in_count'] < 10), 'zsl_order_cross_id_in_count_bins'] = 1\n tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 10) & (\n tmp_crossid_agg['zsl_order_cross_id_in_count'] < 20), 'zsl_order_cross_id_in_count_bins'] = 2\n tmp_crossid_agg.loc[(tmp_crossid_agg['zsl_order_cross_id_in_count'] >= 20), 'zsl_order_cross_id_in_count_bins'] = 3\n feature = feature.merge(tmp_crossid_agg, on='order_id', how='left')\n print('order cross_id count finish!')\n #######################order cross id & distance###############################\n feature['zsl_order_cross_is_highspeed'] = 0\n feature.loc[(feature['distance'] > 90000) & (\n feature['zsl_order_cross_id_in_count'] < 30), 'zsl_order_cross_is_highspeed'] = 1\n print('order cross id & distance finish!')\n #######################order cross id & nextlinks centry###############################\n tmp = data_cross_split[data_cross_split['cross_id_in'].isin(dc)]\n tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['cross_id_in'].agg({'zsl_order_cross_id_in_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')\n feature['zsl_order_cross_id_in_centry_count'] = feature['zsl_order_cross_id_in_centry_count'].fillna(0)\n tmp = data_cross_split[data_cross_split['cross_id_out'].isin(dc)]\n tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['cross_id_out'].agg({'zsl_order_cross_id_out_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')\n feature['zsl_order_cross_id_out_centry_count'] = feature['zsl_order_cross_id_out_centry_count'].fillna(0)\n print('order cross_id & nextlinks centry finish!')\n 
#######################order cross_time sum mean max min var std###############################\n tmp_linktime_agg = df['cross_time'].agg({'zsl_order_cross_time_sum': 'sum', 'zsl_order_cross_time_mean': 'mean',\n 'zsl_order_cross_time_max': 'max', 'zsl_order_cross_time_min': 'min',\n 'zsl_order_cross_time_var': 'var'})\n feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')\n print('order cross_time sum mean max min var std finish!')\n #######################order distance/link_id_count###############################\n feature['zsl_distance_div_cross_id_count'] = feature['distance'] * 10 / feature['zsl_order_cross_id_in_count']\n feature = feature.drop('distance', axis=1)\n print('order distance div link_id_count finish!')\n ###################trend fea#############################################\n ###################trend cross time#####################################\n groupby = data_cross_split.groupby(['order_id'])\n func = partial(trend_in_last_k_cross_id_time, periods=[2, 5, 10, 20, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_cross_time_features, periods=[2, 5, 10, 20, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_k_cross_time_interval, periods=[2, 5, 10, 20, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n print('trend cross time finish!')\n ####################nextlinks graph embedding#######################\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_idkey)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna(0)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].astype(int)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].map(read_grapheb)\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].fillna('0')\n def replace_list(x):\n if isinstance(x, str):\n x = fill_list\n return x\n data_cross_split['cross_id_in'] = data_cross_split['cross_id_in'].apply(replace_list)\n cross_id_in_col = ['zsl_cross_id_in_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(cross_id_in_col, ['mean'] * len(cross_id_in_col)))\n cross_id_in_array = np.array(data_cross_split.pop('cross_id_in').to_list())\n cross_id_in_array = pd.DataFrame(cross_id_in_array, columns=agg_col, dtype=np.float16)\n data_cross_split = pd.concat([data_cross_split, cross_id_in_array], axis=1)\n tmp = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossidin_agg = tmp.agg(agg_col)\n feature = feature.merge(tmp_crossidin_agg, on='order_id', how='left')\n print('trend cross_id_in eb finish!')\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_idkey)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna(0)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].astype(int)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].map(read_grapheb)\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].fillna('0')\n def replace_list(x):\n if 
isinstance(x, str):\n x = fill_list\n return x\n data_cross_split['cross_id_out'] = data_cross_split['cross_id_out'].apply(replace_list)\n cross_id_out_col = ['zsl_cross_id_out_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(cross_id_out_col, ['mean'] * len(cross_id_out_col)))\n cross_id_out_array = np.array(data_cross_split.pop('cross_id_out').to_list())\n cross_id_out_array = pd.DataFrame(cross_id_out_array, columns=agg_col, dtype=np.float16)\n data_cross_split = pd.concat([data_cross_split, cross_id_out_array], axis=1)\n tmp = data_cross_split.groupby('order_id', as_index=False)\n tmp_crossidout_agg = tmp.agg(agg_col)\n feature = feature.merge(tmp_crossidout_agg, on='order_id', how='left')\n print('trend cross_id_out eb finish!')\n multipy_df = []\n multipy_col = []\n for col1, col2 in zip(cross_id_in_col, cross_id_out_col):\n tmp = feature[col1] * feature[col2]\n multipy_df.append(tmp)\n multipy_col.append(col1 + '_mul_' + col2)\n multipy_df = pd.concat(multipy_df, axis=1)\n multipy_df.columns = multipy_col\n feature = pd.concat([feature, multipy_df], axis=1)\n print('trend cross_id_out eb multipy finish!')\n feature.to_csv(root_path + 'feature/test/cross_fea_order_id_level_20200901.csv', index=False)\n"
},
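Both the train and test passes in this file repeat one graph-embedding pattern: map each cross id to a k-dimensional embedding, fall back to a zero vector for unseen ids, average the vectors per `order_id`, and finally multiply the in/out mean vectors elementwise. A condensed, self-contained sketch; the toy embedding table, `embedding_k = 4`, and the sample frame are invented stand-ins for the real `read_idkey`/`read_grapheb` lookups loaded from disk:

```python
import numpy as np
import pandas as pd

embedding_k = 4
fill_list = [0.0] * embedding_k               # fallback vector for unseen ids
emb_table = {1: [.1, .2, .3, .4],             # toy stand-in for read_grapheb
             2: [.5, .6, .7, .8]}

df = pd.DataFrame({'order_id': ['a', 'a', 'b'],
                   'cross_id_in': [1, 2, 9]})  # 9 is an unseen id

# id -> embedding list; unseen ids map to NaN and are replaced by fill_list
vecs = df['cross_id_in'].map(emb_table).apply(
    lambda v: v if isinstance(v, list) else fill_list)

cols = ['zsl_cross_id_in_eb{}'.format(i) for i in range(embedding_k)]
eb = pd.DataFrame(np.array(vecs.to_list()), columns=cols, dtype=np.float16)
df = pd.concat([df[['order_id']], eb], axis=1)

# per-order mean of each embedding dimension, as in tmp.agg(agg_col)
agg = df.groupby('order_id', as_index=False)[cols].mean()
print(agg)
```

The `_mul_` interaction columns then come from an elementwise product of the aggregated in and out blocks, exactly as in the loop above.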
{
"alpha_fraction": 0.544542670249939,
"alphanum_fraction": 0.7167173027992249,
"avg_line_length": 20.414793014526367,
"blob_id": "41cf8307136c63b91cced11c1f495887ed3b619b",
"content_id": "5689d54087299ce4103863f96bc52f5ce0326492",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 67665,
"license_type": "permissive",
"max_line_length": 286,
"num_lines": 1690,
"path": "/必备数学基础.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 必备数学基础\n\n### 高等数学基础\n\n#### 函数\n\n> **WHAT**:后面基本都是用函数,这里先理解一下函数的概念\n\n**函数的定义**:\n\n- y = f(x) 其中x是自变量,y是因变量。y随着x变化\n\n**几种特性**:\n\n奇偶性、周期性、单调性(如下图)\n\n\n\n**极限**:\n\n- 按照一定次数排列的数:x1,x2,...,xn,其中xn叫做通项\n- 对于数列{xn},当n无限增大时,其通项无限接近于一个常数A,则称该数列以A为极限或称数列收敛于A。\n\n**导数**:\n\n- 都有对应的结果,不用死记硬背,查就行了,如(C)' = 0 或者(sin x)' = cos x\n\n\n\n#### 方向导数(引出梯度)\n\n> 在函数定义域的内点,对某一*方向*求导得到的*导数*。\n>\n> 常规数学中,所有问题都有一个解。而机器学习当中,求解很难或者没有解,我们只能不断逼近这个最优解。\n\n**问题一**:蚂蚁沿着什么方向跑路不被火烧,能活下来(二维平面)\n\n\n\n> 蚂蚁沿着任意方向都可以活,最优的是沿着对角方向L,z是函数变化,也就是图中的φ。\n\n**三维平面的方向导数公式**:\n\n\n\n\n\n**求一个方向导数具体的值**:\n\n求函数在点P(1,0)处,沿着从点P(1,0)到点Q(2,-1)的方向的方向导数。\n\n\n\n\n\n所求方向导数\n\n\n\n#### 梯度\n\n> **WHAT**:简而言之,就是找到函数在某点沿着哪个梯度方向变化最大(小),也就是怎样的方向对数据逼近所需要的值最好。\n>\n> 是一个向量(矢量),表示某一函数在该点处的方向导数沿着该方向取得最大值,即函数在该点处沿着该方向(此*梯度*的方向)变化最快,变化率最大(为该*梯度*的模)。\n\n函数:z = f(x,y)在平面域内具有连续的一阶偏导数,对于其中每个点P(x,y)都有向量则其称为函数点P的梯度。\n\n\n\n是方向L上的单位向量\n\n\n\n\n\n> 根据上面的梯度导数,和方向导数的区别就在多了个*cosθ*,*θ*充当梯度和方向导数之间的关系\n\n只有当才有最大值\n\n函数在某点的梯度是一个向量,它的方向与方向导数最大值取得的方向一致。\n\n其大小正好是最大的方向导数\n\n\n\n> 注意,只有*θ*=0,*cos*导数才能=1,梯度才能取得最大值,也就是那个方向。而沿着反方向就是最小值也就是梯度下降。\n\n**求一个具体值,最大梯度方向和最小梯度方向**:\n\n设求grad u,并求在点M(0,1,-1)处方向导数的最大(小)值\n\n\n\n\n\n\n\n> 注:得出的结果(-1,0,2),求解:((-1^2) + (0^2) + (-2^2)) = √5,前面都是x的平方,所以结果也需要开根号。\n\n\n\n### 微积分\n\n#### 微积分基本理论\n\n> **WHAT**:前面说到,机器学习当中,求解很难或者没有解,而微积分也是一个用简单的方式,求一个与实际情况最接近的答案。\n>\n> 很多的微分积起来\n\n如何求A面积的值\n\n\n\n**以直代曲**:\n\n- 对于矩形,我们可以轻松求得其面积,能否用矩形代替曲线形状呢?\n\n\n- 应该用多少个矩形来代替?\n\n\n\n\n> 越小的矩形,越覆盖,然后求每个矩形的面积。\n\n**面积的由来**:\n\n- 在ab之间插入若干个点,这样就得到n个小区间。\n- 每个小矩形面积为:近似得到曲线面积\n- 当分割无限加细,每个小区间的最大长度为λ,此时λ → 0\n- 曲边面积:\n\n\n\n> 注意每个小区间的最大长度为λ,而λ无限接近于0时,那么曲边的面积我们就可以得出,当然这里的近似表达是极限,无限接近的极限。\n\n**求和**:\n\n我们需要尽可能的将每一个矩形的底边无穷小\n\n\n\n莱布尼茨为了体现求和的感觉,把S拉长了,简写成\n\n\n\n> 将上面的所有矩阵求和,∫ = sum,求和的意思\n\n**定积分**:\n\n当时,总和S总数趋于确定的极限l,则称极限l为函数f(x)在曲线[a,b]上的定积分\n\n\n\n\n\n### 泰勒公式\n\n> **what**:用简单、熟悉的多项式来近似替代复杂的函数。\n>\n> 一个概念可以自己去找找,需要就找我,我再把内容加上\n\n\n\n### 线性代数基础\n\n#### 矩阵和特征\n\n> **WHAT**:人工智能领域,数据基本是矩阵形式,而矩阵的每列(一般是除开首列),称为特征\n\n**矩阵**:\n\n> 拿到数据后,数据就长如下样子,有行有列\n\n\n\n> 左图√表示A可以到B和C,如右上图,再把√号改成0/1以存储在数据里面,就如右下图\n\n**几种特别的矩阵**:\n\n\n\n> 上三角部分有值,和下三角部分有值\n\n\n\n> 对角阵:对角有值且可以是任意值,单位矩阵:对角有值且相同\n\n\n\n> 同型矩阵:行列相同。矩阵相等:行列相同且里面的值一样\n\n#### 向量内积\n\n- 设有n维向量:\n\n- [x, y] = x1 y1 + x2 y2 + ... + xn yn,此时我们就把[x,y]叫做向量的内积。\n- \n - 对称性:[x, y] = [y, x]\n - 线性性质:[λx, y] = λ[x, y], [x + y, z] = [x, z] + [y, z]\n\n#### SVD矩阵分解\n\n> **WHAT**:为了让数据的呈现更好,且不破坏数据的原始表达\n\n数据行列可能很大,如电商行业100万客户(行),有1万的商品(特征),用一组数据表达就是\n\n| 客户ID | 商品1 | 商品2 | ... | 商品1万 |\n| -------- | ----------------- | ----- | ---- | ------- |\n| xxx1 | 1(表示买过一次) | 0 | ... | 5 |\n| xxx2 | 0 | 1 | ... | 0 |\n| ... | 5 | 10 | ... | 0 |\n| xxx100万 | ... | ... | ... | ... |\n\n那么来一个客户,就是直接多1万列表示,这样的数据是非常稀疏的,我们可以分解成A表100万客户,100个特征,而这100个特征对应这那B表的1万个商品,也就是一个表变成A表和B表,且两者关联。\n\n这就需要用到SVD矩阵。\n\n\n\n### 随机变量\n\n#### 离散和连续型数据\n\n\n\n> 离散型是有限多个的,比如10个台阶,只可能是其中的一个台阶,一个确定的结果。\n>\n> 连续型则可能是任意的值,没办法确定是哪个台阶。\n\n**离散型随机变量概率分布**\n\n- 找到离散型随机变量X的所有可能取值\n\n- 得到离散型随机变量取这些值的概率\n\n \n\n 为离散型随机变量的概率函数\n\n**连续型随机变量概率分布**\n\n- 密度:一个物体,如果问其中一个点的质量是多少?这该怎么求?\n\n 由于这个点实在太小了,那么质量就为0了,但是其中的一大块是由\n\n 很多个点组成的,这时我们就可以根据密度来求其质量了\n\n- X为连续随机变量,X在任意区间(a,b]上的概率可以表示为:\n\n 其中f(x)就叫做X的概率密度函数,也可以简单叫做密度\n\n> 还有一种方法是把每个值划分在不同区间,变成离散型,但如果有新数据进来就要再划分区间导致区间越来越多。\n\n#### 简单随机抽样\n\n抽取的样本满足两点\n\n1. 样本X1,X2...Xn是相互独立的随机变量。\n\n2. 样本X1,X2...Xn与总体X同分布。\n\n \n\n#### 极大似然估计\n\n> **WHAT**:找到最有可能的结果\n\n1. 构造似然函数:L(θ)\n\n2. 对似然函数取对数:lnL(θ)\n\n > 做log后,logAB = logA + logB,加法更好求\n\n3. 求偏导\n\n4. 
求解得到 θ 值\n\n \n\n> 第一步构造函数;第二步取对数,对数后的值容易取且极值点还是那个位置;第三步求偏导;得到θ\n\n**求一个具体的值**:\n\n设 X 服从参数 λ(λ>0) 的泊松分布,x1,x2,...,xn 是来自 X 的一个样本值,求λ的极大似然估计值\n\n- 因为X的分布律为\n- 所以 λ 的似然函数为\n- \n- 令\n- 解得 λ 的极大似然估计值为 \n\n\n\n### 概率论基础\n\n#### 概率与频率\n\n- 抛硬币和王者游戏击杀数,这些都是随机的\n- 其特点:可以在相同条件下重复执行、事先知道可能出现的结果、开始前并不知道这一次的结果\n- 随机试验E的所有结果构成的集合称为E的样本空间 S = {e}\n - 抛硬币:S = {正面,反面}\n - 击杀数:S = {0,1,2,...,n}\n\n**频率与概率**\n\n- A在这N次试验中发生的频率:,其中发生的次数(频数);n—总试验次数。\n- 的稳定值P定义为A的概率P(A) = p\n\n- 次数越多则结果越稳定\n\n#### 古典概型\n\n- 定义:试验E中样本点是有限的,出现每一样本点的概率是相同。\n\n P(A) = A所包含的样本点数 / S中的样本点数\n\n- 一袋中有8个球,编号为1 - 8,其中1 - 3号为红球,4 - 8 为黄球,设摸到每一球的可能性相等,从中随机摸一球,记A={摸到红球},求P(A)。\n\n - S={1,2,...,8}\n - A={1,2,3} => P(A) = 3/8\n\n#### 条件概率\n\n> **WHAT**:在一定条件下的某个事件发生的概率\n\n- 有个不放回的抽奖,一共三种可能性,两个不中奖一个中奖,也就是3个人抽奖,必有一个中奖,所有可能为{YNN, NYN, NNY},N表示不中间,Y表示中间\n- 第一名没中则:A = {NYN, NNY},第三中的概率\n- 样本空间变了,概率也变了\n\n#### 独立性\n\n> **WHAT**:两个或多个随机事件的发生概率不相互影响。\n\n例题:\n\n甲、乙两人同时向一个目标射击,甲击中率为0.8,乙击中率为0.7,求目标被击中的概率。\n\n设A={甲击中},B={乙击中},C={目标被击中}\n\n则:C = A ∪ B,P(C) = P(A) + P(B) - P(AB)\n\n∵ 甲、乙同时射击,其结果互不影响,\n\n∴ A, B相互独立\n\nP(C) = 0.7+0.8-0.56 = 0.94\n\n#### 二维随机变量\n\n> **WHAT**:关心两个指标并了解其相互关系\n\n如:为了了解学生的身体状况,观察学生的身高(X)和体重(Y)及两者的相互关系\n\n- 有二维离散型随机变量\n- 有二维连续型随机变量\n\n#### 期望\n\n> **WHAT**:期望达到什么,反映了随机变量的取值水平\n\n- 离散型随机变量X的分布律为:\n\n 若级数绝对收敛,则称其为随机变量X的数学期望,\n\n > Xk是每种情况,Pk是每种情况对应的概率\n\n - 投骰子的期望则是1 / (1/6) + 2 / (1/6) + ... + 6 / (1/6) = 21 / 6 = 3.5\n\n- 连续型随机变量X的概率密度为f(x),若积分绝对收敛,则称积分的值为随机变量X的数学期望。\n\n - 随机变量X满足于均匀分布,求其期望。\n\n =>\n\n \n\n**方差**\n\n> 衡量随机变量相对于数学期望的分散程度\n\n\n\n#### 贝叶斯拼写纠错\n\n问题:我们看到用户输入了一个不在字典中的单词,我们需要去猜测用户到底想输入的是什么\n\n- P(猜测想输入的单词|用户实际输入的单词)\n- 用户实际输入的单词记为D(D代表Data,即观测数据)\n- 猜测1:P(h1|D),猜测2:P(h2|D),猜测3:P(h3|D) ...\n- P(h|D) = P(h) * P(D|h) / P(D)\n\n> p(h) 在字典里某个词出现的次数占总体的比(先验概率)\n>\n> P(D|h)指输入一个词,输错的概率多大;\n>\n> P(D)客户输入的值D,可以约掉\n\n贝叶斯方法计算:P(h|D) = P(h) * P(D|h) ,P(h)是特定猜测的先验概率。\n\n比如用户输入tlp,到底是top还是tip?当最大似然不能作出决定性判断时(可能两边都是一半可能性),这是先验概率就可以插手给出指示,告诉我们,一般来说top出现的程度要高许多,所以他更可能想打的是top。\n\n\n\n#### 垃圾邮件过滤\n\n模型比较理论\n\n- 最大似然:最符合观测数据的(即P(D|h)最大的)最有优势\n- 奥卡姆剃刀:P(h)较大的模型有较大的优势\n- 抛一枚硬币,观察到的是“字”,根据最大似然估计的理念,我们应该猜测这枚硬币抛出“字”的概率是1,因为这个才能最大化P(D|h)的猜测\n\n实例:\n\n- 问题:给定一封邮件,判定它是否属于垃圾邮件\n\n D来表示这封邮件,注意D是由N个单词组成。\n\n 我们用h+表示垃圾邮件,h-表示正常邮件\n\n- P(h+|D) = P(h+) * P(D|h+) / P(D)\n\n P(h-|D) = P(h-) * P(D|h-) / P(D)\n\n > P(h+)是先验概率,只需要计算一个邮件库垃圾邮件和正常邮件的比例;\n >\n > P(D|h+) 垃圾邮件中,目前这封邮件里面的词有多少个相似。D里面含有N个单词d1,d2,d3,P(D|h+) =P(d1,d2,...,dn|h+),扩展:P(d1|h+) × P(d2|d1,h+) × P(d3|d2,d1, h+)× ...,垃圾邮件第一个词是d1的概率 × 垃圾邮件第一个词是d1且第二个词是d2的概率 × 垃圾邮件第一个词是d1且第二个词是d2且第三个词是d3的概率...\n\n- 上面的公式太麻烦了,例用朴素贝叶斯简化,朴素贝叶斯假设特征之间是独立,互不影响的。这么假设完d1,d2,d3完全没关系了,\n\n 简化为P(d1|h+) × P(d2|h+) × P(d3|h+) × ...\n\n- 对于P(d1|h+) × P(d2|h+) × P(d3|h+) × ... 
只要统计di这个词在垃圾邮件中出现的概率。如:全部100封邮件中,di个词出现的概率\n\n- 再回到最上面 P(h+|D) = P(h+) × P(D|h+) / P(D),P(D)正常异常相同,一起省略,P(h+)是先验概率,P(D|h+) 是该封信的每个词在垃圾邮件中出现的概率,这样就可以得到结果了\n\n\n\n### 数据科学的几种分布\n\n#### 正态分布\n\n> 代表宇宙中大多数的运转状态,大量的随机变量被证明是正态分布的。\n\n若随机变量X服从一个数学期望为μ、方差为σ^2的正态分布,记为N(μ, σ^2)。其概率密度函数为正态分布的期望值μ决定了其位置,其标准差σ决定了分别的幅度。当μ = 0,σ = 1时的正态分布是标准正态分布。\n\n- 公式\n\n μ是均值\n\n σ是标准差\n\n\n\n#### 二项式分布\n\n> 结果只有两个\n\n投篮只有进球或者不进球,进球概率可能是0.5也可能不是,而不进球概率 = 1 - 进球概率。\n\n二项式得属性包括:\n\n- 每个试验都是独立的。\n- 试验中的结果只有两种可能:进球和不进球。\n- 总共进行了n次相同得试验。\n- 所有试验进球和不进球的概率是相同的。\n\n公式\n\nN * p表示分布的均值\n\n#### 泊松分布\n\n适用于在随机时间和空间上发生事件的情况,其中,我们只关注事件发生的次数,如:\n\n- 医院在一天内录制的紧急电话的数量\n- 某个地区在一天内报告的失窃的数量\n- 在特定城市上报自杀的人数\n\n当以下假设有效时,则称为泊松分布\n\n- 任何一个成功的事件都不应该影响另一个成功的事件\n- 在短时间内成功的概率必须等于在更长时间内成功的概率\n- 时间间隔很小时,在给间隔时间内成功的概率趋向于零\n\n泊松分布中使用的符号\n\n- λ是事件发生的速率\n- t是时间间隔的长\n- X是该时间间隔内的事件数\n- 其中,X称为泊松随机变量,X的概率分布称为泊松分布\n- 令μ表示长度为t的间隔中的平均事件数。μ = λ * t\n\n公式\n\n**求一个具体的值**\n\n- 已知平均每小时出生3个婴儿,请问接下来的两小时,一个婴儿都不出生的概率?\n\n 描述某段时间内,事件具体的发生概率\n\n \n\n- P表示概率,N表示某种函数关系,t表示时间,n表示数量,1小时内出生3个婴儿的概率,就表示为P(N(1)=3),λ是事件的频率。\n\n \n\n#### 均匀分布\n\n对于骰子来说,结果是1到6,得到任何一个结果的概率是相等的,这就是均匀分布的基础。与伯努利分布不同,均匀分布的所有看你结果的n个数都是相等的。\n\n如果变量X是均匀分布的,则密度曲线可以表示为: \n\n均匀分布的曲线:\n\n\n\n均与分布曲线是一个矩形,又称为矩形分布。\n\n**求一个具体的值**:\n\n花店每天销售的花束数量是均匀分布的,最多40,最少为10,求日销量在15到30之间的概率。\n\n日销量在15到30之间的概率为(30-15)*(1/(40-10)) = 0.5\n\n也可求日销量大于20的概率为 0.667\n\n#### 卡方分布\n\n> 通过小数量的样本容量取预估总体容量的分布情况\n\n卡方验证统计样本的实际观测值与理论推断值之间的偏离程度\n\n公式\n\nwhere \n\n#### Beta分布\n\n> 一个概率的概率分布,当不知道一个东西的具体概率时,可以给出所有概率的可能性大小\n\n举一个简单的例子,熟悉棒球运动的都知道有一个指标就是棒球击球率(batting average),就是用一个运动员击中的球数除以击球的总数,我们一般认为0.266是正常水平的击球率,而如果击球率高达0.3就被认为是非常优秀的。\n\n现在有一个棒球运动员,我们希望能够预测他在这一赛季中的棒球击球率是多少。你可能就会直接计算棒球击球率,用击中的数除以击球数,但是如果这个棒球运动员只打了一次,而且还命中了,那么他就击球率就是100%了,这显然是不合理的,因为根据棒球的历史信息,我们知道这个击球率应该是0.215到0.36之间才对。\n\n最好的方法来表示这些经验(在统计中称为先验信息)就是用beta分布,这表示在我们没有看到这个运动员打球之前,我们就有了一个大概的范围。beta分布的定义域是(0,1)这就跟概率的范围是一样的。\n\n接下来我们将这些先验信息转换为beta分布的参数,我们知道一个击球率应该是平均0.27左右,而他的范围是0.21到0.35,那么根据这个信息,我们可以取α=81,β=219。\n\n之所以取这两个参数是因为:\n\n- beta分布的均值是从图中可以看到分布主要落在(0.2,0.35)间,这是经验中得出的合理范围\n- 在这个例子中,x轴就表示各个击球率的取值,x对应的y值就是这个击球率对应的概率。也就是beta分布可以看作一个概率的概率分布\n\n\n\n- α和β是一开始的参数,在这里是81和219。当α增加了1(击中一次)。β没有增加(没有漏球)。这就是我们新的beta分布Beta(81+1,219)。\n- 当得到了更多的数据,假设一共打了300次,其中击中100,200次没击中,那么新的分布就是Beta(81+100,219+200)\n\n\n\n根据公式 α / (α+β) = (82+100) / (82+100+219+200) = 0.303,命中率提升了,蓝色曲线右移。\n\n\n\n### 核函数\n\n#### 核函数的目的\n\n> 最基本的出发点是升维,使得数据更好一些,更多一些\n\n核函数是SVM支持向量机当中最重要的函数\n\n出发点\n\n- 如果数据有足够多的可利用的信息,那么可以直接做想要的事情。但是现在没有那么多的信息,我们可不可以在数学上进行一些投机呢?\n\n- 低维(比如我只知道一个人的年龄,性别,那我们能对他有更多了解吗)\n\n 高维(比如我知道从他出生开始,做过哪些事,赚过哪些钱等)\n\n- 如果我们对数据更好的了解,得到的结果也会更好(机器也是一样)\n\n\n\n> 上图中,我们很难说画一个圈来区分红点和绿点,一般画直线或者曲线,如果我们把二维转换成三维,我们只需要一个面就可以切分开了,低维很难解决的问题,高维能很容易解决。核函数就是解决这么一个问题\n\n低维的数据变成高维后,数据量和计算量也会有所增加,引出下面的解决方法。\n\n#### 线性核函数\n\n- Linear核函数对数据不做任何变换。\n\n- 何时用,特征已经比较丰富,样本数据量巨大,需要进行实时得出结果的问题\n - 越复杂的模型,针对的数据集越窄,泛化能力越差,且处理速度很慢,当然越复杂也代表着越强大。\n- 不需要设置任何参数,直接就可以用\n\n#### 多项式核函数\n\n- 需要给定3个参数\n\n > Q越大,越复杂\n\n- 一般情况下2次的更常见\n\n- γ(gama)对内积进行缩放,ζ(zeta)控制常数项,Q控制高次项。\n\n 其特例就是线性核函数\n\n#### 核函数实例\n\n还是先从一个小例子来阐述问题。假设我们有俩个数据,x=(x1,x2,x3);y=(y1,y2,y3),此时在3D空间已经不能对其进行线性划分,那么我们通过一个函数将数据映射到更高维的空间,比如9维的话,那么(x)=(x1x2,x1x2,x1x3,x2x1,X2x2,x2x3,x3x1,x3x2,x3x3),由于需要计算内积,所以在新的数据在9维空间,需要计算<fx),f(y)>的内积,需要花费O(n^2)。\n\n再具体点,令x = (1,2,3); y = (4,5,6), 那么f(x) = (1,2,3,2,4,6,3,6,9),计算方式如上的x1x2内积相乘,f(y) = (16,20,24,20,25,36,24,30,36),(此时<f(x),fy)>=16+40+72+40+100+180+72+180+324=1024。\n\n似乎还能计算,但是如果将维数扩大到一个非常大数时候,计算起来可就不是一丁点问题了。\n\n但是发现,K(x,y)=(<x,y>)^2\n\nK(x, y) = (4+10+18)^2 = 32^2 = 
1024\n\n俩者相等,`K(x,y)=(<x,y>)^2=<f(x),f(y)>`,但是K(x,y)计算起来却比<f(x),f(y)>简单的多\n\n也就是说只要用K(x,y)来计算,,效果和<f(x),f(y)>是一样的,但是计算效率却大幅度提高了,如:K(x,y)是O(n),而<f(x),f(y)>是0(n^2)。\n\n所以使用核函数的好处就是,可以在一个低维空间去完成高维度(或者无限维度)样本内积的计算,比如K(xy)=(4+10+18)^2的3D空间对比<f(x),f(y)> = 16+40+72+40+100+180+72+180+324的9D空间。\n\n#### 高斯核函数\n\n> 最常用的,最好用的核函数\n\n一维的高斯\n\n\n\n二维的高斯\n\n\n\n公式:\n\n- 看起来像两个样本点之间的距离的度量,如果X和Y很相似,那么结果也就是1,如果不相似那就是0。\n\n- 这样做的好处,特征多了无穷个,得到无穷维。\n\n \n\n#### 参数的影响\n\n高斯核函数看起来不错,但是它对参数是极其敏感的,效果差异也很大\n\nσ^2 = 0.5\n\n> σ越小,顶部越尖,我们计算的是样本到样本点的距离,也就是尖尖到底部的距离,底部到顶部变化越快,层次更分明,那么特征也就越明显,识别的越容易,风险也会比较高\n\nσ^2 = 3\n\n> σ越大,层次越平衡,也就是大家的特征都差不多,那么识别也越不容易,但是风险也相对低\n\n决策边界如下图,σ越小,切分越厉害,越容易过拟合\n\n\n\n> 原σ在下面,注意上面的公式||x - x'||^2 / 2σ^2,这里移上去了,所以前面加上负号,第一个是负1,第二个是负10,第三个是负100\n\n\n\n### 熵和激活函数\n\n#### 熵的概念\n\n- 物体内部的混乱程度。(一件事发生的不确定性)\n- \n\n- 所有的概率值都在0-1之间,那么最终H(X)必然也是一个正数\n\n#### 熵的大小意味着\n\n- 假如有100个商品,那么选到某个商品的概率非常低,而如果商品只有几个,那么选到某个商品的概率非常高\n- 如公式,商品越多,所有log后的值就越高,且公式是求和,那么值就更大\n\n想象一个分类任务,我们希望得到如下的那种结果\n\n- A[1,1,1,1,1,1,1,1,1,1,1]\n- B[1,2,3,4,5,3,4,2,2,1,1]\n\n显然A集合才是我们希望得到的结果,它的熵值表现是非常小的。\n\n比如我们手上有一份数据,有两个指标性别和资产,判断是否给该用户贷款,性别和资产分组完后,如果资产熵值小,那么我们可以认为资产对是否可以贷款的影响更重要。\n\n#### 激活函数(Sigmoid函数)\n\n- Sigmoid是常用的非线性激活函数\n- 能够把连续值压缩到0-1区间,不断的下降\n- 缺点:杀死梯度,非原点中心对称\n\n\n\n解决当正负样本不好分类的时候,无法用线性分割,那么用一个概率值去定义一个样本是否是正负,比如大于0.5定义为正,否则是负。\n\n又如5分类任务时,我们可以输出成以下形式\n\n| 样本 | 类别1 | 类别2 | 类别3 | 类别4 | 类别5 |\n| ---- | ----- | ----- | ----- | ----- | ----- |\n| A | 0.1 | 0.9 | 0.3 | 0.6 | 0.1 |\n| B | 0.9 | 0.1 | 0.1 | 0.1 | 0.1 |\n| C | 0.1 | 0.1 | 0.1 | 0.1 | 0.6 |\n\n如上所示,我们可以认为A是类别2和类别4的,B是类别1的,C是类别5的。\n\n#### 激活函数的问题\n\n**杀死梯度**:\n\n之前我们计算梯度下降时,当值无限接近于边缘,如X轴,那么梯度就为0,也就没办法应用,特别是神经网络是串行的,结果是累乘的求梯度,这样其中一个为0时,那么乘以0就全部结果为0\n\n**非原点中心对称**:\n\n> 原点对称是0的位置\n\nSigmoid没有负数,都是大于0的,当梯度更新的时候,要么全为负要么全为正,不会有正有负,也就是一根筋的往一个方向跑,优化更新会产生阶梯式\n\n\n\n那么更新就会慢,收敛效果一般\n\n**Tanh函数**\n\n- 原点中心对称\n- 输出在-1到1直接\n- 梯度消失现象依然存在,会贴近边缘\n\n\n\n**ReLU函数**\n\n- 公司简单使用\n- 解决梯度消失现象,计算速度更快\n\n\n\n> 没有了梯度为0的地方\n\n\n\n> 会直接把小于0的杀死\n\n\n\n> 虚线是其它函数,实现是relu,可以看到relu函数收敛的非常快\n\n但是上面还存在一个问题,就是杀死小于0的神经元,那么这些神经元就不会再更新,它可能会存在作用,所以改进后\n\n**Leaky ReLU**\n\n- 解决了Rulu会杀死一部分神经元的情况\n\n\n\n> 可以看到max里面最小值是0.01x,也就是不会直接杀死\n\n\n\n### 回归分析\n\n#### 概述\n\n> 相关分析是研究两个或多个以上的变量之间相关程度及大小的一种统计方法。\n>\n> 回归分析是寻找存在相关关系的变量间的数学表达式,并进行统计推断的一种统计方法。\n\n在对回归分析进行分类时,主要有两种分析方式:\n\n- 根据变量的数目,可以分类一元回归、多元回归\n- 根据自变量与因变量的表现形式,分为线性和非线性\n\n所以回归分析包括四个方向:一元线性回归分析、多元线性回归分析、一元非线性回归分析、多元非线性回归分析\n\n\n\n> 曲线上的点,叫估计值(预测值),观测值也是真实值,观测值和估计值之间的差异叫残差。我们希望这个残差越小越好\n\n回归分析的一般步骤:\n\n- 确认回归方程中解释变量和被解释变量\n- 确定回归模型建立回归方程\n- 对回归方程进行各种校验\n- 利用回归方程进行预测\n\n#### 回归方程的定义\n\n- 因变量:被预测或被解释的变量,用y表示\n- 自变量:预测或解释因变量的一个或多个变量,用x表示\n- 对于具有线性关系的两个变量,可以用一个方程来表示它们之间的线性关系\n- 描述因变量y如何以来自变量x和误差项ε的方程称为回归模型。\n\n对于只涉及一个变量的一元线性回归模型可表示为:\n\n\n\n- y因变量\n- x自变量\n- β0表示截距\n- β1表示斜率\n- ε表示误差项,反映除x和y之间的线性关系外的随机因素对y的影响\n\n如何求出β0和β1\n\n一元例子:\n\n- 人均收入是否会影响人均食品消费支出\n- 贷款余额是否影响到不良贷款\n- 航班正点率是否对顾客投诉次数有显著影响\n\n**回归方程**\n\n描述因变量y的期望值入喝依赖于自变量x的方程称为回归方程。根据一元线性回归模型的假设,可以得到它的回归方程为:\n\n\n\n- 如果回归方程中的参数已知,对于一个给定的x值,利用回归方程就能计算出y的期望值\n- 用样本统计量代替回归方程中的未知参数,就得到估计的回归方程,简称回归直线\n\n#### 误差项的定义\n\n- 误差ε是独立且具有相同的分布,并服从均值为0方差为θ^2的高斯分布\n- 独立:张三和李四一起来贷款,他俩没关系\n- 同分布:他俩都是来同一银行,即我们假定的银行\n- 高斯分布:银行可能会多给,也可能会少给,但是绝大多数情况下浮动不会太大,极小情况下浮动大,符合正常情况\n\n#### 最小二乘法推导与求解\n\n- 预测值与误差\n\n- 由于误差服从高斯分布\n\n- 将(1)式代入(2)式:\n\n > 什么样的θ和x相乘后,得到最相近或者相同的y\n\n- 似然函数:\n\n 解释:什么样的参数和我们的数据组合后恰好是真实值\n\n- 对数似然:\n\n 解释:乘法难解,加法就容易了,对数里乘法可以转换成加法\n\n- 展开化简:\n\n \n\n- 目标:让似然函数越大越好\n\n \n\n 最小二乘法\n\n**参数的最小二乘法估计**\n\n对于回归直线,关键在于求解参数,常用高斯提出的最小二乘法,它使因变量的观察值y与估计值之间的离差平方和达到最小来求解。\n\n\n\n\n\n展开得:\n\n\n\n求偏导可得:\n\n\n\n求解:\n\n\n\n#### 
回归方程求解小例子\n\n实例:70年代世界制造业总产量与世界制成品总出口量的变化关系如表:\n\n| 年度 | 总产量年增长率(%) x | 总出口量年增长率(%) y |\n| ---- | ------------------- | --------------------- |\n| 1970 | 4.0 | 8.5 |\n| 1971 | 4.0 | 8.0 |\n| 1972 | 8.5 | 10.5 |\n| 1973 | 9.5 | 15.5 |\n| 1974 | 3.0 | 8.5 |\n| 1975 | -1.0 | -4.5 |\n| 1976 | 8.0 | 13.5 |\n| 1977 | 5.0 | 5.0 |\n| 1978 | 5.0 | 6.0 |\n| 1979 | 4.0 | 7.0 |\n\n\n\n利用回归直线进行估计和预测:\n\n- 点估计:利用估计的回归方程,对于x的某一特定的值,求出y的一个估计值,就是点估计\n- 区间估计:利用估计的回归方程,对于x的一个特定值,求出y的一个估计值的区间就是区间估计\n\n**估计标准误差的计算**\n\n为了度量回归方程的可靠性,通常计算估计标准误差。它度量观察值回绕着回归直线的变化程度或分散程度。\n\n估计平均误差:\n\n\n\n- 公式中根号内的分母是n-2,而不是n,这是由于自由度为n-2。\n- 估计标准误差越大,则数据点围绕回归直线的分散程度就越大,回归方程的代表性越小。\n- 估计标准误差越小,则数据点围绕回归直线的分散程度越小,回归方程的代表愈大,其可靠性越高。\n\n**置信区间估计**\n\n\n\n**在1—a置信水平下预测区间为**:\n\n\n\n**求一个具体的值**\n\n某企业从有关资料中发现广告投入和产品销售有密切的关系。近年该企业广告费和销售额资料如下表,若2003年广告费为120万,请用医院线性回归求2003年产品销售额的置信区间和预测区间(α=0.05)\n\n| 年份 | 广告费x(万元) | 销售额y(百万元) |\n| ---- | --------------- | ----------------- |\n| 1994 | 35 | 18 |\n| 1995 | 52 | 25 |\n| 1996 | 60 | 30 |\n| 1997 | 72 | 38 |\n| 1998 | 85 | 41 |\n| 1999 | 80 | 44 |\n| 2000 | 95 | 49 |\n| 2001 | 100 | 52 |\n| 2002 | 105 | 60 |\n\n求解如下\n\n- \n- =-3.65 + 0.57 ×120 = 64.75\n- \n- =64.75±2.365 × 2.43 × 0.743=64.75 ± 4.2699\n- =64.72 ± 2.365 × 2.43 ×1.2459 = 64.75 ± 4.3516\n\n结果图\n\n\n\n影响区间宽度的因素:\n\n- 置信水平(1-a),区间宽度随置信水平的增大而增大\n- 数据的离散程度Se,区间宽度随离程度的增大而增大样本容量\n- 区间宽度随样本容量的增大而减小\n- X0与X均值之间的差异,随着差异程度的增大而增大\n\n#### 回归直线拟合优度\n\n回归直线与各观测点的接近程度称为回归直线对数据的拟合优度\n\n- 总平方和(SST):\n\n \n\n- 回归平方和(SSR):\n\n \n\n- 残差平方和(SSE):\n\n \n\n总平方和可以分解为回归平方和、残差平方和两部分:SST = SSR + SSE\n\n- 总平方和(SST),反映因变量的 n 个观察值与其均值的总离差\n- 回归平方和(SSR),反映了y的总变差中,由于x与y之间的线性关系引起的y的变化部分\n- 残差平方和(SSE),反映了除了x对y的线性影响之外的其他因素对y变差的作用,是不能由回归直线来解释的y的变差部分\n\n**判定系数**\n\n回归平方和占总平方和的比例,用R^2表示,其值在0到1之间\n\n- R^2 == 0:说明y的变好与x无关,x完全无助于解释y的变差\n- R^2 == 1:说明残差平方和为0,拟合是完全的,y的变化只与x有关\n\n\n\n\n\n**显著性校验**\n\n著性检验的主要目的是根据所建立的估计方程用自变量x来估计或预测因变量y的取值。当建立了估计方程后,还不能马上进行估计或预测,因为该估计方程是根据样本数据得到的,它是否真实的反映了变量x和y之间的关系,则需要通过检验后才能证实根。\n\n据样本数据拟合回归方程时,实际上就已经假定变量x与y之间存在着线性关系,并假定误差项是一个服从正态分布的随机变量,且具有相同的方差。但这些假设是否成立需要检验\n\n显著性检验包括两方面:\n\n- 线性关系检验\n- 回归系数检验\n\n**线性关系检验**\n\n线性关系检验是检验自变量x和因变量y之间的线性关系是否显著,或者说,它们之间能否用一个线性模型来表示。\n\n将均方回归(MsR)同均方残差(MsE)加以比较,应用F检验来分析二者之间的差别是否显著。\n\n- 均方回归:回归平方和(SSR)除以相应的自由度(自变量的个数K)\n- 均方残差:残差平方和(SSE)除以相应的自由度(n-k-1)\n\nH0:β1 = 0所有回归系数与零无显著差异,y与全体x的线性关系不显著\n\n计算检验统计量F:\n\n\n\n**回归系数的显著性检验**\n\n回归系数显著性检验的目的是通过检验回归系数β的值与0是否有显著性差异,来判断Y与X之间是否有显著的线性关系若β=0,则总体回归方程中不含X项(即Y不随X变动而变动),因此,变量Y与X之间并不存在线性关系;若β≠0,说明变量Y与X之间存在显著的线性关系\n\n是根据最小二乘法求出的样本统计量,服从正态分布;\n\n的分布具有如下性质数学期望E()=\n\n标准差:\n\n由于δ未知,需用其估计量Se来代替得到的估计标准差\n\n\n\n计算检验的统计量:\n\n**线性关系检验与回归系数检验的区别**:\n\n线性关系的检验是检验自变量与因变量是否可以用线性来表达,而回归系数的检验是对样本数据计算的回归系数检验总体中回归系数是否为0\n\n- 在一元线性回归中,自变量只有一个,线性关系检验与回归系数检验是等价的\n- 多元回归分析中,这两种检验的意义是不同的。线性关系检验只能用来检验总体回归关系的显著性,而回归系数检验可以对各个回归系数分别进行检验\n\n#### 多元与曲线回归问题\n\n经常会遇到某一现象的发展和变化取决于几个影响因素的情况,也就是一个因变量和几个自变量有依存关系的情况,这时需用多元线性回归分析。\n\n- 多远线性回归分析预测法,是指通过对两上或两个以上的自变量与变量的相关分析,建立预测模型进行预测和控制的方法\n\n- 多元线性回归预测模型一般式为\n\n\n\n**调整的多重判定系数**:\n\n用样本容量n和自变量的个数k去修正R^2得到:\n\n\n\n- 避免增加自变量而高估R^2\n\n**曲线回归分析**:\n\n直线关系是两变量间最简单的一种关系,曲线回归分析的基本任务是通过两个相关变量x与y的实际观测数据建立曲线回归方程,以揭示x与y间的曲线联系的形式。\n\n曲线回归分析最困难和首要的工作是确定自变量与因变量间的曲线关系的类型,曲线回归分析的基本过程:\n\n- 先将x或y进行变量转换\n- 对新变量进行直线回归分析、建立直线回归方程并进行显著性检验和区间估计\n- 将新变量还原为原变量,由新变量的直线回归方程和置信区间得出原变量的曲线回归方程和置信区\n\n由于曲线回归模型种类繁多,所以没有通用的回归方程可直接使用。但是对于某些特殊的回归模型,可以通过变量代换、取对数等方法将其线性化,然后使用标准方程求解参数,再将参数带回原方程就是所求。\n\n**实例**:**某商店各个时期的商品流通费率和商品零售额资料**\n\n| 商品零售额x(万元) | 9.5 | 11.5 | 13.5 | 15.5 | 17.5 | 19.5 | 21.5 | 23.5 | 25.5 | 27.5 |\n| ------------------- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | 
---- | ---- | ---- |\n| 商品流通费率y(%) | 6 | 4.6 | 4 | 3.2 | 2.8 | 2.5 | 2.4 | 2.3 | 2.2 | 2.1 |\n\n散点图如下:\n\n\n\n散点图显示出x与y的变动关系为一条递减的双曲线。\n\n\n\n\n\n> 这样转换后,公式和线性公式是一样的\n\n标准方程为\n\n将计算数据代入\n\n解得\n\n= -0.4377 + 60.4x'\n\nx' = 1/x 代入\n\n=-0.4377+\n\n**多重共线性**\n\n回归模型中两个或两个以上的自变量彼此相关的现象\n\n举例 x1={10,8,6,4},x2={5,4,3,2},那么x1是两倍的x2\n\n\n\n多重共线性带来的问题有:\n\n- 回归系数估计值的不稳定性增强\n- 回归系数假设检验的结果不显著等\n\n**多重共线性检验的主要方法**\n\n- 容忍度\n- 方差膨胀因子(VIF)\n\n容忍度:\n\n\n\n- Ri是解释变量xi与方程中其它解释变量间的复相关系数;\n- 容忍度在0~1之间,越接近于0,表示多重共线性越强,越接近于1,表示多重共线性越弱;\n\n**方差膨胀因子**\n\n方差膨胀因子是容忍度的倒数\n\n\n\n- VIFi越大,特别是大于等于10,说明解释变呈xi与方程中其他解释变量之间有严重的多重共线性;\n- VIFi越接近1,表明解释变量xi和其他解释变量之间的多重共线性越弱;\n\n#### Python工具包\n\n统计分析库https://www.statsmodels.org/stable/index\n\n> anaconda自带\n\n专门机器学习的包https://scikit-learn.org/stable/\n\n> anaconda自带,含大量模型和分析工具等\n\nhttps://scikit-learn.org/stable/auto_examples/\n\n> 在这里,你可以随意选择你想要的图,复制代码换成自己的即可,无需再自己写代码\n\n还有pandas和numpy是必不可少的\n\n#### statsmodels回归分析\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82)\n\n#### 高阶与分类变量实例\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82)\n\n#### 案例:汽车价格预测任务\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%9B%9E%E5%BD%92%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n##### 缺失值填充\n\n##### 特征相关性\n\n##### 预处理\n\n##### 回归求解\n\n上述notebook中的功能都集成在sklearn中,怎么用sklearn,\n<br>\n去到分类模型的页面https://scikit-learn.org/stable/modules/classes\n\n找到lasso点击进去\n\n\n\n可以看到其公式,及参数的默认值等\n\n\n\n使用小案例等\n\n\n\n具体的案例\n\n\n\n\n\n### 假设检验\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82)\n\n#### 假设检验基本思想\n\n#### 左右侧检验与双侧检验\n\n#### Z检验基本原理\n\n#### Z检验实例\n\n#### T检验\n\n#### T检验实例\n\n#### 卡方检验\n\n#### 假设检验中的两类错误\n\n以上内容均已更新在notebook\n\n\n#### Python假设校验实例\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82)\n\n#### Python卡方检验实例\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82)\n\n\n\n### 相关分析\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E7%9B%B8%E5%85%B3%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n#### 皮尔逊相关系数\n\n#### 计算与检验\n\n#### 肯德尔和谐系数\n\n#### 质量相关分析\n\n#### 品质相关分析\n\n#### 偏相关分析\n\n#### 复相关分析\n\n以上内容均已更新在notebook\n\n\n\n### 方差分析\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E6%96%B9%E5%B7%AE%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n#### 方差分析概述\n\n#### 方差分析计算方法\n\n#### 单因素方差分析\n\n#### 方差分析中的多重比较\n\n#### 多因素方差分析\n\n以上内容均已更新在notebook\n\n#### 
Python方差分析实例\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E6%96%B9%E5%B7%AE%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n\n\n### KMEANS算法\n\n#### KMEANS算法概述\n\n聚类概念:\n\n- 无监督问题:手上没有标签\n- 聚类:相似的东西分到一组\n- 难点:如何评估,如何调参\n\n\n\n基本概念:\n\n- 想要得到簇的个数,需要指定K值,即聚成几个堆\n- 质心:均值,即向量各维取平均,最中间的位置\n- 距离度量:常用欧几里得距离和余弦相似度(先标准化)\n- 优化目标:,让每一个样本到中心点(质心)的距离越小越好,即每个点到中心点的和最小,越小越相似\n\n#### KMEANS工作流程\n\n\n\n- a:我们拿到这一堆数据\n- b:k=2,初始化两个点\n- c:计算各个点到初始化的两个点的距离,近的聚类\n- d:更新初始化点\n- e:接着更新所有点到该两点的距离\n- f:不断更新,直到无法更新,聚类完成\n\n**优势:**简单、快速、适合常规数据集\n\n**劣势:**\n\n- K值难确定\n- 复杂度与样本呈线性关系\n- 很难发现任意形状的簇\n\n如下图这种\n\n\n\n<https://www.naftaliharris.com/blog/visualizing-k-means-clustering/>可视化网站\n\n#### DBSCAN聚类算法\n\n基本概念: (DensityBased Spatial Clustering of Applications with Noise)\n\n- 核心对象: 若某个点的密度达到算法设定的阈值则其为核心点。(即r邻域内点的数量不小于 minPts),以点为圆心,多于我们设定的阈值,即是一个核心对象\n- ∈-邻域的距离阈值: 设定的半径r\n- 直接密度可达: 若某点p在点q的r邻域内,且q是核心点则p-q直接密度可达。\n- 密度可达:若有一个点的序列q0、q1、….qk,对任意qi-qi-1是直接密度可达的,则称从q0到qk密度可达,这实际上是直接密度可达的“传播”。\n\n只需要设置阈值和半径,不再需要设置k值\n\n- 密度相连:若从某核心点p出发,点q和点k都是密度可达的,则称点q和点k是密度相连的。\n- 边界点:属于某一个类的非核心点,不能发展下线了。\n- 直接密度可达:若某点p在点q的r邻域内,且q是核心点则p-q直接密度可达。\n- 噪声点:不属于任何一个类簇的点,从任何一个核心点出发都是密度不可达的\n\n如图:\n\n\n\n- A: 核心对象\n- B,C: 边界点\n- N: 离群点\n\n#### DBSCAN工作流程\n\n- 参数D:输入数据集\n- 参数∈:指定半径\n- MinPts:密度阈值\n\n1. 标记所有对象为 unvisited;\n2. Do;\n3. 随机选择一个 unvisited 对象 p;\n4. 标记 p 为visited;\n5. If p 的∈-领域至少有 MinPts 个对象;\n6. 创建一个新簇C,并把 p 添加到C;\n7. 令 N 为 p 的∈-领域中的对象集合;\n8. For N中每个点p\n9. If p 是 unvisited;\n10. 标记 p 为 visited;\n11. If p 的∈-领域至少有 MinPts 个对象,把这些对象添加到N;\n12. 如果p还不是任何簇的成员,把p添加到C;\n13. End for;\n14. 输出C;\n15. Else 标记 p 为噪声;\n16. Until 没有标记为 unvisited 的对象;\n\n\n\n参数选择:\n\n- 半径∈,可以根据K距离开设定:找突变点\n- k距离:给定数据集P={p(i);i=0,1,...n},计算点P(i)到集合D的子集S中所有点之间的距离,距离按照从小到大的顺序排序,d(k)就被称为k-距离。\n- MinPts:k-距离中k的值,一般取得小一些,多次尝试\n\n优势:\n\n- 不需要指定簇个数\n- 可以发现任意形状得簇\n- 擅长找到离群点(检测任务)\n- 两个参数就够了\n\n劣势:\n\n- 高维数据有些困难(可以降维)\n- 参数难以选择(参数对结果得的影响非常大)\n- Sklearn中效率慢(数据消减策略)\n\n如下两图:\n\n\n\n<https://www.naftaliharris.com/blog/visualizing-dbscan-clustering/>可视化网站\n\n**多种聚类算法比较**\n\n\n\nX轴上的特征表示归一化后,是对某个特征增强10倍,其它不变,测试多个聚类算法的结果,越大越好。\n\n可以看到DBSCAN总体上都是偏好的,所以一般情况下,首选是DBSCAN\n\n#### 层次聚类\n\n#### Python层次聚类\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E8%81%9A%E7%B1%BB%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n#### 聚类案例实战\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E8%81%9A%E7%B1%BB%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82)\n\n\n\n### 贝叶斯分析\n\n#### 贝叶斯分析概述\n\n**一句话解释**\n\n经典的概率论对小样本事件并不能进行准确的评估,若想的到相对准确的结论往往需要大量的现场实验;而贝叶斯理论能较好的解决这一问题,利用己有的先验信息,可以得到分析对象准确的后验分布,贝叶斯模型是用参数来描述的,并且用概率分布描述这些参数的不确定性\n\n贝叶斯分析的思路由证据的积累来推测一个事物发生的概率,它告诉我们当我们要预测一个事物需要的是首先根据已有的经验和知识推断一个先验概率,然后在新证据不断积累的情况下调整这个概率。整个通过积累证据来得到一个事件发生概率的过程我们称为贝叶斯分析\n\n\n\n**故事背景**\n\n贝叶斯全名为托马斯·贝叶斯( Thomas Bayes,1701-1761),是一位与牛顿同时代的牧师,是一位业余数学家,平时就思考些有关上帝的事情,当然,统计学家都认为概率这个东西就是上帝在掷骰子。当时贝叶斯发现了古典统计学当中的一些缺点,从而提出了自己的“贝叶斯统计学”,但贝叶斯统计当中由于引入了一个主观因素(先验概率,下文会介绍),一点都不被当时的人认可。直到20世纪中期,也就是快200年后了,统计学家在古典统计学中遇到了瓶颈,伴随着计算机技术的发展,当统计学家使用贝叶斯统计理论时发现能解决很多之前不能解决的问题,从而贝叶斯统计学一下子火了起来,两个统计学派从此争论不休。\n\n\n\n##### 基本概念:\n\n**1、分布函数( Distribution Function)**\n\n分布函数是指随机变量小于某个值的函数,即它和累积密度函数( Cumulative Density Function).是同一个意思对于连续型分布来说,分布函数或者累积密度函数是概率密度函数( (Probability Density Functio的积分对离散型分布来说,分布函数或者累积密度函数是个阶梯状的分段函数。\n\n**2、概率密度函数( Probability Density 
Function)**\n\n仅针对连续型变量定义,可以理解成连续型随机变量的似然函数。它是连续型随机变量的分布函数的一阶导数,即变化率。如一元髙斯分布的密度函数为\n\n\n\n**3、概率质量函数( Probability Mass Function)**\n\n仅针对离散型随机变量定义,它是离散型随机变量在各个特定值上取值的概率。注意,连续型随机变量的概率密度函数虽然与离散型随机变量的概率质量函数对应,但是前者并不是概率,前者需要在某个区间进行积分后表示概率,而后者是特定值概率。连续型随机变量没有在某一点的概率的说法(因为毎一点的概率密度函数都是0)。假设X是抛均匀硬币的结果反面取值为0,正面取值为1。那么其概率质量函数为\n\n\n\n**4、似然函数( Likelihood Function)**\n\n简称似然,是指在某个参数下,关于数据的函数。它在统计推断问题中极其重要。一般表示为:\n\n\n\n由于我们般假设所有的数据都是独立同分布的,因此,似然的计算是所有数据的密度函数的乘积,这在计算中非常麻烦。所以我们般使用Log-似然来计算。\n\n**5、边缘分布( Marginal Distribution)**\n\n在统计理论中,边绿分布指—组随机变量中,只包含其中部分变量的概率分布。例如随机变量Ⅹ和Y,X的边緣分布是(离散型随机变量):\n\n\n\n连续型随机变量的边缘分布\n\n\n\n\n\n##### 概率:\n\n\n\n什么是概率这个问题需要好好想一想了。咱们来抛硬币吧,大家的第一反应就是五五开。为什么会这样觉得呢? 因为我做了很多少次试验,其中基本是一半半,这就说明了古典统计学的思想,概率是基于大量实验的,也就是大数定理。对于硬币来说我们可以来试一试,那有些事没办法进行试验该怎么办呢? 今天下雨的概率50%,日本某城市下个月发生地震的概率30%,这些概率怎么解释呢? 日本在100次试验中,地震了30次? 这很难玩啊!所以古典统计学就无法解释了。这只是其一,再比如说,你去赌场了,你问了10个人赢没赢钱,他们都说赢了,按照古典统计学思想,咱们是不是稳赢啊\n\n\n\n**世界观的区别:**\n\n**统计学派:**\n\n- 观察到的数据被认为是随机的,因为它们是随机过程的实现,因此每次观察系统时都会发生变化。\n\n- 模型参数认为是固定的。参数的值是未知的,但它们是固定的,因此我们对它们进行条件设置。\n\n \n\n概率推理通常涉及推导未知参数的估计,基于一些选择的最优性准则选择,如无偏性、方差最小化。\n\n比如说,我在今天绝地求生里面吃鸡了的真假。定义参数θ:\n\n- θ=1,吃鸡\n- θ=0,没有。\n\n那么频率派认为,θ是取值0或者1的固定数,不能说θ=1的概率是多少。\n\n**贝叶斯学派:**\n\n- 数据被认为是固定的。他们使用的是随机的,但是一旦他们被拿到手了,就不会改变\n- 贝叶斯用概率分布来描述模型参数的不确定性,这样一来,他们就是随机的了\n\n\n\n我们要得到的就是对应该数据所有参数的可能性(分布)的情况。\n\n还是上面的例子,这回我们可以说θ=1概率是30%。而且随着所得样本的增多,我们可以把这个概率加以变化,得到θ|x的分布。这个概率其实是信心的含义。\n\n#### 贝叶斯算法概述:\n\n要解决的问题:\n\n- 正向概率:假设袋子里面有N个白球,M个黑球,你伸手进去摸一把,摸出黑球的概率是多大\n- 逆向概率:如果我们事先并不知道袋子里面黑白球的比例,而是闭着眼睛摸出一个(或好几个)球,观察这些取出来的球的颜色之后,那么我们可以就此对袋子里面的黑白球的比例作出什么样的推测\n\n什么使用贝叶斯:\n\n- 现实世界本身就是不确定的,人类的观察能力是有局限性的\n- 我们日常所观察到的只是事物表面上的结果,因此我们需要提供一个猜测\n\n#### 贝叶斯推导实例:\n\n一所学校的男女比例如下\n\n\n\n- 男生总是穿长裤,女生则一半穿长裤一半穿裙子\n- 正向概率:随机选取一个学生,他(她)穿长裤的概率和穿裙子的概率是多大\n- 逆向概率:迎面走来一个穿长裤的学生,你只看得见他(她)穿的是否长裤,而无法确定他(她)的性别,你能够推断出他(她)是女生的概率是多大吗?\n\n公式:\n\n- 假设学校里面人的总数是U个\n- 穿长裤的(男生):U * P(Boy) * P(Pants|Boy)\n - P(Boy)是男生的概率=60%\n - P(Pants|Boy)是条件概率,即在Boy这个条件下穿长裤的概率是多大,这里是100%,因为所有男生都穿长裤\n- 穿长裤的(女生):U * P(Girl) * P(Pants|Girl)\n\n求解:穿长裤的人里面有多少女生\n\n- 穿长裤总数:U * P(Boy) * P(Pants|Boy) + U * P(Girl) * P(Pants|Girl)\n\n- P(Girl|Pants) = U * P(Girl) * P(Pants|Girl) / 穿长裤总数\n\n U * P(Girl) * P(Pants|Girl) / [U * P(Boy) * P(Pants|Boy) + U * P(Girl) * P(Pants|Girl)]\n\n与总人数有关吗?\n\n- U * P(Girl) * P(Pants|Girl) / [U * P(Boy) * P(Pants|Boy) + U * P(Girl) * P(Pants|Girl)]\n- 容易发现这里与总人数是无关的,可以消去\n- P(Girl|Pants) = P(Girl) * P(Pants|Girl) / [P(Boy) * P(Pants|Boy) + P(Girl) * P(Pants|Girl)]\n\n- 假设学校里面的总数是U个\n- 穿长裤的(男生):U * P(Boy) * P(Pants|Boy)\n - P(Boy)是男生的概率=60%\n - P(Pants|Boy)是条件概率,即在Boy这个条件下穿长裤的概率是多大,这里是100%,因为所有男生都穿长裤\n- 穿长裤的(女生):U * P(Girl) * P(Pants|Girl)\n\n化简:\n\n- P(Girl|Pants) = P(Girl) * P(Pants|Girl) / [P(Boy) * P(Pants|Boy) + P(Girl) * P(Pants|Girl)]\n- 分母其实就是P(Pants)\n- 分子其实就是P(Pants,Girl)\n\n贝叶斯公式\n\n- \n\n > P(A|B):B情况下A发生的概率\n >\n > P(B|A):A情况下B发生的概率\n >\n > P(A):A发生的概率\n >\n > P(B):B发生的高女\n\n#### 拼写纠正实例:\n\n- 问题是我们看到用户输入了一个不在字典中的单词,我们需要去猜测:“这个家伙到底真正想输入的单词是什么呢”\n- P(我们猜测他想输入的单词|他实际输入的单词)\n\n用户实际输入的单词记为D (D代表Data,即观测数据)\n\n- 猜测1:P(h1|D),猜测2:P(h2|D),猜测3:P(h3|D) ...\n\n 统一为:P(h|D)\n\n- P(h|D) = P(h) * P(D|h) / P(D)\n\n- 对于不同的具体猜测h1 h2 h3 ...,P(D)都是一样的,所以在比较P(h1|D)和P(h2|D)的时候我们可以忽略这个常数\n\n- P(h|D) = P(h) * P(D|h)\n\n 对于给定观测数据,一个猜测是好是坏,取决于“这个猜测本身独立的可能性大小(先验概率,Prior)”和“这个猜测生成我们观测到的数据的可能性大小”\n\n- 贝叶斯方法计算:P(h) * P(D|h),P(h) 是特定猜测的先验概率\n- 比如用户输入tlp,到底是top还是tip?这个时候,当最大似然不能做出决定性的判断时,先验概率就可以插手进来给出指示——“既然你无法决定,那么我告诉你,一般来说top出现的程度要高许多,所以更可能他想打的top”\n\n#### 垃圾邮件过滤实例:\n\n- 最大似然:最符合观测数据的(即P(D|h)最大的)最优优势\n- 奥卡姆剃刀:P(h)较大的模型有较大的优势\n- 
掷一个硬币,观测到的是“正”,根据最大似然估计的精神,我们应该猜测这枚硬币掷出“正”的概率是1,因为这个才是能最大化P(D|h)的那个猜测\n\n- 如果平面上有N个点,近似构成一条直线,但绝不精确地位于一条直线上。这时我们既可以用直线来拟合(模型1),也可以用二阶多项式(模型2)拟合,也可以用三阶多项式(模型3),特别地,用N1阶多项式便能够保证肯定能完美通过N个数据点。那么,这些可能的模型之中到底哪个是最靠谱的呢?\n- 奥卡姆剃刀: 越是高阶的多项式越是不常见\n\n问题:给定一封邮件,判定它是否属于垃圾邮件D来表示这封邮件,注意D由N个单词组成。我们用h+来表示垃圾邮件,h-表示正常邮件\n\n- P(h+|D) = P(h+) * P(D|h+) / P(D)\n\n P(h-|D) = P(h-) * P(D|h-) / P(D)\n\n- 先验概率: P(h+)和P(h-)这两个先验概率都是很容易求出来的,只需要计算一个邮件库里面垃圾邮件和正常邮件的比例就行了。\n\n- D里面含有N个单词d1,d2,d3,P(D|h+)=P(d1,d2,...,dn|h+),P(d1,d2,...,dn|h+)就是说在垃圾邮件当中出现跟我们目前这封邮件一模样的一封邮件的概率是多大!\n\n P(d1,d2,,dn|h+)扩展为:P(d1|h+) × P(d2|d1,h+) × P(d3|d2,d1,h+) × ...\n\n- P(d1|h+) × P(d2|d1,h+) × P(d3|d2,d1,h+) × ...\n\n 假设di与di-1是完全条件无关的(朴素贝叶斯假设特征之间是独立,互不影响)\n\n 简化为P(d1h+) × P(d2|h+) × P(d3|h+)对于P(d1|h+) × P(d2|h+) × P(d3|h+) × …只要统计di这个单词在垃圾邮件中出现的频率即可\n\n#### 贝叶斯解释\n\n最终目标就是得到后验分布:\n\n这个条件概率就是在给定观测数据的时候,求得的参数的概率。以前我们想知道一个参数,要通过大量的观测值才能得出,而且是只能得出一个参数值,而现在运用了贝叶斯统计思想,这个后验概率分布其实是一系列参数值θ的概率分布。\n\n\n\n\n\n\n\n积分求的区间指的是参数θ所有可能取到的值的域,所以可以看出后验概率是在知道x的前提下在θ域内的一个关于θ的概率密度分布,每一个θ都有一个对应的可能性(也就是概率)。\n\n**Priors**\n\n先验分布就是你在取得实验观测值以前对一个参数概率分布的主观判断。\n\n**Likelihood functions**\n\n以然函数帮助我们依据数据中的信息将先验分布更新到我们想要的后验分布。\n\n\n\n**一个小例子**\n\n这个例子很容易就能求解出来大多数情况贝叶斯分析的计算量会很大血友病是一种罕见的遗传性疾病,该病是一种X连锁隐性遗传性状,这意味着男性只有一个基因,而女性只有两个基因,这种特征可以被显性基因等位基因所掩盖在这个例子中,我们需要计算这个母亲是携带者的概率。\n\n> 可以先通俗的理解为,有基因大X和小x,女性有两个基因且是两个小x才是感染者,男性有一个基因且一个小x就是感染者,而遗传是一半一半,即会从父亲中拿一个,母亲中也拿一个遗传。注意,这里的感染者不代表会发病。\n\n\n\n> 红色:感染affected\n>\n> 橙色:携带carrier\n>\n> 绿色:正常no gene\n>\n> 灰色:未知 unknown\n\nW=1表示是感染的,W=0表示是未感染的。求解公式如下:\n\n\n\n> S1和S2表示两个孩子,都是0未感染,即W=1在两个孩子都是未感染的情况下的概率\n\n先验知识:Pr(W = 1) = 0.5,另一种表达方式:\n\n\n\n> Pr(W = 1) = 0.5:已知女人的母亲是携带者两个x,兄弟感染且发病,那么她至少有一个x,而两个孩子是未感染的,那么她至多有一个x,如果女人是两个小x,根据遗传对半,小孩肯定是感染者\n\n计算女人是感染和未感染者的两种可能性\n\n对于似然函数\n\n\n\n> (0.5)怎么来的:根据上面情况可知,女人是Xx(感染者),那么给两个小孩的概率大X还是小x的概率都是0.5。\n>\n> (1)怎么来的:W=0未感染的情况下,给两个孩子都是大X,即只有1种可能\n\n我们再计算最后女人是携带者的概率,根据贝叶斯公式:\n\n\n\n> 分母:两个孩子是未感染者的情况下,女人是感染者的概率 × 感染者概率 + 两个孩子是未感染者的情况下,女人是未感染者的概率 × 未感染的概率。\n>\n> 分子:两个孩子是未感染者的情况下,女人是感染者的概率 × 感染者概率\n\n#### 贝叶斯建模\n\n[请跳转至notebook](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E8%B4%9D%E5%8F%B6%E6%96%AF%E5%88%86%E6%9E%90)"
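The hemophilia-carrier example near the end reduces to a single application of Bayes' rule; a quick numeric check using the values stated in the text (prior 1/2, each unaffected son has probability 1/2 under W = 1 and 1 under W = 0):

```python
# Pr(W = 1): prior that the woman is a carrier, from her family history
prior_carrier = 0.5
# Likelihood of observing two unaffected sons
lik_carrier = 0.5 * 0.5   # under W = 1 each son dodges the bad X with prob. 0.5
lik_not = 1.0 * 1.0       # under W = 0 only healthy X's can be passed on

posterior = (prior_carrier * lik_carrier) / (
    prior_carrier * lik_carrier + (1 - prior_carrier) * lik_not)
print(posterior)  # 0.2: two healthy sons cut the carrier probability from 1/2 to 1/5
```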
},
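The 核函数实例 arithmetic in the document above (x = (1,2,3), y = (4,5,6), both sides equal to 1024) can be verified directly in numpy; the explicit 9-dimensional map and the squared inner product agree:

```python
import numpy as np

x = np.array([1, 2, 3])
y = np.array([4, 5, 6])

# explicit map f to 9-D: all pairwise products x_i * x_j
fx = np.outer(x, x).ravel()
fy = np.outer(y, y).ravel()

print(fx @ fy)        # 1024, the O(n^2) high-dimensional way
print((x @ y) ** 2)   # 1024, the kernel shortcut K(x, y) = <x, y>^2
```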
{
"alpha_fraction": 0.39401912689208984,
"alphanum_fraction": 0.6461722254753113,
"avg_line_length": 198,
"blob_id": "7c0cf4aab0f7df4ea69258d118bde2096ed29e74",
"content_id": "4f19265d0b83fe962e7022c637c6c4489cba1a26",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4410,
"license_type": "permissive",
"max_line_length": 475,
"num_lines": 21,
"path": "/机器学习算法原理及推导/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# README\n\n### 学习章节:\n\n<ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC'>机器学习算法原理及推导</a>\n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95'>李航——统计学习方法</a></li>\n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/1.%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95%E6%A6%82%E8%AE%BA.md'>1.统计学习方法概论.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/2.%E6%84%9F%E7%9F%A5%E6%9C%BA%E2%80%94%E2%80%94%E5%85%A5%E9%97%A8%E7%9A%84%E5%9E%AB%E8%84%9A%E7%9F%B3.md'>2.感知机——入门的垫脚石.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/3.K%E8%BF%91%E9%82%BB%E2%80%94%E2%80%94%E7%89%A9%E4%BB%A5%E7%B1%BB%E8%81%9A.md'>3.K近邻——物以类聚.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/4.%E6%9C%B4%E7%B4%A0%E8%B4%9D%E5%8F%B6%E6%96%AF.md'>4.朴素贝叶斯.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/5.%E5%86%B3%E7%AD%96%E6%A0%91%E2%80%94%E2%80%94%E6%AF%8F%E6%AC%A1%E9%80%89%E4%B8%80%E8%BE%B9.md'>5.决策树——每次选一边.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/6.%E9%80%BB%E8%BE%91%E6%96%AF%E8%92%82%E5%9B%9E%E5%BD%92%E4%B8%8E%E6%9C%80%E5%A4%A7%E7%86%B5.md'>6.逻辑斯蒂回归与最大熵.md</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/7.%E6%94%AF%E6%8C%81%E5%90%91%E9%87%8F%E6%9C%BA%E2%80%94%E2%80%94%E5%8D%87%E7%BB%B4%E6%89%93%E5%87%BB.md'>7.支持向量机——升维打击.md</a>\n <li><a 
href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95/8.%E6%8F%90%E5%8D%87%E6%96%B9%E6%B3%95%E2%80%94%E2%80%94%E4%B8%89%E4%B8%AA%E8%87%AD%E7%9A%AE%E5%8C%A0%EF%BC%8C%E9%A1%B6%E4%B8%AA%E8%AF%B8%E8%91%9B%E4%BA%AE.md'>8.提升方法——三个臭皮匠,顶个诸葛亮.md</a>\n            </ul>\n        <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E5%AE%8F%E6%AF%85%E2%80%94%E2%80%94%E5%BC%82%E5%B8%B8%E6%A3%80%E6%B5%8B'>李宏毅——异常检测</a>\n        </li>\n\n"
},
{
"alpha_fraction": 0.6570327281951904,
"alphanum_fraction": 0.6705202460289001,
"avg_line_length": 20.625,
"blob_id": "c079e26cc657e9ccd98885bab598eed9b65e5f89",
"content_id": "d4db28c288f42b01c15a5d8a5b7a8d606d849501",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 565,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 24,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/normalize.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "\"\"\"Normalize features\"\"\"\n\"\"\"数据标准化\"\"\"\n\nimport numpy as np\n\n\ndef normalize(features):\n features_normalized = np.copy(features).astype(float)\n\n # 计算均值\n features_mean = np.mean(features, 0)\n\n # 计算标准差\n features_deviation = np.std(features, 0)\n\n # 标准化操作\n if features.shape[0] > 1:\n features_normalized -= features_mean\n\n # 防止除以0\n features_deviation[features_deviation == 0] = 1\n features_normalized /= features_deviation\n\n return features_normalized, features_mean, features_deviation\n"
},
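A usage sketch for the `normalize()` helper above, assuming the file sits on the import path as `normalize.py`; the constant second column is there to exercise the divide-by-zero guard:

```python
import numpy as np

from normalize import normalize  # assumed module path

X = np.array([[1.0, 10.0],
              [2.0, 10.0],   # the second column is constant,
              [3.0, 10.0]])  # which exercises the divide-by-zero guard

X_norm, mean, std = normalize(X)
print(X_norm)      # first column becomes z-scores, constant column becomes zeros
print(mean, std)   # [2. 10.] and [~0.816 1.] (zero std replaced by 1)
```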
{
"alpha_fraction": 0.5669800043106079,
"alphanum_fraction": 0.7702702879905701,
"avg_line_length": 12.897958755493164,
"blob_id": "19ecca8d7af23ab12f34d02a82aee412800fc0e1",
"content_id": "e68c12ddbd5d597605fd488371727817ab869616",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6581,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 245,
"path": "/机器学习算法原理及推导/李航——统计学习方法/1.统计学习方法概论.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 1.统计学习方法概论\n\n### 知识树\n\n\n\n> 苹果表示比较重要的\n\n\n\n### 监督学习\n\nSupervised learning\n\n#### 监督学习的实现步骤:\n\n1. 得到一个有限的训练数据集合\n2. 确定模型的假设空间,也就是所有的备选模型\n3. 确定模型选择的准则,即学习策略\n4. 实现求解最优模型的算法\n5. 通过学习方法选择最优模型\n6. 利用学习的最优模型对新数据进行预测或分析\n\n\n\n> 告诉它哪些东西是好的,哪些东西是坏的,它依据这些来学习\n\n比如下面的例子:\n\n\n\n> 根据是否招女孩子喜欢的一些特征,来推导其它未知的数据是否招女孩子喜欢,而无监督学习是没有是否招女孩子喜欢这个标签,可能会采取一些聚类的方法,比如把身高聚的时候,高的一堆,矮的一堆,成绩高的一堆低的一堆,以得到类似的结果。\n\n按照常理来讲有监督学习肯定比无监督学习好,但现实生活中,有监督学习需要标注,也就是要大量的人力成本,而无监督数据的获取往往是最低成本的。\n\n\n\n### 统计学习三要素\n\nElement of statistical learning\n\n#### 模型(假设空间)\n\n> 假设所有的点都在空间中,每个点是一个模型或者函数\n\n决策函数:\n\n> f(x):把所有的函数假设为f(x)\n>\n> θ:f(x)里头的参数,用来确定模型\n\n条件概率分布:\n\n策略:\n\n> 怎么确定θ的参数,让模型知道是否预测错了,错的偏差有多大\n\n0-1损失函数\n\n\n\n> 预测相同为0,否则为1,得继续努力,但这样有个很明显的缺陷,就是它只知道错了,但是不知道错在哪里\n\n平方损失函数\n\n\n\n> 弥补上面的缺陷,告诉它差距有多大,相等则不需要\n\n绝对损失函数\n\n\n\n> 防止小于0的情况,即Y-f(x)小于0,相当于非常非常好,这显然是不对的,所以加入绝对值,要么大于0,要么小于0\n\n对数损失函数\n\n\n\n> 让为0的概率越来越大,让为1的概率越来越小\n\n经验风险最小化:\n\n\n\n> 对每个样本跑一遍,将所有的loss平均计算,loss越大表示离真实的越大,loss越小说明里真实越接近,模型也越好\n\n结构风险最小化:\n\n\n\n> 加入正则项,防止过拟合,也就是模型过于复杂,过于只适合当前数据,导致预测其它数据的时候很差(泛化能力)。\n\n算法:挑选一个合适的算法,使得可以求解最优模型\n\n\n\n### 模型评估与选择\n\nModel evaluation and model selection\n\n训练误差:\n\n\n\n> 对所有训练数据的结果做一个平均,误差越大模型可能越大。但如果以训练集来评估,就想考试一样,如果考试题目平时已经见过,当然能做出来,要解决的是平均没见过的类似题目。\n\n测试误差:\n\n\n\n> 利用测试集去测试模型的训练情况。\n\n验证集:我们通常来讲,会挑选测试集表现最好的,但是也有总可能就是测试集刚好和训练集的“题目”类似,那它当然能表现的好,这时候就需要引入验证集。我们一般选择验证集表现最好的模型。\n\n多项式拟合问题:\n\n\n\n> 左上欠拟合严重,右上欠拟合,左下拟合正常,右下过拟合。实际中怎么判断过拟合,即训练集上误差非常低,但是在其它数据集上的误差非常高,一般就是过拟合。\n\n\n\n### 正则化与交叉验证\n\nRegularization and cross validation\n\n防止过拟合\n\n最小化结构风险:\n\n\n\n交叉验证:\n\n数据集随机划分为以下3部分:\n\n- 训练集:模型的训练\n- 测试集:模型的选择\n- 验证集:模型的评估\n\n\n\n\n\n### 泛化能力\n\nGeneralization ability\n\n定理1.1泛化误差上界\n\n对于二分类问题,当假设空间是有限个函数的集合F = {f1,f2,...,fd}时,对任意一个函数f ∈ F,至少以概率1 - δ,以下不等式成立:\n\n其中,\n\n\n\n> 即以某种依据来确定模型是否具备泛化能力。但现实生活中这个是非常难实现的,问题在于有限个,而这个有限,在我们实际操作中是不可能知道是有限个的。\n\n\n\n### 生成模型与判别模型\n\nGenerative model and discriminant model\n\n生成方法:\n\n> P(Y|X):表示PX条件下,Y的概率\n\n判别方法:\n\n例子:如何知道女孩子的姓名呢?\n\n生成方法:我要是把她爸妈建模出来,直接问她爸妈不就行了吗?\n\n判别方法:她叫小红的概率是多少?她叫小刘的概率是多少?\n\n\n\n### 分类问题\n\nClassification\n\nTP——将正类预测为正类数;\n\nFN——将正类预测为负类数;\n\nFP——将负类预测为正类数;\n\nTN——将负类预测为负类数;\n\n精确率:预测为正类的样本中有多少分对了;\n\n\n\n召回率:在实际正类中,有多少正类被模型发现了\n\n\n\nF1值:\n\n\n\n\n\n一般会配合一个混淆矩阵:\n\n\n\n\n\n### 标注问题\n\nTagging\n\n输入:\n\nx = (x1, x2, ..., xn) T\n\ny = (y1, y2, ..., yn) T\n\n\n\n### 回归问题\n\nRegression\n\n输出一个连续的值,不是0/1这种固定值\n\n\n\n## 总结\n\nSummarization\n\n1. 统计学习路线:设计模型->训练->预测\n2. 监督学习与非监督学习的联系与区别\n3. 统计学习三要素:模型、策略、算法\n4. 模型的评估:训练误差、验证误差、测试误差\n5. 正则化与交叉严重\n6. 泛化能力:泛化误差上界\n7. 生成模型与判别模型的联想与区别\n8. 分类问题:准确率、精确率、召回率、F1值\n9. 标准问题\n10. 回归问题:输出为连续的值"
},
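The TP/FN/FP/TN definitions in the 分类问题 section of the chapter above translate directly into code. A minimal sketch; the confusion-matrix counts are invented for illustration:

```python
def metrics(tp, fn, fp, tn):
    precision = tp / (tp + fp)   # of everything predicted positive, how much was right
    recall = tp / (tp + fn)      # of all true positives, how much was found
    f1 = 2 * precision * recall / (precision + recall)
    accuracy = (tp + tn) / (tp + fn + fp + tn)
    return precision, recall, f1, accuracy

print(metrics(tp=8, fn=2, fp=4, tn=86))  # (0.667, 0.8, 0.727, 0.94)
```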
{
"alpha_fraction": 0.5421266555786133,
"alphanum_fraction": 0.7103428244590759,
"avg_line_length": 15.960591316223145,
"blob_id": "06282ba62b5f04f52a411a29c2cc1b07416e0bf4",
"content_id": "9bcb1aabe143fbd5bdeec4e95173f2f824f69b57",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6738,
"license_type": "permissive",
"max_line_length": 134,
"num_lines": 203,
"path": "/机器学习算法原理及推导/李航——统计学习方法/2.感知机——入门的垫脚石.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 2.感知机——入门的垫脚石\n\n### 概述\n\n每个算法都是为了解决一类问题,或者说解决之前的问题所创造出来的,而感知机,在解决一类问题的时候也暴露了很多问题,变相的推动了以后的算法的改进方向。\n\n### 知识树\n\n\n\n> 苹果表示相对重要的\n\n\n\n### 直观介绍\n\n现在有一盘红豆和绿豆,怎么把红豆和绿豆分开来\n\n\n\n> 当红豆和绿豆放在一起时,它们却能自己分开来,为什么呢?\n\n我们怎么区分呢,假设我们随意花如下的线\n\n\n\n> 这样是不是就完美的区分开来了,完美解决。\n>\n> 那么程序怎么知道x和y都大于0的豆是红豆呢?\n>\n> 或者说,它是怎么学会这个规则的?\n\n那是不是只要一条直线不分错一个点,或者说尽可能的少分错点,那就是一条好的直线。\n\n即:我们把所有分错的点和直线的距离的和(蓝色示例),求和的距离最小表示这条直线越好,因为这表示这条线分错的最少。\n\n\n\n> 如图中红色线的和的距离肯定比黑色或者黄色的线大。\n\n\n\n### 总结\n\n1. 一条直线不分错一个点,这就是好的直线。\n2. 模型要尽可能找到好的直线。\n3. 如果没有好的直线,在差的直线中找到好的直线。\n4. 判断直线多差的方式:分错的点到直线的距离求和。\n\n\n\n### 感知机模型\n\nPerceptron\n\n确定终究目标\n\nf(x)\n\n> 我们希望一个函数,如f(x),进来一个豆,它告诉我们是红豆还是绿豆\n\nf(x) = sign(w * x + b)\n\n> sign如图:\n\n把w * x + b 比作x,相当于给了一个“豆”进来,就只得到两个结果,也就是红豆(+1)或者绿豆(-1)。那么w * x + b 是什么?\n\nw * x + b是超平面\n\n\n\n> 即那条黄色的分割线,且当豆在线的上面,就是红豆,在下面是绿豆。\n>\n> 超平面是n维欧氏空间中余维度等于一的线性子空间,也就是必须是(n-1)维度。即如果是2维,超平面就是1维,如果是3维,超平面就是2维。\n\nw * x + b分别代表什么\n\n> w是权重,x是特征的实际值,b是偏值\n>\n> 比如知道豆的直径,如x1表示某个豆的直径 = w1 * x1 + b1,大于1则是红豆,小于则是绿豆\n>\n> 有时候一个特征不一定能区分开来,可能还需要质量,(x1,x2)计算 = (w1, w2) * (x1, x2)\n>\n> 亦或者还有豆的光滑度,则有x1, x2, x3 ,这样就变成3维了。\n\n**正式的说**\n\nw * x + b 是一个n维空间中的超平面S,其中w是超平面的法向量,b是超平面的截距,这个超平面将特征空间划分成两部分,位于两部分的点分别被分为正负两类。所以,超平面S称为分离超平面。\n\n特征空间也就是整个n维空间,样本的每个属性都叫一个特征,特征空间的意思是在这个空间中可以找到样本所有的属性的组合。\n\n\n\n### 感知觉的学习策略\n\nLearning policy\n\n> 求空间中任意一个点X0到超平面S的距离\n\n**函数间隔与几何间隔**\n\n函数间距:\n\n> 缺点:已知wx + b = 0,当不等于0时(比如1),那如果等比例放大缩小w和b是不是就更小,等同于1/2 (wx + b) = 1/2(wx) + 1/2(b) = 1/2,超平面是没有动的,但是结果却缩小了,那么模型就觉得它知道等比例缩小就能得到更小的结果。\n>\n> 这时候就引入几何间距\n\n几何间距:\n\n\n\n\n\n> 加入了二范式,相当于有了一个度量的标准,即等比例缩小w时,外面的w也会等比例变化。\n\n通俗来讲,函数间距是将你去吃饭的那段路本来要1000m,它改成1km,1就小于1000了,而几何间距就杜绝这种情况,只能都是m或者km\n\n\n\n对于误分类数据而已,\n\n> yi表示真实是+1还是-1\n>\n> w*xi+b表示预测的结果\n>\n> 绿豆预测成绿豆:预测结果w*xi+b < 0(绿豆),且真实为绿豆(-1)时,则- * -1(wxi + b) < 0。\n>\n> 绿豆预测成红豆:w*xi+b > 0,真实为绿豆-1,则- * -1 (wxi + b) > 0。\n>\n> 也就是说只要分错,那么数据一定是大于0\n\n误分类点xi到超平面S的距离为:\n\n因此,所有误分类点到超平面S的总距离为:\n\n> M:误分类点的集合\n\n\n\n### 感知觉的学习策略——原始形式\n\nLearning policy\n\n1. 任选一个超平面w0,b0\n\n > 随机初始化,如w0 = 1,可能有多个维度,如x0,x1,x2,那么w也会有w0,w1,w2的随机初始化\n\n2. 采用梯度下降法极小化目标函数\n\n \n\n > L:loss\n >\n > 这时候用的还是函数间隔,因为感知机的超平面的目标是不分错任何一个点。所以不存在说要某个点到超平面的距离无限的小。\n >\n > 也可以用几何间隔,但是会增加计算量,没有必要,但后面基本都是用几何间隔。\n >\n > 侧面反映了感知机只能在线性可分的数据集上使用,也就是线性模型。\n\n3. 更新w,b\n\n \n\n\n\n### 例子\n\nExample\n\n训练数据中,正例(y=+1)点位x1 = (3,3)T,x2 = (4,3)T,负例(y=-1)为x3 = (1, 1)T,求解感知机模型f(x) = sign(w*x + b),其中w = (w1, w2)T,x = (x1, x2)T\n\n解:\n\n1. 构造损失函数\n\n \n\n2. 梯度下降求解w,b。设步长为1\n\n 1. 取初值w0 = 0,b0 = 0\n 2. 对于x1,y1(w0 * x1 + b0) = 0未被正确分类,更新w,b。w1 = w0 + x1y1 = (3,3)T,b1 = b0 + y1 = 1 => w1 * x + b1 = 3x + 3x + 1\n\n3. 对x1,x2,显然yi(w1 * xi + b1) > 0,被正确分类,不做修改。对于x3,y3(w1 * x3 + b1) 应该小于0,结果是大于0被误分类,更新w,b。\n\n \n\n4. 以此往复,直到没有误分类点,损失函数达到极小。\n\n \n\n \n\n### 总结\n\nSummarization\n\n1. 感知机通过构造超平面的形式划分不同类的点。\n\n2. 感知机属于线性判别模型,因为它的判别边界是线性的。\n\n3. 函数间隔和几何间隔的区别\n\n > 大多时候是用的几何间隔,因为有“度量标准”,而感知机是误分类驱动,目标是不分错任何一个点,即结果为0,所以即使度量标准改变也没用。"
},
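The worked example in the chapter above (positives x1 = (3,3), x2 = (4,3), negative x3 = (1,1), step size 1) can be run end to end. A minimal sketch of the primal perceptron; the loop keeps updating on any misclassified point until the data is separated:

```python
import numpy as np

# training set from the worked example: two positives, one negative
X = np.array([[3, 3], [4, 3], [1, 1]])
y = np.array([1, 1, -1])

w, b, lr = np.zeros(2), 0.0, 1.0
mistake = True
while mistake:                       # loop until no point is misclassified
    mistake = False
    for xi, yi in zip(X, y):
        if yi * (w @ xi + b) <= 0:   # functional margin <= 0: misclassified
            w, b = w + lr * yi * xi, b + lr * yi
            mistake = True

print(w, b)  # converges to w = [1. 1.], b = -3.0 for this data
```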
{
"alpha_fraction": 0.5571752786636353,
"alphanum_fraction": 0.7659540176391602,
"avg_line_length": 15.599462509155273,
"blob_id": "bc86fc00975421da2ff2b4f7b3d874789e7a3125",
"content_id": "fc3a887f1e077000ea723b26a65b01b407c49c8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11891,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 372,
"path": "/机器学习算法原理及推导/李航——统计学习方法/7.支持向量机——升维打击.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 7.支持向量机——升维打击\n\nSupport vector machines\n\n### 知识树\n\nKnowledge tree\n\n\n\n> 支持向量机也是李航的统计学习中效果最好的模型,面试中也是问的最多的\n>\n> 苹果表示重点\n>\n> 间隔:使用了几何间隔,保证w b的度量,感知机则是函数间隔\n>\n> 间隔最大化思想:则是支持向量机的独有,这使得它找到最优超平面\n>\n> 核函数:面试当中可能会问到是否能写出其中的一个核函数\n\n\n\n### 红豆绿豆的前世今生\n\n前面章节讲到划分超平面,来区分红豆和绿豆\n\n\n\n> 从上面可以看到,能找到很多的超平面,黄色的线,那哪条黄色的线才是最好的呢?当然是对角的黄色线,因为这条可以让红豆绿豆区分的最开,也就是线和豆的距离最远,即使区分新的豆(预测集),也能最好的区分开,因为可能豆有接近的情况。\n\n**如何找到最优的超平面**\n\n\n\n> 从上图可知,超平面A是最优的。因为它与两个类的距离都足够大。\n\n结论:我们试图找到一个超平面,这个超平面可以使得与它最近的样本点的距离必须大于其他所有超平面划分时与最近的样本点的距离。\n\n在SVM中,这叫间隔最大化。\n\n> 即该超平面与最近的样本点的距离,都大于所有超平面离最近样本点的距离\n\n此时我们可以说,我们找到了最优的超平面,但随着时代的变迁,红豆绿豆也发生了变化,比如下图的\n\n\n\n> 它不再是左右分开,而是混在一起\n\n\n\n> 单纯用线性无法解决,如果是非线性呢?\n\n\n\n> 我们需要找到这么个圈的超平面,那么圈能是超平面呢?\n\n\n\n> 如上图,原本二维空间的样本,因为线性不可分, 即需要投射到三维空间,那么在三维空间就能用超平面切分。\n>\n> 再将三维空间的超平面投射到二维空间,那么超平面在二维空间上就是曲线的,即非线性。\n\n那么接下来,我们要考虑的是,怎么进行低维和高维之间的转换。\n\n\n\n### 升维可分问题\n\n1. 当有人拿着棍子指着你时,你只能看到棍子的横截面,是一个点,它是一维的。我们无法将两个点区分开来。因为她们重叠了。\n\n \n\n > 右边红色线表示看的方向\n\n2. 当有人拿着棍子指着其它地方,我们能看到整个棍子,这时候是二维的。我们可以一把刀劈开,把红豆和绿豆区分开。所以,红豆和绿豆虽然在一维的时候不能分开,但在二维时线性可分了。\n\n \n\n3. 也可能二维不可分, 如下图\n\n \n\n4. 这时候我们可以把棍子看作三维中的棍子,有体积的。如果把棍子立在地上,很有可能红豆靠南侧,绿豆靠北侧,我们像劈柴一样从上至下即可劈开(分开),也一样是线性可分。\n\n5. 如果三维还不能线性可分,那就升到思维\n\n**总会从某一个维度开始,它变成线性可分了,即只要不断的增加维度(特征)总能区分开来**\n\n同时,我们发现高维中的超平面,映射到低维空间中时,可能会变成曲线或其它的划分形式。\n\n这也就是为什么,在SVM中我们同样使用超平面来划分,SVM可以划分非线性的数据集。\n\n它本质上仍然是线性超平面,不过是高维中的线性超平面。\n\n\n\n**那么升维一定会线性可分吗?**\n\n**会不会升到无穷维了仍然线性不可分?**\n\n答案是不会的,首先要明白,我们的数据集一定是基于真实的某种分布,分为A类的样本和B类的一定在本质上有区别。只要有区别,就一定可以区分开来,一定在某个高维度上线性可分。\n\n**另外,总能上升到有个维度空间中线性可分,无限上升的同时有没有可能在N+1维度又不可分了?**\n\n不会,随着维度的上升,我们获得的信息越来越多。当第N维的数据已经足够划分时,更多的信息量并不会出现又不可分的情况。\n\n\n\n### 总结\n\nSummarization\n\n1. SVM使用间隔最大化思想构造最优超平面。\n2. 构造出来的超平面使得其与最近的点的距离最大。\n3. SVM也可划分非线性数据集。\n4. 它通过高维中的线性超平面再低维中的投影来完成非线性的划分。因此从直观上来讲,我们的模型必定有一个升维的操作。\n5. 这是总体的概念。\n\n\n\n### 支持向量机\n\nSupport vector machines\n\n函数间隔:\n\n几何间隔:\n\n这里使用的是几何间隔,前面讲到这里就不重复了\n\n\n\n**最大间隔分离超平面:**\n\n目前讲的是线性超平面\n\n\n\n> 图中心,虚线到实线的距离我们称之为γ,我们要做的是最大化γ,使得这个超平面调整为γ的一个最大值,等价于找到了最优的超平面\n\n**式子如下:**\n\n\n\n> γ:表示几何间隔\n>\n> s.t. 表示约束\n>\n> yi:正负1。保证算出来的数始终是大于0的,如上图中“圈”表示+1的样本“×”表示负一的样本,那么某个“圈”是正数,乘以上方的yi(正数),正正得正,某个点“×”是负数,乘以下方的yi(负数),负负得正。\n\n**简化上方的式子:**\n\n我们知道max的γ是几何间隔,那么我们变换一下\n$$\n\\frac{\\hat{γ}}{||w||} 其中,\\hat{γ}表示函数间隔,除以w二范数就是几何间隔了。\n$$\n\n$$\n既然我们最终是\\frac{\\hat{γ}}{||w||} ,那么式子我们可以简化成\n$$\n\n\n\nmax的时候是几何间隔,也就是最终s.t. 还是会约束着它朝着几何间隔去走,但是这样的好处就是下方的||w||就没有了\n\n**简化后如下:**\n\n\n\n之前我们说过,对于函数间隔,我们等比例放大缩小w、b可以让最终结果变成1,也就是γ=1\n\n**再简化后:**\n\n\n$$\n后面要用到拉格朗日乘子法,我们把\\frac{1}{||w||}变成\\frac{1}{2}||w||^2,这两者是等价的\n$$\n\n**再简化后:**\n\n\n$$\n\\min_{w,b} \\quad \\frac{1}{2}||w||^2\n$$\n\n$$\ns.t.\\quad y_i(w*x_i+b)-1≥0 \\quad i=1,2,...,N\n$$\n\n> 利用拉格朗日乘子法,推导成如下式子\n\n\n$$\nL(w,b,α)=\\quad \\frac{1}{2}||w||^2-\\sum^N_{i=1}α_iy_i(w*x_i+b)+\\sum^N_{i=1}α_i\n$$\n\n$$\n目标:\\min_{w,b}\\max_aL(w,b,α)\n$$\n\n$$\n转换成:\\max_a\\min_{w,b}L(w,b,α)\n$$\n\n\n\n将拉格朗日函数L(w,b,α)分别对w,b求偏导并令其等于0\n\n\n\n进行推导\n\n\n\n\n\n2.求minL(w,b,α)对α的极大,即是对偶问题\n\n3.求max转换成min:\n\n\n\n接下来就是求解α的问题了,但是我们还得解决另外的一个问题\n\n\n\n### 软间隔最大化\n\nMaximum soft interval\n\n\n\n如上图所示,圈和叉表示两个不同的类别,但有的圈那边有叉,有的叉那边也有圈,怎么解决呢\n\n\n\n> 加入ξ松弛变量,即允许有一些交叉的类别样本\n\n\n\n> 变成如上式子,但同时我们也对ξ约束,防止它是过大的\n\n\n\n\n\n最终的结果为:\n\n\n\n\n\n\n\n这样接下来的问题就只剩求解α了\n\n\n\n### 核函数\n\nKernel function\n\n**现在到了这里:**\n\n\n\n\n\n\n\n**目前的问题:**\n\n式子中间有xi核xj的点积\n\n例如在手写数字数据集中,训练集有6万个样本,6万乘6万勉强能接受\n\n但如果每个样本有784维,6万样本两两做点积,是非常慢的。如果x是更高的维度呢?\n\n**梳理一下:**\n\n1. 由于公式的需要,我们需要计算xi和xj的点积\n2. 此外,我们需要将样本映射到高维去,加入映射函数ø(x),那么ø(xi)和ø(xj)的维度数目进一步扩大,它们的点积会让运算变得极其复杂\n3. 
我们希望存在一个函数K(xi,yi)=ø(xi)×ø(xj),但函数K的计算方式更简单。也就是说,我们将样本通过函数升维得到ø(xi)和ø(xj),接下来要计算它们的点积,能不能有个简单的计算公式,计算出来的结果和ø(xi)×ø(xj)一样?那样我们就不用再去算ø(xi)和ø(xj)的结果了,直接用简单方式计算不是更好吗?\n\n这个简便方式,就是核函数\n\n**在SVM中,我们通常使用高斯核:**\n\n\n\n 在计算x和z的点积时,直接用这个公式替代就好了\n\n\n\n### 序列最小最优化算法(SMO)\n\nSequetial minimal optimization\n\n之前我们还剩下α求解,我们用SMO\n\n**我们最后求解出来的α,一定是让整个结果满足KKT条件的。如果不满足,那一定不是最优解。**\n\n**所以我们可以不断地调整α的值,直到所有α都满足KKT条件,这是我们一定能得到最优解。**\n\n**怎么调整呢?——用SMO**\n\n假设整个式子中有N个α(α1,α2,α3,...,αN),先固定其它α,找α1,先让α1满足KKT条件。但如果固定除α1以外的所有α,等于也固定了α1.\n\n\n\n所以我们每次选择优化两个α\n\n\n\n进一步,原式中目前hi有α1和α2两个变量,我们将其它作为常数去除。\n\n\n\n整理如下:\n\n\n\n\n\n目前可知,α1一定在0和C之间\n\n\n\n\n\n\n\n\n\n**最终得到结果:**\n\n\n\n其中,L与H是所在对角线段端点的界,如果y1≠y2(如下左图所示),则\n\n\n\n如果y1=y2(如下右图所示),则\n\n\n\n\n\n现在对于两个α,我们已经知道该怎么优化了。那么怎么从总舵的α中挑选两个合适的来进行优化呢?\n\n——找到**违反KKT最严重**的。\n\n**第一个变量的选择:**\n\n\n\n**第二个变量的选择:**\n\n在第一个变量步子迈大的情况下,再找一个步子迈得最大的α。它的量化方式|E1-E2|\n\n\n\n两个α都找到一个,进行优化,然后找下一对α,直到再也找不到不违反KKT的α为止。\n\n### 总结\n\nSummary\n\n1. SVM首先从最大间隔出发,设计了可构造最优超平面的线性模型。\n2. 考虑到存在噪音或有部分点让人很为难,添加了软间隔。变成了具有软间隔功能的线性模型。\n3. 通过对数据的升维,使得模型变成了非线性。可以用于非线性数据集。\n4. 升维后无穷维的点积运算难以实现,引入了核函数简化运算。"
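The Gaussian kernel the text singles out is a one-liner. This sketch reproduces the σ² = 0.5 versus σ² = 3 contrast discussed for the decision boundaries; the two points and the σ values are illustrative:

```python
import numpy as np

def gaussian_kernel(x, z, sigma=1.0):
    # K(x, z) = exp(-||x - z||^2 / (2 * sigma^2)): 1 for identical points,
    # decaying toward 0 as the points move apart
    return np.exp(-np.sum((x - z) ** 2) / (2 * sigma ** 2))

x, z = np.array([0.0, 0.0]), np.array([1.0, 1.0])
for sigma2 in (0.5, 3.0):
    print(sigma2, gaussian_kernel(x, z, sigma=np.sqrt(sigma2)))
# small sigma: similarity drops off sharply (sharper, overfit-prone boundary)
# large sigma: smoother, flatter similarity surface
```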
},
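The [L, H] clipping step of SMO described above depends on whether y1 = y2; the figures carrying the exact bounds did not survive extraction, so this sketch uses the standard Platt SMO formulas, which match the surrounding description:

```python
def clip_alpha2(a2_unc, a1_old, a2_old, y1, y2, C):
    """Clip the unconstrained alpha2 update onto the [L, H] box segment."""
    if y1 != y2:                          # constraint line alpha2 - alpha1 = const
        L = max(0.0, a2_old - a1_old)
        H = min(C, C + a2_old - a1_old)
    else:                                 # constraint line alpha2 + alpha1 = const
        L = max(0.0, a2_old + a1_old - C)
        H = min(C, a2_old + a1_old)
    return min(max(a2_unc, L), H)

print(clip_alpha2(1.7, a1_old=0.4, a2_old=0.6, y1=1, y2=1, C=1.0))  # 1.0, clipped to H
```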
{
"alpha_fraction": 0.550561785697937,
"alphanum_fraction": 0.7845104336738586,
"avg_line_length": 18.310077667236328,
"blob_id": "b9e4dee0a710e11b525e6ca174c8fc8e4767da07",
"content_id": "a4d56b96309cf829e7469974316a9b2a65f14236",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4588,
"license_type": "permissive",
"max_line_length": 129,
"num_lines": 129,
"path": "/深度学习入门/第二章——走进深度学习的世界 神经网络模型.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第二章——走进深度学习的世界 神经网络模型\n\n#### 反向传播计算方法\n\n简单的例子:\n\n\n\n如何让 f 值更小,就是改变x、y、z,而损失函数也是这样,那么我们分别求偏导,则能得出每个值对结果的影响\n\n**链式法则**\n\n- 梯度是一步一步传的\n\n \n\n复杂的例子:\n\n\n\n\n\n#### 神经网络整体架构\n\n类生物神经元\n\n\n\n> 左半边是生物学上的神经元,右半边是数学上的“神经元”,可以说是非常像。\n\n整体架构\n\n\n\n- input layer输入层:比如输入X,有多少个x即有多少个input,比如前面的猫有3千多像素点,那么就有3千多个“圈”进行input。\n\n- hidden layer 1:指将X做了某些变换,且每个圈与前者的全部圈都连接,即是全连接,为什么多了1个圈,是表示可能会在原始特征的基础上做变换,变成4个特征。具体如:假设X输入的是年龄,第一圈表示对年龄做平方,第二个圈表示将年龄与其它值相加相乘等等。\n- W1:input是3个,hidden layer 1是4个,那么夹在中间的W1就是[3,4]的权重矩阵。\n\n- hidden layer 2:指在1的基础上再进行变换,防止如果hidden layer 1的效果不好,那么加多一层,进行再加工。\n- W2:hidden layer 1是4个,hidden layer 2是4个,那么夹在中间的W2就是[4,4]的权重矩阵。\n\n- output layer:输出结果。\n- W3:hidden layer 2是4个,output layer 2是1个,那么夹在中间的W3就是[4,1]的权重矩阵。\n\n整体大致公式:\n\n- 基本架构:\n- 继续堆叠一层:\n\n- 神经网络的强大之处在于,用更多的参数来拟合复杂的数据\n\n\n\n#### 神经元个数对结果的影响\n\n<https://cs.stanford.edu/people/karpathy/convnetjs/demo/classify2d.html>\n\n越多的神经元个数,切分的越明显\n\n如1个神经元:\n\n\n\n> 目的是区分绿色和红色的点,当只有1个神经元时,可以明显看到类似一刀切。\n\n如10个神经元:\n\n\n\n> 可以看到已经切分的非常明显了\n\n另外:在机器学习中,如果数据是完全随机的情况,模型是无法分辨的,但神经网络可以。如下图\n\n\n\n这就是神经网络的强大之处,越多神经元区分的越明显,不过也可能存在过拟合,因为太强大了。\n\n\n\n#### 正则化与激活函数\n\n- 惩罚力度对结果的影响:\n\n\n\n> 惩罚力度过小(左图),导致的结果是过拟合,有几个红色点明明应该更靠近绿色也被评定为红色。这些一般是训练集的情况,有标签能学到,但是在测试集可能就是灾难了。随着lambda的增大,切分的会相对平滑。\n\n- 参数个数对结果的影响:\n\n\n\n> 同样,神经元个数越多,也越容易过拟合\n\n- 激活函数\n\n\n\n> 做非线性变换,如Sigmoid、Relu、Tanh等\n\n激活函数对比\n\n- Sigmoid:\n\n > 其缺点是,靠两边的线过于平缓,无法计算梯度或者约等于0,那么值就不会进行更新或者前向传播,而我们恰恰需要传播来更新我们的W值(前面讲到)\n\n- Relu:\n\n > 市面上绝大多数神经网络用的激活函数,这个是绝对会有梯度,不会出现梯度消失。\n\n\n\n#### 神经网络过拟合解决方法\n\n- 不同的预处理结果会使模型的结果发生很大的差异:\n\n \n\n > 如常见的标准化\n\n- 参数初始化:通常使用随机策略进行参数初始化\n\n \n\n- DROP-OUT:在神经网络训练过程中,随机去掉部分神经元,以减少神经元的个数,并不是简单的去掉部分,而且每次训练都随机去掉部分。\n\n \n\n > 这样保证每次训练的神经网络都相对简单,每次训练可以DROP-OUT一部分神经元\n\n"
},
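A minimal forward pass with the exact shapes named in the chapter above (W1: [3,4], W2: [4,4], W3: [4,1]) and the ReLU activation it recommends; random weights stand in for trained ones, and inputs are row vectors:

```python
import numpy as np

rng = np.random.default_rng(0)

def relu(x):
    return np.maximum(0, x)   # the activation the chapter recommends

# shapes straight from the architecture sketch: 3 inputs -> 4 -> 4 -> 1 output
W1 = rng.normal(size=(3, 4))
W2 = rng.normal(size=(4, 4))
W3 = rng.normal(size=(4, 1))

x = rng.normal(size=(1, 3))   # one sample with 3 features
h1 = relu(x @ W1)             # hidden layer 1, shape (1, 4)
h2 = relu(h1 @ W2)            # hidden layer 2, shape (1, 4)
score = h2 @ W3               # output layer,   shape (1, 1)
print(score.shape)
```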
{
"alpha_fraction": 0.4898154139518738,
"alphanum_fraction": 0.7660725712776184,
"avg_line_length": 12.837004661560059,
"blob_id": "4aa3caf4bea1b304e35c28992d3215e5b1dc8206",
"content_id": "ea19eb663ca435f8b1beb1dff27f7145abebcaf7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5424,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 227,
"path": "/机器学习算法原理及推导/李航——统计学习方法/6.逻辑斯蒂回归与最大熵.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 6.逻辑斯蒂回归与最大熵\n\n### 知识树\n\nKnowledge tree\n\n\n\n相较前面的算法,性能更好,也更符合工业场景\n\n\n\n### 一个逻辑斯蒂回归回归的故事\n\nA story about the Logistic regression\n\n1. 之前的f(x) = sign(w*x+b)只输出+1和-1,这样的判别方式真的有效吗?\n2. 超平面左侧0.001距离的点和超平面右侧0.001距离的点真的有天壤之别吗?\n\n\n\n> 如上面两个黑点,明明只差分毫,却变成了+1或者-1。这也是感知机的缺陷\n\n**我们想要解决的:**\n\n1. 怎么解决极小距离带来的+1和-1的天壤之别\n2. 怎么让最终的预测式子连续可微\n\n\n\n### 逻辑斯蒂回归\n\nLogistic regression\n\n\n\n\n\n\n\n\n\n> 连续可微\n>\n> 可输出概率\n\n\n\n**参数估计:**\n\n由上面的式子可知,里面参数只有w和x,x为已知的特征,也就是更新w即可\n\n逻辑斯蒂回归模型学习时,对于给定的训练数据集T={(x1,y1), (x2,y2), ...,(xn,yn)},可以应用极大似然估计法估计模型参数,从而得到逻辑斯蒂回归模型。\n\n设:\n\n> Y=1和Y=0相加时为1,所以当Y=1=π(x),那么Y=0就等于1-π(x)\n\n似然函数为\n\n\n\n> 当前的条件做连乘,变换成log则是相加\n\n对数似然函数为\n\n\n\n对L(w)求极大值,得到w的估计值\n\n**似然函数对w求导:**\n\n\n\n\n\n\n\n\n\n### 总结\n\nSummarization\n\n1. 逻辑斯蒂以输出概率的形式解决了极小距离带来的+1和-1的天壤之别,同时概率也可作为模型输出的置信程度。\n2. 逻辑斯蒂使得了最终的模型函数连续可微,训练目标与预测目标达成一致。\n3. 逻辑斯蒂采用了较大似然估计来估计参数。\n\n\n\n### 一个最大熵的小故事\n\nA story about the Maximum entropy model\n\n我们去到拉斯维加斯赌场\n\n问1:我手里有个骰子,问你扔下去后某个面朝上的概率是多少?\n\n答1:都是1/6,因为概率相同\n\n\n\n问2:我竟然认为有道理,可如果是老千手里的骰子呢?你还觉得是1/6吗?\n\n答2:可是你没说是老千手里的\n\n\n\n问3:可是为什么你不去假设可能是老千手里的骰子这种情况?\n\n答3:因为你没说是老千手里的\n\n\n\n问4:好像是这么个道理,如果要考虑老千,那可能还要考虑骰子是否破损,桌面是否有问题\n\n答4:所以1/6最保险\n\n\n\n问5:如果我告诉你,1朝上的概率是1/2呢?\n\n答5:那剩下的就是1/10\n\n\n\n**什么是最大熵?**\n\n在我们猜测概率时,不确定的部分我们认为是等可能的,就像骰子一样,我们知道有6个面,因此认为每个面的概率是1/6,也就是等可能。\n\n换句话说,就是趋向于均匀分布,最大熵使用的就是这么朴素的道理:\n\n\t凡是我们知道的,就把它考虑进去,凡是不知道的,通通均匀分布。\n\n\n\n### 最大熵模型\n\nMaximum entropy model\n\n终极目标:P(Y|X)\n\n熵:\n\n将终极目标代入熵:\n\n\n\n做些改变,调整熵:\n\n\n\n我们手里有训练集,包含所有样本及对应的标签。\n\n\n\n> v表示数目,满足X=x,Y=y的数目\n>\n> 统计出来概率,通过频数\n\n\n\n\n\n**特征函数**\n\n其作用是为了将某个特征x,进行一些转换后,让它和标签y起到重大的相关作用\n\n\n\n特征函数f(x,y)关于经验分布,的期望值:\n\n\n\n特征函数f(x,y)关于模型P(Y|X)与经验分布的期望值:\n\n\n\n> 下面的P表示真实世界中全部数据的分布,即训练集不可能用上全部的数据,一般都是某段时间的,比如N年,所以用表示它是真实的全部时间P中的某段的经验分布\n\n约束:\n\n> 希望训练集的和真实的全部数据是一致的分布\n\n\n\nmax\n\n\n\n\n\n> fi表示让所有的特征都满足约束条件\n\nmin\n\n\n\n\n\n\n\n**拉格朗日乘子法:**\n\n\n\n\n\n> 求最小的值\n\n\n\n\n\n\n\n### 总结\n\nSummarization\n\n1. 最大熵强调不提任何假设,以熵最大为目标。\n\n2. 将终极目标代入熵的公式后,将其最大化。\n\n3. 在训练集中寻找现有的约束,计算期望,将其作为约束。\n\n 使用拉格朗日乘子发得到P(y|x),之后使用优化算法得到P(y|x)中的参数w。\n\n"
},
{
"alpha_fraction": 0.6004396080970764,
"alphanum_fraction": 0.6239010691642761,
"avg_line_length": 42.567928314208984,
"blob_id": "7ba531f5264a90bee93ee4d0dd07e2fca7f6b525",
"content_id": "9d34e7eff680a630cd14698ba15c802a961876db",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19644,
"license_type": "permissive",
"max_line_length": 212,
"num_lines": 449,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/process.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport joblib\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom tqdm import tqdm\nfrom pandarallel import pandarallel\nfrom sklearn.model_selection import train_test_split\n# import random\nimport gc\nimport ast\nimport os\nimport sys\nimport warnings\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'\nwarnings.filterwarnings('ignore')\npd.options.mode.chained_assignment = None\n#pandarallel.initialize(nb_workers=16)\npandarallel.initialize()\n\n\ndef pandas_list_to_array(df):\n \"\"\"\n Input: DataFrame of shape (x, y), containing list of length l\n Return: np.array of shape (x, l, y)\n \"\"\"\n\n return np.transpose(\n np.array(df.values.tolist()),\n (0, 2, 1)\n )\n\n\ndef preprocess_inputs(df, cols: list):\n return pandas_list_to_array(\n df[cols]\n )\n\n\ndef append_all_data(files_list, file_head_path):\n \"\"\"\n concat all the data\n :param files_list: the name of data\n :param file_head_path: the path of data\n :return: DataFrame of data for all\n \"\"\"\n data_all_path = file_head_path + files_list[0]\n data_all = pd.read_csv(data_all_path)\n data_all = data_all.head(0)\n try:\n del data_all['Unnamed: 0']\n except KeyError as e:\n pass\n # 循环添加全部数据\n for i in files_list:\n data_path = file_head_path + i\n print(\"当前文件为:\", data_path)\n data = pd.read_csv(data_path)\n try:\n del data['Unnamed: 0']\n except KeyError as e:\n pass\n data_all = data_all.append(data)\n return data_all\n\n\ndef file_name(file_dir):\n files_list = []\n for root, dirs, files in os.walk(file_dir):\n # print(\"success\")\n for name in files:\n files_list.append(name)\n return files_list\n\n\ndef load_data(making_data_dir, link_data_dir, cross_data_dir, link_data_other_dir, head_data_dir, \n win_order_data_dir, pre_arrival_sqe_dir,zsl_link_data_dir, arrival_data_dir=None, zsl_arrival_data_dir=None, arrival_sqe_data_dir=None):\n \"\"\"\n loading three path of data, then merge them\n :return: all data by order_level\n \"\"\"\n print('-------------LOAD DATA for mk_data----------------')\n mk_list = file_name(making_data_dir)\n mk_list.sort()\n mk_data = append_all_data(mk_list, making_data_dir)\n #mk_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/join_20200825.csv') # for test running\n mk_data['date_time'] = mk_data['date_time'].astype(str)\n # print(mk_data['date_time'].head())\n mk_data['dayofweek'] = pd.to_datetime(mk_data['date_time'])\n mk_data['dayofweek'] = mk_data['dayofweek'].dt.dayofweek + 1\n weather_le = LabelEncoder()\n mk_data['weather_le'] = weather_le.fit_transform(mk_data['weather'])\n print('Remove the wk2_ and m1_')\n del_cols = []\n mk_cols = mk_data.columns.tolist()\n for i in range(len(mk_cols)):\n if 'wk2_' in mk_cols[i]:\n del_cols.append(mk_cols[i])\n if 'm1_' in mk_cols[i]:\n del_cols.append(mk_cols[i])\n if 'ratio' in mk_cols[i]:\n del_cols.append(mk_cols[i])\n del_cols = del_cols + ['weather', 'driver_id', 'date_time_dt', 'link_time_sum','date_time_sum']\n print('*-' * 40, 'Will be drop the list:', del_cols)\n mk_data.drop(columns=del_cols, axis=1, inplace=True)\n print('The init shape of mk_data:', mk_data.shape)\n #if arrival_data_dir:\n # mk_data, _ = train_test_split(mk_data, test_size=0.4, random_state=42)\n #print('*-'*40)\n #print('The train_test_split shape of mk_data:', mk_data.shape)\n\n \n print('-------------LOAD WIN DATA----------------')\n win_order_list = file_name(win_order_data_dir)\n win_order_list.sort()\n win_order_data = append_all_data(win_order_list, 
\n    #win_order_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/win_for_slice_20200825.csv') # for test running\n    del_win_order_cols = []\n    win_order_cols = win_order_data.columns.tolist()\n    for i in range(len(win_order_cols)):\n        if 'last_wk_lk_current' in win_order_cols[i]:\n            del_win_order_cols.append(win_order_cols[i])\n        #if 'distance' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n        #if '1_percent' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n        #if '0_percent' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n    del_win_order_cols = del_win_order_cols + ['slice_id', 'date_time']\n    win_order_data.drop(columns=del_win_order_cols, axis=1, inplace=True)\n    print('win_order_data.shape',win_order_data.shape)\n    mk_data = pd.merge(mk_data, win_order_data, how='left', on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del win_order_data\n    gc.collect()\n\n\n    \"\"\"\n    print('-------------LOAD ZSL DATA----------------')\n    zsl_link_list = file_name(zsl_link_data_dir)\n    zsl_link_list.sort()\n    zsl_link_data = append_all_data(zsl_link_list, zsl_link_data_dir)\n    #zsl_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_link/link_fea_order_id_level_20200825.csv') # for test running\n    get_zsl_link_cols = []\n    zsl_link_cols = zsl_link_data.columns.tolist()\n    for i in range(len(zsl_link_cols)):\n        if 'eb' in zsl_link_cols[i]:\n            get_zsl_link_cols.append(zsl_link_cols[i])\n    #print(get_zsl_link_cols)\n    get_zsl_link_cols.insert(0, 'order_id')\n    print(zsl_link_data.shape)\n    zsl_link_data = zsl_link_data[get_zsl_link_cols]\n    print('mk_data.shape',mk_data.shape)\n    mk_data = pd.merge(mk_data, zsl_link_data, on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del zsl_link_data\n    gc.collect()\n    \"\"\"\n    \"\"\"\n    #zsl_cross_list = file_name(zsl_cross_data_dir)\n    #zsl_cross_list.sort()\n    #zsl_cross_data = append_all_data(zsl_cross_list, zsl_cross_data_dir)\n    zsl_cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_cross_0703/cross_fea_order_id_level_20200825.csv') # for test running\n    get_zsl_cross_cols = []\n    zsl_cross_cols = zsl_cross_data.columns.tolist()\n    for i in range(len(zsl_cross_cols)):\n        if ('last' or 'div' or 'interval' or 'period') in zsl_cross_cols[i]:\n            get_zsl_cross_cols.append(zsl_cross_cols[i])\n    get_zsl_cross_cols.append('order_id')\n    print(zsl_cross_data.shape)\n    zsl_cross_data = zsl_cross_data[get_zsl_cross_cols]\n    print('mk_data.shape',mk_data.shape)\n    mk_data = pd.merge(mk_data, zsl_cross_data, on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del zsl_cross_data\n    gc.collect()\n    \"\"\"\n\n    print('-------------LOAD HEAD DATA----------------')\n    head_list = file_name(head_data_dir)\n    head_list.sort()\n    head_data = append_all_data(head_list, head_data_dir)\n    #head_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/head_link_20200825.csv') # for test running\n    get_head_cols = ['len_tmp','status_0','status_1','status_2','status_3','status_4','rate_0','rate_1','rate_2','rate_3','rate_4']\n    get_head_cols.insert(0, 'order_id')\n    print('head_data.shape:',head_data.shape)\n    head_data = head_data[get_head_cols]\n    print('mk_data.shape',mk_data.shape)\n    mk_data = pd.merge(mk_data, head_data, how='left', on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del head_data\n    gc.collect()\n\n    print('-------------LOAD DATA for link_data----------------')
\n    link_list = file_name(link_data_dir)\n    link_list.sort()\n    link_data = append_all_data(link_list, link_data_dir)\n    # for test running\n    #link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/sqe_20200825_link.txt')\n    print('The init shape of link_data:', link_data.shape)\n\n\n    print('-------------LOAD DATA for arrival_sqe_data----------------')\n    arrival_sqe_list = file_name(pre_arrival_sqe_dir)\n    arrival_sqe_list.sort()\n    arrival_sqe_data = append_all_data(arrival_sqe_list, pre_arrival_sqe_dir)\n    #arrival_sqe_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/20200825.csv') # for test running\n    del arrival_sqe_data['slice_id']\n    arrival_cols = arrival_sqe_data.columns.tolist()\n    new_arrival_cols = ['future_'+i for i in arrival_cols if i != 'order_id']\n    new_arrival_cols.insert(0, 'order_id')\n    arrival_sqe_data.columns = new_arrival_cols\n    print('The init shape of arrival_sqe_data:', arrival_sqe_data.shape)\n    link_data = pd.merge(link_data, arrival_sqe_data, how='left', on='order_id')\n    del arrival_sqe_data\n    gc.collect()\n\n    \"\"\"\n    print('-------------LOAD DATA for arrival_link_data----------------')\n    arrival_link_list = file_name(pre_arrival_data_dir)\n    arrival_link_list.sort()\n    arrival_link_data = append_all_data(arrival_link_list, pre_arrival_data_dir)\n    #arrival_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/final_pre_arrival_data/sqe_20200825_link.txt') # for test running\n    print('The init shape of arrival_link_data:', arrival_link_data.shape)\n    link_data = pd.merge(link_data, arrival_link_data, how='left', on='order_id')\n    del arrival_link_data\n    gc.collect()\n    \"\"\"\n\n    \"\"\"\n    print('-------------LOAD DATA for h_s_link_data----------------')\n    h_s_link_list = file_name(h_s_for_link_dir)\n    h_s_link_list.sort()\n    h_s_link_data = append_all_data(h_s_link_list,h_s_for_link_dir)\n    #h_s_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_hightmp_slice_for_link_eb/20200825_link.txt') # for test running\n    h_s_link_data = h_s_link_data[['order_id', 'sqe_slice_id', 'sqe_hightemp', 'sqe_weather_le']]\n    print('The init shape of h_s_link_data:', h_s_link_data.shape)\n    link_data = pd.merge(link_data, h_s_link_data, how='left', on='order_id')\n    del h_s_link_data\n    gc.collect()\n    \"\"\"\n    print('-------------LOAD DATA for link_data_other----------------')\n    link_list_other = file_name(link_data_other_dir)\n    link_list_other.sort()\n    link_data_other = append_all_data(link_list_other, link_data_other_dir)\n    #link_data_other = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_link_sqe_for_order_other/sqe_20200825_link.txt') # for test running\n    print('The init shape of link_data_other:', link_data_other.shape)\n\n    link_data = pd.merge(link_data, link_data_other, on='order_id')\n    # print(link_data.head(0))\n    # del link_data['lk_t_sub_by_min']\n    del_link_cols = ['lk_t_sub_by_min','lk_t_sub_by_q50', 'lk_t_sub_by_min', 'total_linktime_std']\n    # 'future_pre_arrival_status', 'future_arrive_slice_id'] # 'future_arrive_slice_id'\n    link_data.drop(columns=del_link_cols, axis=1, inplace=True)\n    print('The merge shape of link_data:', link_data.shape)\n    del link_data_other\n    gc.collect()\n\n    print('-------------LOAD DATA for link_data_arrival----------------')\n    if arrival_sqe_data_dir is None:\n        pass\n    else:\n        link_list_arrival = file_name(arrival_sqe_data_dir)
\n        link_list_arrival.sort()\n        link_data_arrival = append_all_data(link_list_arrival, arrival_sqe_data_dir)\n        #link_data_arrival = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_lk_arrival_sqe_for_order/sqe_20200825_link.txt') # for test running\n        print('The init shape of link_data_arrival:', link_data_arrival.shape)\n        link_data = pd.merge(link_data, link_data_arrival, on='order_id')\n        print('The merge shape of link_data:', link_data.shape)\n        del link_data_arrival\n        gc.collect()\n\n    link_cols_list = ['link_id', 'link_time', 'link_current_status', 'pr',\n                      'dc', 'link_arrival_status', 'future_pre_arrival_status', 'future_arrive_slice_id']\n\n    data = pd.merge(mk_data, link_data, how='left', on='order_id')\n    del mk_data\n    del link_data\n    gc.collect()\n\n    print('-------------LOAD DATA for arrival_data----------------')\n    if arrival_data_dir is None:\n        pass\n    else:\n        arrival_list = file_name(arrival_data_dir)\n        arrival_list.sort()\n        arrival_data = append_all_data(arrival_list, arrival_data_dir)\n        #arrival_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_link_sqe_for_order_arrival/sqe_20200825_link.txt')\n        arrival_cols = ['order_id', 'lk_arrival_0_percent', 'lk_arrival_1_percent','lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']\n        #print(arrival_data.head(2))\n        data = pd.merge(data, arrival_data, how='left', on='order_id')\n        del arrival_data\n        gc.collect()\n\n    print('-------------LOAD DATA for zsl_arrival_data----------------')\n    if zsl_arrival_data_dir is None:\n        pass\n    else:\n        zsl_arrival_list = file_name(zsl_arrival_data_dir)\n        zsl_arrival_list.sort()\n        zsl_arrival_data = append_all_data(zsl_arrival_list, zsl_arrival_data_dir)\n        #zsl_arrival_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_arrival/link_fea_arrive_order_id_level_20200818.csv')\n        zsl_arrival_cols = zsl_arrival_data.columns.tolist()\n        zsl_arrival_cols.remove('order_id')\n        #print(zsl_arrival_data.head(2))\n        data = pd.merge(data, zsl_arrival_data, how='left', on='order_id')\n        del zsl_arrival_data\n        gc.collect()\n\n    print('-------------LOAD DATA for cross_data----------------')\n    cross_list = file_name(cross_data_dir)\n    cross_list.sort()\n    cross_data = append_all_data(cross_list, cross_data_dir)\n    # for test running\n    #cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/sqe_20200825_cross.txt')\n    del_cross_cols = ['cr_t_sub_by_min', 'cr_t_sub_by_q50', 'total_crosstime_std']\n    cross_data.drop(columns=del_cross_cols, axis=1, inplace=True)\n    cross_cols_list = ['cross_id', 'cross_time']\n    print('The init shape of cross_data:', cross_data.shape)\n\n    data = pd.merge(data, cross_data, how='left', on='order_id')\n    del cross_data\n    gc.collect()\n    # data['cross_id'] = data['cross_id'].str.replace('nan','0')\n    # print('working..............................')\n\n    mk_cols_list = data.columns.tolist()\n    remove_mk_cols = ['order_id', 'slice_id', 'hightemp', 'lowtemp', 'weather_le', 'dayofweek', 'date_time', 'ata', 'link_arrival_status']\n    mk_cols_list = list(set(mk_cols_list) - set(remove_mk_cols))\n    mk_cols_list = list(set(mk_cols_list) - set(link_cols_list))\n    mk_cols_list = list(set(mk_cols_list) - set(cross_cols_list))\n    if arrival_data_dir is None:\n        pass\n    else:\n        mk_cols_list = list(set(mk_cols_list) - set(arrival_cols))\n        mk_cols_list = list(set(mk_cols_list) - set(zsl_arrival_cols))\n    print('length of mk_cols_list', len(mk_cols_list))\n    print('*-' * 40)\n    print('The finish shape of data is:', data.shape)
\n\n    return data, mk_cols_list, link_cols_list, cross_cols_list\n\n\ndef processing_data(data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=False):\n    \"\"\"\n    fix data, ast.literal_eval + StandardScaler + train_test_split\n    :return: train_data, val_data, test_data\n    \"\"\"\n    #print('Now, Starting parallel_apply the arrival_status..................')\n    #for i in tqdm(['link_arrival_status']):\n    #    data[i] = data[i].parallel_apply(ast.literal_eval)\n    print('Now, Starting parallel_apply the link..................')\n    for i in tqdm(link_cols_list):\n        data[i] = data[i].parallel_apply(ast.literal_eval)\n    gc.collect()\n    print('Now, Starting parallel_apply the cross..................')\n    for i in tqdm(cross_cols_list):\n        data[i] = data[i].parallel_apply(ast.literal_eval)\n    data = data.fillna(0)\n\n    # train, val\n    if is_test is True:\n        print('is_test is True')\n        ss = joblib.load('../model_h5/ss_scaler')\n        ss_cols = mk_cols_list + WIDE_COLS\n        data[ss_cols] = ss.transform(data[ss_cols])\n        return data\n    else:\n        ss_cols = mk_cols_list + WIDE_COLS\n        ss = StandardScaler()\n        ss.fit(data[ss_cols])\n        data[ss_cols] = ss.transform(data[ss_cols])\n        joblib.dump(ss, '../model_h5/ss_scaler')\n        print('is_test is False')\n        return data\n\n\ndef processing_inputs(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, arrival=True):\n    \"\"\"\n    change the data for model\n    :return:\n    \"\"\"\n    print('*-'*40, 'processing_inputs')\n    if arrival:\n        mk_cols_list = mk_cols_list + ['lk_arrival_0_percent', 'lk_arrival_1_percent','lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']\n        mk_cols_list = mk_cols_list + ['zsl_link_arrival_status_mean','zsl_link_arrival_status_nunique','zsl_link_arrival_status0','zsl_link_arrival_status1','zsl_link_arrival_status2','zsl_link_arrival_status3']\n        if 'lk_arrival_0_percent' in mk_cols_list:\n            print('The lk_arrival_0_percent in the mk_cols_list')\n            #print('*-' * 40, 'EXIT')\n            #sys.exit(0)\n        print('111'*40, 'HAVE FEATURES OF ARRIVAL')\n    else:\n        print('222'*40, 'HAVE NO FEATURES OF ARRIVAL')\n    if 'ata' in mk_cols_list:\n        print('The ata in the mk_cols_list')\n        print('*-' * 40, 'EXIT')\n        sys.exit(0)\n    if 'ata' in link_cols_list:\n        print('The ata in the link_cols_list')\n    if 'ata' in cross_cols_list:\n        print('The ata in the cross_cols_list')\n    if 'ata' in WIDE_COLS:\n        print('The ata in the WIDE_COLS')\n        print('*-' * 40, 'EXIT')\n        sys.exit(0)\n    data_link_inputs = preprocess_inputs(data, cols=link_cols_list)\n    data.drop(columns=link_cols_list, axis=1, inplace=True)\n    gc.collect()\n    print('drop the link_cols_list')\n    # print(data_link_inputs[:, :, :1])\n    # data['cross_id'] = data['cross_id'].str.replace('nan','0')\n    data_cross_inputs = preprocess_inputs(data, cols=cross_cols_list)\n    data.drop(columns=cross_cols_list, axis=1, inplace=True)\n    gc.collect()\n    print('drop the cross_cols_list')\n\n    data_deep_input = data[mk_cols_list]\n    data_wide_input = data[WIDE_COLS].values\n    data_inputs_slice = data['slice_id'].values\n    data_labels = data['ata']\n    if arrival:\n        arrival_col = ['lk_arrival_0_percent', 'lk_arrival_1_percent',\n                       'lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']\n        data_arrival = data[arrival_col]\n        print('*-'*40, 'data_arrival', data_arrival.shape)\n        return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels, data_arrival\n    else:\n        return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels
\n\n\ndef split_col(data, columns, fillna=None):\n    '''Split list-valued columns into separate columns\n\n    :param data: original data\n    :param columns: names of the columns to split\n    :type data: pandas.core.frame.DataFrame\n    :type columns: list\n    '''\n    for c in columns:\n        new_col = data.pop(c)\n        max_len = max(list(map(lambda x:len(x) if isinstance(x, list) else 1, new_col.values)))  # maximum length\n        new_col = new_col.apply(lambda x: x+[fillna]*(max_len - len(x)) if isinstance(x, list) else [x]+[fillna]*(max_len - 1))  # pad missing entries; None can be swapped for np.nan\n        new_col = np.array(new_col.tolist()).T  # transpose\n        for i, j in enumerate(new_col):\n            data[c + str(i)] = j\n    return data\n\ndef list_to_np(x):\n    return np.array(x)\n\n\n"
},
{
"alpha_fraction": 0.6080156564712524,
"alphanum_fraction": 0.6532257795333862,
"avg_line_length": 24.658308029174805,
"blob_id": "b5b1d8b5113debdf724e8d3d035a08df54cefd67",
"content_id": "61751eab90153d681341729eb31ca4aa76055ffa",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 9637,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 319,
"path": "/NLP通用框架BERT项目实战/第三章——基于BERT的中文情感分析实战.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第三章——基于BERT的中文情感分析实战\n\n#### 任务介绍\n\n对中文进行分类demo,分成0/1/2\n\n\n\n\n\n> 可以理解为0是一般,1是好,2是差。模型代码、数据集都在我的网盘里,链接:https://pan.baidu.com/s/18vPGelYCXGqp5OCWZWz36A \n> 提取码:de0f\n\n我们使用的是Google官方开源的中文BERT预训练模型\n\n\n\n> vocab.txt里把常用的中文基本覆盖了\n\n\n\n#### 读取处理自己的数据集\n\n~~~python\nclass MyDataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n~~~\n\n> 这是完全照搬class DataProcessor的类,只是类名改成MyDataProcessor\n\n\n\n**读取数据的类get_train_examples**\n\n~~~python\nclass MyDataProcessor(DataProcessor):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n file_path = os.path.join(data_dir, 'train_sentiment.txt')\n f = open(file_path, 'r', encoding='utf-8') # 读取数据,并指定中文常用的utf-8\n train_data = []\n index = 0 # ID值\n for line in f.readlines(): # 参考XnliProcessor\n guid = \"train-%d\" % index\n line = line.replace('\\n', '').split('\\t') # 处理换行符,原数据是以tab分割\n text_a = tokenization.convert_to_unicode(str(line[1])) # 第0位置是索引,第1位置才是数据,可以查看train_sentiment.txt\n label = str(line[2]) # 我们的label里没有什么东西,只有数值,所以转字符串即可\n train_data.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) # 这里我们没text_b,所以传入None\n index += 1 # index每次不一样,所以加等于1\n return train_data # 这样数据就读取完成\n~~~\n\n> 参照XnliProcessor\n\n~~~python\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i) # 获取样本ID\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1]) # 获取text_a和b,我们只有a所以把b去掉\n label = tokenization.convert_to_unicode(line[2]) # 获取标签\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) # 把读进来的东西传到InputExample,这个类可以点进去,里面什么都没做,只不过是模板,我们也照着做\n return examples\n~~~\n\n\n\n**获取label**\n\n~~~\n# 也是参考XnliProcessor,把return改成0,1,2即可\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n return [\"0\", \"1\", \"2\"]\n~~~\n\n\n\n**以下是完整的**\n\n~~~python\nclass MyDataProcessor(DataProcessor):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n file_path = os.path.join(data_dir, 'train_sentiment.txt')\n f = open(file_path, 'r', encoding='utf-8') # 读取数据,并指定中文常用的utf-8\n train_data = []\n index = 0 # ID值\n for line in f.readlines(): # 参考XnliProcessor\n guid = \"train-%d\" % 
#### Training the Chinese BERT classifier\n\nAdd an entry to the main function\n\n~~~python\ndef main(_):\n    tf.logging.set_verbosity(tf.logging.INFO)\n\n    processors = {\n        \"cola\": ColaProcessor,\n        \"mnli\": MnliProcessor,\n        \"mrpc\": MrpcProcessor,\n        \"xnli\": XnliProcessor,\n        'my': MyDataProcessor,  # this is the added entry, so that the task_name run argument can be matched\n    }\n~~~\n\n\n\nParameters (see the command-line sketch below for an equivalent invocation)\n\n~~~\n-task_name=my\n-do_train=true\n-do_eval=true\n-data_dir=data\n-vocab_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/vocab.txt\n-bert_config_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/bert_config.json\n-init_checkpoint=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/bert_model.ckpt\n-max_seq_length=70\n-train_batch_size=32\n-learning_rate=5e-5\n--num_train_epochs=3.0\n-output_dir=my_model\n~~~\n\n> task_name: which module to run; main maps this name to a processor class\n>\n> do_train: whether to train\n>\n> do_eval: whether to evaluate\n>\n> data_dir: data path\n>\n> vocab_file: vocabulary file\n>\n> bert_config_file: BERT configuration\n>\n> init_checkpoint: initial checkpoint to start from\n>\n> max_seq_length: maximum sequence length\n>\n> train_batch_size: number of samples per training batch\n>\n> learning_rate: learning rate\n>\n> num_train_epochs: number of training epochs\n>\n> output_dir: output path\n\n
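Equivalently — a sketch, assuming these run-configuration arguments belong to BERT's run_classifier.py in the current directory — the same flags can be passed on the command line:\n\n~~~\npython run_classifier.py \\\n    --task_name=my \\\n    --do_train=true \\\n    --do_eval=true \\\n    --data_dir=data \\\n    --vocab_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/vocab.txt \\\n    --bert_config_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/bert_config.json \\\n    --init_checkpoint=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/bert_model.ckpt \\\n    --max_seq_length=70 \\\n    --train_batch_size=32 \\\n    --learning_rate=5e-5 \\\n    --num_train_epochs=3.0 \\\n    --output_dir=my_model\n~~~\n\n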
With the parameters set, just run it\n\n\n\n\n\nThe final model results\n\n\n\n\n\n\n\n#### Producing and saving predictions\n\nParameters for prediction\n\n~~~\n-task_name=my\n-do_predict=true\n-data_dir=data\n-vocab_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/vocab.txt\n-bert_config_file=../GLUE/BERT_BASE_DIR/chinese_L-12_H-768_A-12/bert_config.json\n-init_checkpoint=my_model\n-max_seq_length=70\n-output_dir=my_model_predict\n~~~\n\n> init_checkpoint: the initialization checkpoint is now the one we trained\n\nAfter the run finishes you get the following files\n\n\n\nComparing with the original file, the predictions are accurate — but they are probabilities at the moment, so let's convert them into class values\n\n\n\n\n\nAdd get_results.py\n\n~~~python\nimport os\nimport pandas as pd\n\n\nif __name__ == '__main__':\n    path = \"my_model_predict\"\n    pd_all = pd.read_csv(os.path.join(path, \"test_results.tsv\"), sep='\\t', header=None)\n\n    data = pd.DataFrame(columns=['polarity'])\n    print(pd_all.shape)\n\n    for index in pd_all.index:\n        neutral_score = pd_all.loc[index].values[0]\n        positive_score = pd_all.loc[index].values[1]\n        negative_score = pd_all.loc[index].values[2]\n\n        if max(neutral_score, positive_score, negative_score) == neutral_score:\n            data.loc[index+1] = [\"0\"]\n        elif max(neutral_score, positive_score, negative_score) == positive_score:\n            data.loc[index+1] = [\"1\"]\n        else:\n            data.loc[index+1] = [\"2\"]\n\n    data.to_csv(os.path.join(path, \"pre_sample.tsv\"), sep='\\t')\n\n~~~\n\n\n\nWhen it finishes, a pre_sample.tsv file appears in the same directory; compare the results\n\n\n\n> Correct\n\n\n\nWith that, the hands-on Chinese sentiment classification is complete: we wrote the functions, trained, evaluated, and produced predictions — BERT is now officially in use. A thumbs-up to you for following along 👍."
},
{
"alpha_fraction": 0.5085645318031311,
"alphanum_fraction": 0.556212306022644,
"avg_line_length": 52.83116912841797,
"blob_id": "4c653cc393eb59df81590a4947db7d3a49e6b2d0",
"content_id": "6e30f4548d7eba2b720e5d4a634c6d973e8bd285",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8320,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 154,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/main.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport gc\nimport process\nimport wd_model\nimport time\n\n\nRANDOM_SEED = 42\n\n# types of columns of the data_set DataFrame\nWIDE_COLS = [\n 'weather_le', 'hightemp', 'lowtemp', 'dayofweek'\n]\n\nif __name__ == '__main__':\n t1 = time.time()\n print(wd_model.get_available_gpus()) # 返回格式为:['/device:GPU:0', '/device:GPU:1']\n\n # LOAD DATA\n print('*-' * 40, 'LOAD DATA')\n making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/'\n link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/'\n cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/'\n head_link_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/'\n win_order_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/'\n pre_arrival_sqe_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/'\n data_for_driver_xw = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/data_for_driver_xw/'\n downstream_status_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/downstream_status_for_order/'\n data, mk_cols_list, link_cols_list, cross_cols_list = process.load_data(making_data_dir,\n link_data_dir,\n cross_data_dir,\n head_link_dir,\n win_order_data_dir,\n pre_arrival_sqe_dir,\n data_for_driver_xw,\n downstream_status_dir)\n\n # PROCESSING DATA\n print('*-' * 40, 'PROCESSING DATA')\n train_data, val_data = process.processing_data(data, mk_cols_list, link_cols_list, cross_cols_list,\n WIDE_COLS)\n del data\n gc.collect()\n # print(train_data.columns.tolist())\n\n # PROCESSING INPUTS\n print('*-' * 40, 'PROCESSING INPUTS')\n # SAVE LIST\n a = np.array(mk_cols_list)\n np.save('../model_h5/wd_mk_cols_list_0730_5.npy', a)\n a = np.array(link_cols_list)\n np.save('../model_h5/wd_link_cols_list_0730_5.npy', a)\n a = np.array(cross_cols_list)\n np.save('../model_h5/wd_cross_cols_list_0730_5.npy', cross_cols_list)\n pred_cols = ['ata']\n print('*-' * 40, 'PROCESSING INPUTS FOR TRAIN_DATA', train_data.shape)\n train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, \\\n train_inputs_slice, train_labels = process.processing_inputs(\n train_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)\n del train_data\n gc.collect()\n\n print('*-' * 40, 'PROCESSING INPUTS FOR VAL_DATA', val_data.shape)\n val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, \\\n val_inputs_slice, val_labels = process.processing_inputs(\n val_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)\n del val_data\n gc.collect()\n\n\n # MODEL_INIT\n print('*-' * 40, 'MODEL_INIT')\n deep_col_len, wide_col_len = train_deep_input.shape[1], train_wide_input.shape[1]\n link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]\n link_size = 639877 + 2\n cross_size = 44313 + 2\n slice_size = 288\n # link_seqlen, cross_seqlen = 170, 12 # 已默认\n print(\"link_size:{},link_nf_size:{},cross_size:{},cross_nf_size:{},slice_size:{}\".format(link_size, link_nf_size,\n cross_size, cross_nf_size,\n slice_size))\n print(\"deep_col_len:{}, wide_col_len:{}\".format(deep_col_len, wide_col_len))\n\n model = wd_model.wd_model(link_size, cross_size, slice_size, deep_col_len, wide_col_len,\n link_nf_size, cross_nf_size, conv='conv')\n\n mc, es, lr = wd_model.get_mc_es_lr('0730_5', patience=4, min_delta=1e-4)\n print('*-' * 40, 
\n    print('*-' * 40, 'MODEL_INIT END')\n    # MODEL_FIT\n    print('*-' * 40, 'MODEL_FIT_PREDICT')\n    history = model.fit(\n        [train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, train_inputs_slice], train_labels,\n        validation_data=(\n            [val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice], val_labels),\n        batch_size=2048,  # 2048,256\n        epochs=100,\n        verbose=1,\n        callbacks=[es])\n    np.save('../model_h5/history_0730_5.npy', history.history)\n    model.save_weights(\"../model_h5/wd_model_0730_5.h5\")\n\n    del train_link_inputs, train_cross_inputs, train_deep_input, \\\n        train_wide_input, train_inputs_slice, train_labels\n    del val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice, val_labels\n    gc.collect()\n\n    print('*-' * 40, 'LOAD TEST DATA')\n    making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/order_xt/'\n    link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_170_link_sqe_for_order/'\n    cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/cross_sqe_for_order/'\n    head_link_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/head_link_data_clear/'\n    win_order_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/win_order_xw/'\n    pre_arrival_sqe_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/sqe_arrival_for_link/'\n    data_test_for_driver_xw = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/data_for_driver_xw/'\n    downstream_status_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/downstream_status_for_order/'\n    test_data, _, _, _ = process.load_data(making_data_dir,\n                                           link_data_dir,\n                                           cross_data_dir,\n                                           head_link_dir,\n                                           win_order_test_data_dir,\n                                           pre_arrival_sqe_test_dir,\n                                           data_test_for_driver_xw,\n                                           downstream_status_test_dir)\n\n    # PROCESSING DATA\n    print('*-' * 40, 'PROCESSING DATA')\n    test_data = process.processing_data(test_data, mk_cols_list, link_cols_list, cross_cols_list,\n                                        WIDE_COLS, is_test=True)\n    print('*-' * 40, 'PROCESSING INPUTS FOR TEST_DATA', test_data.shape)\n    test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, \\\n    test_inputs_slice, test_labels = process.processing_inputs(\n        test_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)\n    test_pre = test_data[['order_id']]\n    del test_data\n    gc.collect()\n\n    # MODEL_PREDICT\n    print('*-' * 40, 'MODEL_PREDICT')\n    test_pre = test_pre.rename(columns={'order_id': 'id'})\n    test_pred = model.predict(\n        [test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, test_inputs_slice],\n        batch_size=2048)\n    test_pre['test_predict'] = test_pred\n    # test_pre['test_predict'] = test_pre['test_predict'].round(0)\n    test_pre = test_pre.rename(columns={'test_predict': 'result'})  # rename the column\n    test_pre = test_pre[['id', 'result']]\n    print(test_pre.head())\n    result_save_path = '../result_csv/submit_w_0730_5.csv'\n    print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)\n    test_pre.to_csv(result_save_path, index=0)  # save\n    print('..........Finish')\n    t2 = time.time()\n    print(\"Total time spent: {:.4f} hours\".format((t2-t1)/3600))\n"
},
{
"alpha_fraction": 0.601253092288971,
"alphanum_fraction": 0.6223700642585754,
"avg_line_length": 40.43589782714844,
"blob_id": "b449401da9d721c5af6bb2fac7396216b869c20d",
"content_id": "a14cdc05d1cdcbfcf5795c9eb8047f17d0439a3e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12956,
"license_type": "permissive",
"max_line_length": 162,
"num_lines": 312,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/process.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nfrom tqdm import tqdm\nfrom pandarallel import pandarallel\nfrom sklearn.model_selection import train_test_split\n# import random\nimport gc\nimport ast\nimport os\nimport warnings\nimport joblib\n\n\nwarnings.filterwarnings('ignore')\npd.options.mode.chained_assignment = None\npandarallel.initialize()\n\n\ndef pandas_list_to_array(df):\n \"\"\"\n Input: DataFrame of shape (x, y), containing list of length l\n Return: np.array of shape (x, l, y)\n \"\"\"\n\n return np.transpose(\n np.array(df.values.tolist()),\n (0, 2, 1)\n )\n\n\ndef preprocess_inputs(df, cols: list):\n return pandas_list_to_array(\n df[cols]\n )\n\n\ndef append_all_data(files_list, file_head_path):\n \"\"\"\n concat all the data\n :param files_list: the name of data\n :param file_head_path: the path of data\n :return: DataFrame of data for all\n \"\"\"\n data_all_path = file_head_path + files_list[0]\n data_all = pd.read_csv(data_all_path)\n data_all = data_all.head(0)\n try:\n del data_all['Unnamed: 0']\n except KeyError as e:\n pass\n # 循环添加全部数据\n for i in files_list:\n data_path = file_head_path + i\n print(\"当前文件为:\", data_path)\n data = pd.read_csv(data_path)\n try:\n del data['Unnamed: 0']\n except KeyError as e:\n pass\n data_all = data_all.append(data)\n return data_all\n\n\ndef file_name(file_dir):\n files_list = []\n for root, dirs, files in os.walk(file_dir):\n # print(\"success\")\n for name in files:\n files_list.append(name)\n return files_list\n\n\ndef load_data(making_data_dir, link_data_dir, cross_data_dir, head_link_dir,\n win_order_data_dir, pre_arrival_sqe_dir, data_for_driver_xw, downstream_status_dir):\n \"\"\"\n loading three path of data, then merge them\n :return: all data by order_level\n \"\"\"\n print('-------------LOAD DATA for mk_data----------------')\n mk_list = file_name(making_data_dir)\n mk_list.sort()\n mk_data = append_all_data(mk_list, making_data_dir)\n #mk_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/join_20200825.csv') # for test running\n mk_data['date_time'] = mk_data['date_time'].astype(str)\n mk_data['dayofweek'] = pd.to_datetime(mk_data['date_time'])\n mk_data['dayofweek'] = mk_data['dayofweek'].dt.dayofweek+1\n\n weather_le = LabelEncoder()\n mk_data['weather_le'] = weather_le.fit_transform(mk_data['weather'])\n mk_data['driver_id'] = mk_data['driver_id'].astype(str)\n\n \"\"\"\n print('-------------LOAD DATA for driver_data----------------')\n driver_list = file_name(data_for_driver_xw)\n driver_list.sort()\n driver_data = append_all_data(driver_list, data_for_driver_xw)\n #driver_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/data_for_driver_xw/driver_20200825_head.txt')\n driver_data = driver_data[['driver_id','date_time','entropy','hour_mean','workday_order','weekend_order']]\n driver_data['date_time'] = driver_data['date_time'].astype(str)\n driver_data['driver_id'] = driver_data['driver_id'].astype(str)\n mk_data = mk_data.merge(driver_data, on=['driver_id', 'date_time'], how='left')\n del driver_data\n \"\"\"\n\n \"\"\"\n print('-------------LOAD DATA for downstream_status_for_order----------------')\n ds_data_list = file_name(downstream_status_dir)\n ds_data_list.sort()\n ds_link_data = append_all_data(ds_data_list, downstream_status_dir)\n #ds_link_data = 
\n    #ds_link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/downstream_status_for_order/ds_for_order_20200825.csv')\n    mk_data = mk_data.merge(ds_link_data, on=['order_id'], how='left')\n    del ds_link_data\n    \"\"\"\n\n\n    \"\"\"\n    print('-------------LOAD DATA for rate_status_for_order----------------')\n    #rate_data_list = file_name(rate_status_for_order)\n    #rate_data_list.sort()\n    #rate_data = append_all_data(rate_data_list, rate_status_for_order)\n    rate_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/rate_status_for_order/rate_for_order_20200825.csv')\n    mk_data = mk_data.merge(rate_data, on=['order_id'], how='left')\n    del rate_data\n    \"\"\"\n\n\n    print('Remove the wk2_ and m1_ and ratio')\n    del_cols = []\n    mk_cols = mk_data.columns.tolist()\n    for i in range(len(mk_cols)):\n        if 'wk2_' in mk_cols[i]:\n            del_cols.append(mk_cols[i])\n        if 'm1_' in mk_cols[i]:\n            del_cols.append(mk_cols[i])\n        if 'ratio' in mk_cols[i]:\n            del_cols.append(mk_cols[i])\n    del_cols = del_cols + ['date_time_mean','weather', 'driver_id', 'date_time_dt', 'link_time_sum','date_time_sum']\n    print('*-' * 40, 'Will drop these columns:', del_cols)\n    mk_data.drop(columns=del_cols, axis=1, inplace=True)\n    print('The init shape of mk_data:', mk_data.shape)\n\n\n    print('-------------LOAD WIN DATA----------------')\n    win_order_list = file_name(win_order_data_dir)\n    win_order_list.sort()\n    win_order_data = append_all_data(win_order_list, win_order_data_dir)\n    #win_order_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/win_for_slice_20200825.csv') # for test running\n    del_win_order_cols = []\n    win_order_cols = win_order_data.columns.tolist()\n    for i in range(len(win_order_cols)):\n        if 'last_wk_lk_current' in win_order_cols[i]:\n            del_win_order_cols.append(win_order_cols[i])\n        #if 'distance' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n        #if '1_percent' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n        #if '0_percent' in win_order_cols[i]:\n        #    del_win_order_cols.append(win_order_cols[i])\n    del_win_order_cols = del_win_order_cols + ['slice_id', 'date_time']\n    win_order_data.drop(columns=del_win_order_cols, axis=1, inplace=True)\n    print('win_order_data.shape',win_order_data.shape)\n    mk_data = pd.merge(mk_data, win_order_data, how='left', on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del win_order_data\n    gc.collect()\n\n\n    print('-------------LOAD HEAD DATA----------------')\n    head_list = file_name(head_link_dir)\n    head_list.sort()\n    head_data = append_all_data(head_list, head_link_dir)\n    #head_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/head_link_data_clear/head_link_20200825.csv') # for test running\n    get_head_cols = ['len_tmp','status_0','status_1','status_2','status_3','status_4','rate_0','rate_1','rate_2','rate_3','rate_4']\n    get_head_cols.insert(0, 'order_id')\n    print('head_data.shape:',head_data.shape)\n    head_data = head_data[get_head_cols]\n    print('mk_data.shape',mk_data.shape)\n    mk_data = pd.merge(mk_data, head_data, how='left', on='order_id')\n    print('mk_data.shape',mk_data.shape)\n    del head_data\n    gc.collect()\n\n\n    print('-------------LOAD DATA for link_data----------------')\n    link_list = file_name(link_data_dir)\n    link_list.sort()\n    link_data = append_all_data(link_list, link_data_dir)\n    #link_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/sqe_20200825_link.txt') # for test running
\n    #del_link_cols = ['link_time_sub','link_time_sub_sum','link_time_sub_mean', 'link_time_sub_std','link_time_sub_skew']\n    #link_data.drop(del_link_cols, axis=1, inplace=True)\n    print('The init shape of link_data:', link_data.shape)\n    gc.collect()\n\n\n    print('-------------LOAD DATA for arrival_sqe_data----------------')\n    arrival_sqe_list = file_name(pre_arrival_sqe_dir)\n    arrival_sqe_list.sort()\n    arrival_sqe_data = append_all_data(arrival_sqe_list, pre_arrival_sqe_dir)\n    #arrival_sqe_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/20200825.csv') # for test running\n    del arrival_sqe_data['slice_id']\n    del arrival_sqe_data['pre_arrival_status']\n    del arrival_sqe_data['arrive_slice_id']\n    arrival_cols = arrival_sqe_data.columns.tolist()\n    new_arrival_cols = ['future_'+i for i in arrival_cols if i != 'order_id']\n    new_arrival_cols.insert(0, 'order_id')\n    arrival_sqe_data.columns = new_arrival_cols\n    print('The init shape of arrival_sqe_data:', arrival_sqe_data.shape)\n    link_data = pd.merge(link_data, arrival_sqe_data, how='left', on='order_id')\n    del arrival_sqe_data\n    gc.collect()\n    link_cols_list = ['link_id', 'link_time', 'link_current_status', 'pr','dc']\n\n\n\n    print('-------------LOAD DATA for cross_data----------------')\n    cross_list = file_name(cross_data_dir)\n    cross_list.sort()\n    cross_data = append_all_data(cross_list, cross_data_dir)\n    #cross_data = pd.read_csv('/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/sqe_20200825_cross.txt') # for test running\n    del_cross_cols = ['cr_t_sub_by_min', 'cr_t_sub_by_q50', 'total_crosstime_std']\n    cross_data.drop(columns=del_cross_cols, axis=1, inplace=True)\n    print('The init shape of cross_data:', cross_data.shape)\n    cross_cols_list = ['cross_id', 'cross_time']\n\n\n    data = pd.merge(mk_data, link_data, how='left', on='order_id')\n    del mk_data\n    del link_data\n    gc.collect()\n    data = pd.merge(data, cross_data, how='left', on='order_id')\n    del cross_data\n    gc.collect()\n\n    # remove the class type and id and label, for deep inputs\n    mk_cols_list = data.columns.tolist()\n    remove_mk_cols = ['order_id', 'slice_id', 'hightemp', 'lowtemp', 'weather_le', 'dayofweek', 'date_time', 'ata']\n    mk_cols_list = list(set(mk_cols_list) - set(remove_mk_cols))\n    mk_cols_list = list(set(mk_cols_list) - set(link_cols_list))\n    mk_cols_list = list(set(mk_cols_list) - set(cross_cols_list))\n    print('length of mk_cols_list', len(mk_cols_list))\n    print('*-' * 40)\n    print('The finish shape of data is:', data.shape)\n\n    return data, mk_cols_list, link_cols_list, cross_cols_list\n\n\ndef processing_data(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, is_test=False):\n    \"\"\"\n    fix data, ast.literal_eval + StandardScaler + train_test_split\n    :return: train_data, val_data, test_data\n    \"\"\"\n    print('Now, Starting parallel_apply the link..................')\n    for i in tqdm(link_cols_list):\n        data[i] = data[i].parallel_apply(ast.literal_eval)\n    print('Now, Starting parallel_apply the cross..................')\n    for i in tqdm(cross_cols_list):\n        data[i] = data[i].parallel_apply(ast.literal_eval)\n    # data = data.fillna(0)\n    data.fillna(data.median(),inplace=True)\n    ss_cols = mk_cols_list + WIDE_COLS\n\n    # train, val\n    if is_test is True:\n        print('is_test is True')\n        ss = joblib.load('../model_h5/ss_scaler')\n        data[ss_cols] = ss.transform(data[ss_cols])\n        return data\n    else:\n        ss = StandardScaler()\n        ss.fit(data[ss_cols])\n        data[ss_cols] = ss.transform(data[ss_cols])\n        joblib.dump(ss, '../model_h5/ss_scaler')
\n        print('is_test is False')\n        data['date_time'] = data['date_time'].astype(int)\n        print(\"type(data['date_time']):\", data['date_time'].dtype)\n        # print('Here train_test_split..................')\n        # all_train_data, _ = train_test_split(all_train_data, test_size=0.9, random_state=42)\n        print('*-' * 40, 'The data.shape:', data.shape)\n        train_data, val_data = train_test_split(data, test_size=0.15, random_state=42)\n        train_data = train_data.reset_index()\n        val_data = val_data.reset_index()\n        del train_data['index']\n        del val_data['index']\n        return train_data, val_data\n\n\ndef processing_inputs(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS):\n    \"\"\"\n    change the data for model\n    :return:\n    \"\"\"\n    if 'ata' in mk_cols_list:\n        print('The ata in the mk_cols_list')\n    if 'ata' in link_cols_list:\n        print('The ata in the link_cols_list')\n    if 'ata' in cross_cols_list:\n        print('The ata in the cross_cols_list')\n    if 'ata' in WIDE_COLS:\n        print('The ata in the WIDE_COLS')\n    #link_cols_list = ['link_id', 'link_time','link_id_count','pr','dc',\n    #                  'top_a','link_current_status','link_ratio']\n    #cross_cols_list = ['cross_id', 'cross_time']\n    data_link_inputs = preprocess_inputs(data, cols=link_cols_list)\n    data_cross_inputs = preprocess_inputs(data, cols=cross_cols_list)\n    data_deep_input = data[mk_cols_list].values\n    data_wide_input = data[WIDE_COLS].values\n    data_inputs_slice = data['slice_id'].values\n    # print('--------------------------------test, ', min(data['slice_id'].values.tolist()))\n    data_labels = data['ata'].values\n\n    return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels\n"
},
{
"alpha_fraction": 0.5477213859558105,
"alphanum_fraction": 0.5802789330482483,
"avg_line_length": 56.225059509277344,
"blob_id": "1b0d3343f30f7566bf1b26141635bbe4d15fa188",
"content_id": "a7f2e3b4317571e5077b01cd9ba0df0c8afbde15",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 24702,
"license_type": "permissive",
"max_line_length": 197,
"num_lines": 431,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/main.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport gc\nimport tensorflow as tf\nimport process\nimport dcn_model\nimport sys\nimport random\nimport os\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\ntf.random.set_seed(42)\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'\n\nRANDOM_SEED = 42\n# types of columns of the data_set DataFrame\nCATEGORICAL_COLS = [\n 'weather_le', 'hightemp', 'lowtemp', 'dayofweek',\n 'slice_id', 'link_current_status_4'\n]\n\nNUMERIC_COLS = [\n 'distance', 'simple_eta', 'link_time_sum', 'link_count',\n 'cr_t_sum', 'link_current_status_4_percent', 'link_current_status_mean',\n 'pr_mean', 'dc_mean','lk_arrival_0_percent', 'lk_arrival_1_percent',\n 'lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent'\n\n]\n\nWIDE_COLS = [\n 'weather_le', 'hightemp', 'lowtemp', 'dayofweek'\n]\n\nIGNORE_COLS = [\n 'order_id', 'ata'\n]\n\nTRAINING = True\nVAL_TO_TEST = False\n\n\ndef set_seed(seed=42):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n\n\nif __name__ == '__main__':\n set_seed(RANDOM_SEED)\n print(dcn_model.get_available_gpus()) # 返回格式为:['/device:GPU:0', '/device:GPU:1']\n\n # LOAD DATA\n print('*-' * 40, 'LOAD DATA')\n making_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_order_xt/'\n link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_link_sqe_for_order/'\n cross_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_cross_sqe_for_order/'\n link_data_other_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/for_0714_link_sqe_for_order_other/'\n head_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_head_link_data_clear/'\n win_order_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/win_order_xw/'\n #pre_arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/final_pre_arrival_data/'\n arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_link_sqe_for_order_arrival/'\n zsl_arrival_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_arrival/'\n arrival_sqe_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_170_lk_arrival_sqe_for_order/'\n #h_s_for_link_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/max_hightmp_slice_for_link_eb/'\n pre_arrival_sqe_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/sqe_arrival_for_link/'\n zsl_link_data_dir = '/home/didi2021/didi2021/giscup_2021/final_train_data_0703/zsl_train_link/'\n data, mk_cols_list, link_cols_list, cross_cols_list = process.load_data(making_data_dir,\n link_data_dir,\n cross_data_dir,\n link_data_other_dir,\n head_data_dir,\n win_order_data_dir,\n pre_arrival_sqe_dir,\n zsl_link_data_dir,\n #pre_arrival_data_dir,\n #h_s_for_link_dir,\n arrival_data_dir,\n zsl_arrival_data_dir,\n arrival_sqe_data_dir)\n \n #fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,\n # cate_cols=CATEGORICAL_COLS)\n # PROCESSING DATA\n data['date_time'] = data['date_time'].astype(int)\n print(\"type(data['date_time']):\", data['date_time'].dtype)\n data = data[data['date_time'] != 20200901]\n print('Here train_test_split..................')\n # all_train_data, _ = 
\n    data = data.reset_index()\n    del data['index']\n    print('*-' * 40, 'The data.shape:', data.shape)\n    train_data, val_data = train_test_split(data, test_size=0.15, random_state=RANDOM_SEED)\n    train_data = train_data.reset_index()\n    val_data = val_data.reset_index()\n    del train_data['index']\n    del val_data['index']\n    print('Save End.................')\n    fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS\n    data_bak = data[fb_list]\n    del data\n    data = data_bak.copy()\n    del data_bak\n    gc.collect()\n\n    print('*-' * 40, 'PROCESSING DATA FOR TRAIN')\n    train_data = process.processing_data(train_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS)\n    #del data\n    #fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS\n    #data = data[fb_list]\n    #gc.collect()\n    # print(train_data.columns.tolist())\n\n    # PROCESSING INPUTS\n    print('*-' * 40, 'PROCESSING INPUTS')\n    # SAVE LIST\n    a = np.array(mk_cols_list)\n    np.save('../model_h5/mk_cols_list_0720_2.npy', a)\n    a = np.array(link_cols_list)\n    np.save('../model_h5/link_cols_list_0720_2.npy', a)\n    a = np.array(cross_cols_list)\n    np.save('../model_h5/cross_cols_list_0720_2.npy', cross_cols_list)\n    a = np.array(CATEGORICAL_COLS)\n    np.save('../model_h5/CATEGORICAL_COLS_0720_2.npy', a)\n    del a\n    pred_cols = ['ata']\n    print('*-' * 40, 'PROCESSING INPUTS FOR TRAIN_DATA', train_data.shape)\n    train_link_inputs, train_cross_inputs, train_deep_input, train_wide_input, \\\n    train_inputs_slice, train_labels, train_arrival = process.processing_inputs(\n        train_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)\n    X_train = dcn_model.preprocess(train_data, CATEGORICAL_COLS, NUMERIC_COLS)\n    train_pre = train_data[['order_id']]\n    del train_data\n    gc.collect()\n\n    print('*-' * 40, 'PROCESSING DATA FOR VAL')\n    val_data = process.processing_data(val_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=True)\n    print('*-' * 40, 'PROCESSING INPUTS FOR VAL_DATA', val_data.shape)\n    val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, \\\n    val_inputs_slice, val_labels, val_arrival = process.processing_inputs(\n        val_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS)\n    X_val = dcn_model.preprocess(val_data, CATEGORICAL_COLS, NUMERIC_COLS)\n    # val_data.to_csv('../model_h5/val_data.csv', index=0) # saving csv for test running\n    val_pre = val_data[['order_id']]\n    del val_data\n    gc.collect()\n\n    # MODEL_INIT\n    print('*-' * 40, 'T_MODEL_INIT')\n    deep_col_len, wide_col_len = train_deep_input.values.shape[1], train_wide_input.shape[1]\n    link_size = 639877 + 2\n    cross_size = 44313 + 2\n    link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]\n    slice_size = 288\n    # link_seqlen, cross_seqlen = 170, 12  # defaults, already set in the model\n    print(\"link_size:{},link_nf_size:{},cross_size:{},cross_nf_size:{},slice_size:{}\".format(link_size, link_nf_size,\n                                                                                             cross_size, cross_nf_size,\n                                                                                             slice_size))\n    print(\"deep_col_len:{}, wide_col_len:{}\".format(deep_col_len, wide_col_len))\n\n    fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,\n                                     cate_cols=CATEGORICAL_COLS)\n    inp_layer, inp_embed = dcn_model.embedding_layers(fd)\n    autoencoder, encoder = dcn_model.create_autoencoder(train_deep_input.values.shape[-1], 1, noise=0.1)\n    if TRAINING:\n        autoencoder.fit(train_deep_input.values, (train_deep_input.values, train_labels.values),\n                        epochs=1000,  # 1000\n                        batch_size=2048,  # 1024\n                        validation_split=0.1,
\n                        callbacks=[tf.keras.callbacks.EarlyStopping('val_ata_output_loss', patience=10, restore_best_weights=True)])\n        encoder.save_weights('../model_h5/t_encoder.hdf5')\n    else:\n        encoder.load_weights('../model_h5/t_encoder.hdf5')\n    encoder.trainable = False\n    del autoencoder\n\n    t_model = dcn_model.DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, deep_col_len, wide_col_len,\n                                  link_nf_size, cross_nf_size, encoder, conv=True, have_knowledge=False)\n    #del encoder\n    gc.collect()\n\n    mc, es, lr = dcn_model.get_mc_es_lr('0720_2', patience=5, min_delta=1e-4)\n    print('*-' * 40, 'MODEL_INIT END')\n\n    print('*-' * 40, 'ARRIVAL_MODEL_FIT')\n    t_history = t_model.fit(\n        [\n            X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],\n            X_train['slice_id'], X_train['link_current_status_4'],\n            X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],\n            X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],\n            X_train['pr_mean'], X_train['dc_mean'],\n            X_train['lk_arrival_0_percent'], X_train['lk_arrival_1_percent'],X_train['lk_arrival_2_percent'],\n            X_train['lk_arrival_3_percent'],X_train['lk_arrival_4_percent'],\n            train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],\n        train_labels.values,\n        validation_data=(\n            [\n                X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],\n                X_val['slice_id'], X_val['link_current_status_4'],\n                X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],\n                X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],\n                X_val['pr_mean'], X_val['dc_mean'],\n                X_val['lk_arrival_0_percent'], X_val['lk_arrival_1_percent'],X_val['lk_arrival_2_percent'],\n                X_val['lk_arrival_3_percent'],X_val['lk_arrival_4_percent'],\n                val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],\n            (val_labels.values),),\n        batch_size=2048,  # 2048,1024\n        epochs=100,  # 100\n        verbose=1,\n        # )\n        callbacks=[es])  # lr\n    np.save('../model_h5/t_model_0720_2.npy', t_history.history)\n    t_model.save_weights(\"../model_h5/t_model_0720_2.h5\")\n    print('*-' * 40, 't_MODEL_PREDICT')\n    y_knowledge_train = t_model.predict(\n        [X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],\n         X_train['slice_id'], X_train['link_current_status_4'],\n         X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],\n         X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],\n         X_train['pr_mean'], X_train['dc_mean'],\n         X_train['lk_arrival_0_percent'], X_train['lk_arrival_1_percent'],X_train['lk_arrival_2_percent'],\n         X_train['lk_arrival_3_percent'],X_train['lk_arrival_4_percent'],\n         train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],\n        batch_size=2048)\n    y_knowledge_val = t_model.predict(\n        [\n            X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],\n            X_val['slice_id'], X_val['link_current_status_4'],\n            X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],\n            X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],\n            X_val['pr_mean'], X_val['dc_mean'],\n            X_val['lk_arrival_0_percent'], X_val['lk_arrival_1_percent'],X_val['lk_arrival_2_percent'],\n            X_val['lk_arrival_3_percent'],X_val['lk_arrival_4_percent'],
val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],\n batch_size=2048)\n print('*-'*40, 'TRAINFORME')\n train_labels = pd.DataFrame(train_labels)\n train_labels['y_knowledge_train'] = np.squeeze(y_knowledge_train)\n print(np.squeeze(y_knowledge_train)[:2])\n print(train_labels['y_knowledge_train'].head(2))\n val_labels = pd.DataFrame(val_labels) \n val_labels['y_knowledge_val'] = np.squeeze(y_knowledge_val)\n print('*-' * 40, 't_MODEL_END')\n zsl_arrival_cols = ['zsl_link_arrival_status_mean','zsl_link_arrival_status_nunique','zsl_link_arrival_status0','zsl_link_arrival_status1','zsl_link_arrival_status2','zsl_link_arrival_status3']\n train_deep_input = train_deep_input.drop(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent','lk_arrival_3_percent','lk_arrival_4_percent'],axis=1)\n train_deep_input = train_deep_input.drop(zsl_arrival_cols, axis=1)\n\n val_deep_input = val_deep_input.drop(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent','lk_arrival_3_percent','lk_arrival_4_percent'],axis=1)\n val_deep_input = val_deep_input.drop(zsl_arrival_cols, axis=1)\n\n if 'ata' in train_deep_input.columns.tolist():\n print('The ata in the train_deep_input')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n if 'lk_arrival_0_percent' in train_deep_input.columns.tolist():\n print('The lk_arrival_0_percent in the train_deep_input')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n if 'lk_arrival_0_percent' in val_deep_input.columns.tolist():\n print('The lk_arrival_0_percent in the val_deep_input')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n if 'zsl_link_arrival_status_mean' in train_deep_input.columns.tolist():\n print('The zsl_link_arrival_status_mean in the train_deep_input')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n\n mk_cols_list = train_deep_input.columns.tolist()\n print('*-' * 40, 'MODEL_FIT')\n deep_col_len, wide_col_len = train_deep_input.values.shape[1], train_wide_input.shape[1]\n print(\"deep_col_len:{}, wide_col_len:{}\".format(deep_col_len, wide_col_len))\n NUMERIC_COLS = list(set(NUMERIC_COLS)-set(['lk_arrival_0_percent','lk_arrival_1_percent','lk_arrival_2_percent',\n 'lk_arrival_3_percent','lk_arrival_4_percent']))\n fb_list = CATEGORICAL_COLS+NUMERIC_COLS+IGNORE_COLS\n if 'lk_arrival_0_percent' in fb_list:\n print('The lk_arrival_0_percent in the fb_list')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n data = data[fb_list]\n fd = dcn_model.FeatureDictionary(data, numeric_cols=NUMERIC_COLS, ignore_cols=IGNORE_COLS,\n cate_cols=CATEGORICAL_COLS)\n inp_layer, inp_embed = dcn_model.embedding_layers(fd)\n autoencoder, encoder = dcn_model.create_autoencoder(train_deep_input.values.shape[-1], 1, noise=0.1)\n if TRAINING:\n autoencoder.fit(train_deep_input.values, (train_deep_input.values, train_labels['ata'].values),\n epochs=1000, # 1000\n batch_size=2048, # 1024\n validation_split=0.1,\n callbacks=[tf.keras.callbacks.EarlyStopping('val_ata_output_loss', patience=10, restore_best_weights=True)])\n encoder.save_weights('../model_h5/main_encoder.hdf5')\n else:\n encoder.load_weights('../model_h5/main_encoder.hdf5')\n encoder.trainable = False\n del autoencoder\n \n #print(type(train_labels['y_knowledge_train']))\n #print(type(train_labels))\n #y_train = np.vstack((train_labels, train_pre['y_knowledge_train'])).T\n #y_valid = np.vstack((val_labels, val_pre['y_knowledge_val'])).T\n #print(train_labels.shape)\n print(train_labels.head(1))\n print(train_labels.values[0])\n\n print('*-'*40, 'The shape of train_link_inputs before', 
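# the np.concatenate on the next code line drops feature channel 5 (apparently the link arrival status, which is unavailable at test time) from the link sequence tensors\n        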
train_link_inputs.shape)\n train_link_inputs = np.concatenate((train_link_inputs[:, :, :5], train_link_inputs[:, :, 6:]), axis=2)\n \n print('*-'*40, 'The shape of train_link_inputs after', train_link_inputs.shape)\n val_link_inputs = np.concatenate((val_link_inputs[:, :, :5], val_link_inputs[:, :, 6:]), axis=2)\n link_nf_size, cross_nf_size = train_link_inputs.shape[2], train_cross_inputs.shape[2]\n mc, es, lr = dcn_model.get_mc_es_lr_for_student('0720_2', patience=5, min_delta=1e-4)\n model = dcn_model.DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, deep_col_len, wide_col_len,\n link_nf_size, cross_nf_size, encoder, conv=True)\n history = model.fit(\n [\n X_train['weather_le'], X_train['hightemp'], X_train['lowtemp'], X_train['dayofweek'],\n X_train['slice_id'], X_train['link_current_status_4'],\n X_train['distance'], X_train['simple_eta'], X_train['link_time_sum'], X_train['link_count'],\n X_train['cr_t_sum'], X_train['link_current_status_4_percent'], X_train['link_current_status_mean'],\n X_train['pr_mean'], X_train['dc_mean'],\n train_link_inputs, train_cross_inputs, train_deep_input.values, train_wide_input, train_inputs_slice],\n train_labels.values,\n validation_data=(\n [\n X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],\n X_val['slice_id'], X_val['link_current_status_4'],\n X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],\n X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],\n X_val['pr_mean'], X_val['dc_mean'],\n val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice], \n (val_labels.values),),\n batch_size=2048, # 2048,1024\n epochs=100, # 100\n verbose=1,\n # )\n callbacks=[es]) # lr\n np.save('../model_h5/history_0720_2.npy', history.history)\n model.save_weights(\"../model_h5/dcn_model_0720_2.h5\")\n # MODEL_RPEDICT\n if VAL_TO_TEST:\n print('*-'*40,'val_to_test')\n val_pre = val_pre.rename(columns={'order_id': 'id'})\n print(val_link_inputs.shape, val_cross_inputs.shape, X_val.shape)\n print('*-' * 40, 'MODEL_RPEDICT')\n val_pred = model.predict(\n [\n X_val['weather_le'], X_val['hightemp'], X_val['lowtemp'], X_val['dayofweek'],\n X_val['slice_id'], X_val['link_current_status_4'],\n X_val['distance'], X_val['simple_eta'], X_val['link_time_sum'], X_val['link_count'],\n X_val['cr_t_sum'], X_val['link_current_status_4_percent'], X_val['link_current_status_mean'],\n X_val['pr_mean'], X_val['dc_mean'],\n val_link_inputs, val_cross_inputs, val_deep_input.values, val_wide_input, val_inputs_slice],\n batch_size=2048)\n val_pre['val_predict'] = np.squeeze(val_pred[:, 1])\n val_pre['other_predict'] = np.squeeze(val_pred[:, 0])\n # val_pre['val_predict'] = val_pre['val_predict'].round(0)\n val_pre = val_pre.rename(columns={'val_predict': 'result'}) # 更改列名\n val_pre = val_pre[['id', 'result', 'other_predict']]\n val_pre['ata'] = val_labels['ata'].values\n print(val_pre.head())\n result_save_path = '../result_csv/val_0720_2.csv'\n print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)\n print('..........Finish')\n\n del X_train, train_link_inputs, train_cross_inputs, train_deep_input, \\\n train_wide_input, train_inputs_slice, train_labels\n del X_val, val_link_inputs, val_cross_inputs, val_deep_input, val_wide_input, val_inputs_slice, val_labels\n gc.collect()\n #print('*-' * 40, 'EXIT')\n #sys.exit(0)\n print('*-' * 40, 'LOAD TEST DATA')\n making_test_data_dir = 
'/home/didi2021/didi2021/giscup_2021/final_test_data_0703/order_xt/'\n link_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_170_link_sqe_for_order/'\n cross_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/cross_sqe_for_order/'\n link_test_data_other_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/link_sqe_for_order_other/'\n head_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/head_link_data_clear/'\n win_order_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/win_order_xw/'\n pre_arrival_sqe_test_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/sqe_arrival_for_link/'\n #h_s_for_test_link_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/max_hightmp_slice_for_link_eb/'\n #pre_arrival_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/final_pre_arrival_data/'\n zsl_link_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/zsl_test_link/'\n #zsl_cross_test_data_dir = '/home/didi2021/didi2021/giscup_2021/final_test_data_0703/zsl_test_cross_0703/'\n test_data, _, _, _ = process.load_data(making_test_data_dir,\n link_test_data_dir,\n cross_test_data_dir,\n link_test_data_other_dir,\n head_test_data_dir,\n win_order_test_data_dir,\n pre_arrival_sqe_test_dir,\n zsl_link_test_data_dir) #,\n #h_s_for_test_link_dir)\n #pre_arrival_test_data_dir)\n print('*-' * 40, 'PROCESSING DATA')\n link_cols_list.remove('link_arrival_status')\n test_data = process.processing_data(test_data, link_cols_list, cross_cols_list, mk_cols_list, WIDE_COLS, is_test=True)\n gc.collect()\n print('*-' * 40, 'PROCESSING INPUTS FOR TEST_DATA', test_data.shape)\n test_link_inputs, test_cross_inputs, test_deep_input, test_wide_input, \\\n test_inputs_slice, _ = process.processing_inputs(\n test_data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, arrival=False)\n X_test = dcn_model.preprocess(test_data, CATEGORICAL_COLS, NUMERIC_COLS)\n test_pre = test_data[['order_id']]\n test_arrival_pre = test_data[['order_id']]\n gc.collect()\n\n test_pre = test_pre.rename(columns={'order_id': 'id'})\n print(test_link_inputs.shape, test_cross_inputs.shape, X_test.shape, test_deep_input.shape)\n print('*-' * 40, 'MODEL_RPEDICT')\n test_pred = model.predict(\n [\n X_test['weather_le'], X_test['hightemp'], X_test['lowtemp'], X_test['dayofweek'],\n X_test['slice_id'], X_test['link_current_status_4'],\n X_test['distance'], X_test['simple_eta'], X_test['link_time_sum'], X_test['link_count'],\n X_test['cr_t_sum'], X_test['link_current_status_4_percent'], X_test['link_current_status_mean'],\n X_test['pr_mean'], X_test['dc_mean'],\n test_link_inputs, test_cross_inputs, test_deep_input.values, test_wide_input, test_inputs_slice],\n batch_size=2048)\n test_pre['test_predict'] = np.squeeze(test_pred[:, 1])\n test_pre['other_predict'] = np.squeeze(test_pred[:, 0])\n # test_pre['test_predict'] = test_pre['test_predict'].round(0)\n test_pre = test_pre.rename(columns={'test_predict': 'result'}) # 更改列名\n test_pre = test_pre[['id', 'result','other_predict']]\n print(test_pre.head())\n result_save_path = '../result_csv/submit_0720_2.csv'\n print('*-' * 40, 'CSV_SAVE_PATH:', result_save_path)\n test_pre.to_csv(result_save_path, index=0) # 保存\n\n print('..........Finish')\n"
},
{
"alpha_fraction": 0.5766812562942505,
"alphanum_fraction": 0.5904294848442078,
"avg_line_length": 43.04694747924805,
"blob_id": "1ee1279c475d43d6651785e9025bdbced32bdbd7",
"content_id": "d11d0c398a1be71791edf1f0a6a50ea086788cbf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9405,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 213,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/utils.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport multiprocessing as mp\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.stats import kurtosis, iqr, skew\nimport gc\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import Ridge\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\ndef chunk_groups(groupby_object, chunk_size):\n n_groups = groupby_object.ngroups\n group_chunk, index_chunk = [], []\n for i, (index, df) in enumerate(groupby_object):\n group_chunk.append(df)\n index_chunk.append(index)\n\n if (i + 1) % chunk_size == 0 or i + 1 == n_groups:\n group_chunk_, index_chunk_ = group_chunk.copy(), index_chunk.copy()\n group_chunk, index_chunk = [], []\n yield index_chunk_, group_chunk_\n\ndef parallel_apply(groups, func, index_name='Index', num_workers=1, chunk_size=100000):\n n_chunks = np.ceil(1.0 * groups.ngroups / chunk_size)\n indeces, features = [], []\n for index_chunk, groups_chunk in tqdm(chunk_groups(groups, chunk_size), total=n_chunks):\n with mp.pool.Pool(num_workers) as executor:\n features_chunk = executor.map(func, groups_chunk)\n for i in features_chunk:\n features.append(i)\n return features\n\ndef parallel_apply_fea(groups, func, index_name='Index', num_workers=1, chunk_size=100000):\n n_chunks = np.ceil(1.0 * groups.ngroups / chunk_size)\n indeces, features = [], []\n for index_chunk, groups_chunk in chunk_groups(groups, chunk_size):\n with mp.pool.Pool(num_workers) as executor:\n features_chunk = executor.map(func, groups_chunk)\n features.extend(features_chunk)\n indeces.extend(index_chunk)\n\n features = pd.DataFrame(features)\n features.index = indeces\n features.index.name = index_name\n return features\n\ndef add_features_in_group(features, gr_, feature_name, aggs, prefix):\n for agg in aggs:\n if agg == 'sum':\n features['{}{}_sum'.format(prefix, feature_name)] = gr_[feature_name].sum()\n elif agg == 'mean':\n features['{}{}_mean'.format(prefix, feature_name)] = gr_[feature_name].mean()\n elif agg == 'max':\n features['{}{}_max'.format(prefix, feature_name)] = gr_[feature_name].max()\n elif agg == 'min':\n features['{}{}_min'.format(prefix, feature_name)] = gr_[feature_name].min()\n elif agg == 'std':\n features['{}{}_std'.format(prefix, feature_name)] = gr_[feature_name].std()\n elif agg == 'count':\n features['{}{}_count'.format(prefix, feature_name)] = gr_[feature_name].count()\n elif agg == 'skew':\n features['{}{}_skew'.format(prefix, feature_name)] = skew(gr_[feature_name])\n elif agg == 'kurt':\n features['{}{}_kurt'.format(prefix, feature_name)] = kurtosis(gr_[feature_name])\n elif agg == 'iqr':\n features['{}{}_iqr'.format(prefix, feature_name)] = iqr(gr_[feature_name])\n elif agg == 'median':\n features['{}{}_median'.format(prefix, feature_name)] = gr_[feature_name].median()\n elif agg == 'nunique':\n features['{}{}_nunique'.format(prefix, feature_name)] = gr_[feature_name].nunique()\n return features\n\ndef reduce_mem_usage(df):\n # print('reduce_mem_usage_parallel start!')\n # chunk_size = df.columns.shape[0]\n # start_mem = df.memory_usage().sum() / 1024 ** 2\n # print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))\n for col in df.columns:\n col_type = df[col].dtype\n if col_type != object:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > 
np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n else:\n df[col] = df[col].astype('category')\n # end_mem = df.memory_usage().sum() / 1024 ** 2\n # print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\n # print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\n return df\n\ndef reduce_mem_usage_parallel(df_original,num_worker):\n print('reduce_mem_usage_parallel start!')\n # chunk_size = df_original.columns.shape[0]\n start_mem = df_original.memory_usage().sum() / 1024 ** 2\n print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))\n if df_original.columns.shape[0]>500:\n group_chunk = []\n for name in df_original.columns:\n group_chunk.append(df_original[[name]])\n with mp.Pool(num_worker) as executor:\n df_temp = executor.map(reduce_mem_usage,group_chunk)\n del group_chunk\n gc.collect()\n df_original = pd.concat(df_temp,axis = 1)\n end_mem = df_original.memory_usage().sum() / 1024 ** 2\n print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\n print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\n del df_temp\n gc.collect()\n else:\n df_original = reduce_mem_usage(df_original)\n end_mem = df_original.memory_usage().sum() / 1024 ** 2\n print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))\n print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\n\n return df_original\n\n# 评估指标\ndef MAPE(true, pred):\n diff = np.abs(np.array(pred) - np.array(true))\n return np.mean(diff / true)\n# 自定义lgb评估指标\ndef lgb_score_mape(train_data,preds):\n labels = train_data\n diff = np.abs(np.array(preds) - np.array(labels))\n result = np.mean(diff / labels)\n return 'mape',result, False\n\ndef ridge_feature_select(X_train, y_train, num_folds):\n print(\"Starting feature select. 
Train shape: {}\".format(X_train.shape))\n skf = KFold(n_splits=num_folds, shuffle=True, random_state=2021)\n feature_importance_df = pd.DataFrame()\n oof_preds = np.zeros(X_train.shape[0])\n\n k_fold_mape = []\n for i, (trn_idx, val_idx) in enumerate(skf.split(X_train, y_train)):\n clf = Ridge(alpha=1)\n clf.fit(X_train.iloc[trn_idx].fillna(0), y_train.iloc[trn_idx])\n oof_preds[val_idx] = clf.predict(X_train.iloc[val_idx].fillna(0))\n k_fold_mape.append(MAPE(y_train.iloc[val_idx], oof_preds[val_idx]))\n # print(\"kfold_{}_mape_score:{} \".format(i, k_fold_mape[i]))\n full_mape = MAPE(y_train, oof_preds)\n print(\"full_mape_score:{} \".format(full_mape))\n return k_fold_mape,full_mape\n\ndef feature_select(X_train,y_train):\n feature_importance_df_ = pd.read_csv('feature_importances.csv')\n cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False).index\n best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n best_features = best_features.groupby('feature',as_index = False)['importance'].mean()\n best_features = best_features.sort_values(by = 'importance',ascending=False)\n data=best_features.sort_values(by=\"importance\", ascending=False)\n feature_select = list(data['feature'].values)\n feature_array = []\n full_mape_all = 0\n count = 0\n for fea in feature_select:\n print(count)\n count = count + 1\n feature_array.append(fea)\n df_select = X_train[feature_array]\n k_fold_mape, full_mape = ridge_feature_select(df_select,y_train, num_folds=5)\n if count == 1:\n full_mape_all = full_mape\n file = open('feature_select_name.txt', 'a')\n file.write(fea + '\\n')\n file.close()\n file = open('feature_select_fullauc.txt', 'a')\n file.write(str(full_mape_all) + '\\n')\n file.close()\n file = open('feature_select_kfoldauc.txt', 'a')\n file.write(str(k_fold_mape) + '\\n')\n file.close()\n del df_select\n gc.collect()\n continue\n if full_mape_all <= full_mape:\n feature_array.remove(fea)\n else:\n full_mape_all = full_mape\n file = open('feature_select_name.txt', 'a')\n file.write(fea + '\\n')\n file.close()\n file = open('feature_select_fullauc.txt', 'a')\n file.write(str(full_mape_all) + '\\n')\n file.close()\n file = open('feature_select_kfoldauc.txt', 'a')\n file.write(str(k_fold_mape) + '\\n')\n file.close()\n del df_select\n gc.collect()\n a = 1\n\n"
},
{
"alpha_fraction": 0.5475422143936157,
"alphanum_fraction": 0.5628771781921387,
"avg_line_length": 59.14840316772461,
"blob_id": "935590ee9e0d80926bba5824a9212a7d82d6de90",
"content_id": "1d540dd465bd50d46a8c7ebbb01f10b3420204a8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26361,
"license_type": "permissive",
"max_line_length": 157,
"num_lines": 438,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/3_link_fea_order_id_level.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport networkx as nx\nimport os\nimport gc\nimport warnings\nfrom utils import parallel_apply_fea,add_features_in_group\nfrom functools import partial\nwarnings.filterwarnings(\"ignore\")\n\ndef last_k_link_time_interval(gr, periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n gr_['t_i_v'] = gr_['link_time'].diff()\n gr_['t_i_v'] = gr_['t_i_v']\n gr_['t_i_v'] = gr_['t_i_v'].fillna(0)\n\n gr_['c_s_v'] = gr_['link_current_status'].diff()\n gr_['c_s_v'] = gr_['c_s_v']\n gr_['c_s_v'] = gr_['c_s_v'].fillna(0)\n\n gr_ = gr_.drop_duplicates().reset_index(drop = True)\n\n # link time变化\n features = {}\n for period in periods:\n if period > 10e5:\n period_name = 'zsl_link_time_interval_all'\n gr_period = gr_.copy()\n else:\n period_name = 'zsl_link_time_interval_last_{}_'.format(period)\n gr_period = gr_.iloc[:period]\n features = add_features_in_group(features, gr_period, 't_i_v',\n ['mean','max', 'min', 'std','skew','sum'],\n # ['diff'],\n period_name)\n # current status变化\n for period in periods:\n if period > 10e5:\n period_name = 'zsl_link_current_status_interval_all'\n gr_period = gr_.copy()\n else:\n period_name = 'zsl_link_current_status_interval_last_{}_'.format(period)\n gr_period = gr_.iloc[:period]\n features = add_features_in_group(features, gr_period, 'c_s_v',\n ['mean', 'std', 'skew'],\n # ['diff'],\n period_name)\n return features\n\n# last k link id time trend\ndef last_link_time_features(gr,periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n features = {}\n for period in periods:\n if period > 10e5:\n period_name = 'zsl_all_'\n gr_period = gr_.copy()\n else:\n period_name = 'zsl_last_{}_'.format(period)\n gr_period = gr_.iloc[:period]\n features = add_features_in_group(features, gr_period, 'link_time',\n ['max', 'sum', 'mean','min','skew','std'],\n period_name)\n features = add_features_in_group(features, gr_period, 'link_current_status',\n ['mean', 'nunique'],\n period_name)\n return features\n# last k link id time trend\ndef trend_in_last_k_link_id_time(gr, periods):\n gr_ = gr.copy()\n gr_ = gr_.iloc[::-1]\n features = {}\n for period in periods:\n gr_period = gr_.iloc[:period]\n features = add_trend_feature(features, gr_period,\n 'link_time', 'zsl_{}_period_trend_'.format(period)\n )\n\n return features\n# trend feature\ndef add_trend_feature(features, gr, feature_name, prefix):\n y = gr[feature_name].values\n try:\n x = np.arange(0, len(y)).reshape(-1, 1)\n lr = LinearRegression()\n lr.fit(x, y)\n trend = lr.coef_[0]\n except:\n trend = np.nan\n features['{}{}'.format(prefix, feature_name)] = trend\n return features\n\ndef slice_id_change(x):\n hour = x * 5 / 60\n hour = np.floor(hour)\n hour += 8\n if hour >= 24:\n hour = hour - 24\n return hour\nif __name__ == '__main__':\n nrows = None\n root_path = '../data/giscup_2021/'\n read_idkey = np.load(root_path + 'id_key_to_connected_allday.npy', allow_pickle=True).item()\n read_grapheb = np.load(root_path + 'graph_embeddings_retp1_directed.npy', allow_pickle=True).item()\n read_grapheb_retp = np.load(root_path + 'graph_embeddings_retp05_directed.npy', allow_pickle=True).item()\n for i in read_grapheb:\n read_grapheb[i] = list(read_grapheb[i]) + list(read_grapheb_retp[i])\n del read_grapheb_retp\n head_columns = ['order_id', 'ata', 'distance', 'simple_eta', 
'driver_id','slice_id']\n embedding_k = 256\n fill_list = [0] * embedding_k\n #######################################nextlinks #######################################\n nextlinks = pd.read_csv(root_path+'nextlinks.txt', sep=' ', header=None)\n nextlinks.columns=['from_id', 'to_id']\n nextlinks['to_id'] = nextlinks['to_id'].astype('str')\n nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(\",\"))\n nextlinks = pd.DataFrame({'from_id':nextlinks.from_id.repeat(nextlinks.to_id.str.len()),\n 'to_id':np.concatenate(nextlinks.to_id.values)})\n from_id_weight = nextlinks['from_id'].value_counts()\n from_id_weight = from_id_weight.to_frame()\n from_id_weight['index'] = from_id_weight.index\n\n from_id_weight.columns=['weight', 'from_id']\n nextlinks = pd.merge(nextlinks,from_id_weight, 'left', on=['from_id'])\n nextlinks = nextlinks.sort_values(by='weight',ascending=False)\n G = nx.DiGraph()\n from_id = nextlinks['from_id'].astype(str).to_list()\n to_id = nextlinks['to_id'].to_list()\n weight = nextlinks['weight'].to_list()\n edge_tuple = list(zip(from_id, to_id,weight))\n print('adding')\n G.add_weighted_edges_from(edge_tuple)\n\n dc = nx.algorithms.centrality.degree_centrality(G)\n dc = sorted(dc.items(), key=lambda d: d[1],reverse=True)\n dc = dc[:50000]\n dc = [str(i[0]) for i in dc ]\n #######################################link #######################################\n for name in os.listdir(root_path+'train/'):\n data_time = name.split('.')[0]\n if data_time=='20200803':\n continue\n train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)\n print(\"开始处理\", data_time)\n train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n train_head['order_id'] = train_head['order_id'].astype(str)\n train_head['ata'] = train_head['ata'].astype(float)\n train_head['distance'] = train_head['distance'].astype(float)\n train_head['simple_eta'] = train_head['simple_eta'].astype(float)\n train_head['driver_id'] = train_head['driver_id'].astype(int)\n train_head['slice_id'] = train_head['slice_id'].astype(int)\n #link preprocess\n data_link = train[[1]]\n data_link['index'] = train_head.index\n data_link['order_id'] = train_head['order_id']\n data_link['ata'] = train_head['ata']\n data_link['distance'] = train_head['distance']\n data_link['simple_eta'] = train_head['simple_eta']\n data_link['slice_id'] = train_head['slice_id']\n\n # data_link['slice_id'] = data_link['slice_id'].apply(slice_id_change)\n gc.collect()\n data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()\n data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})\n # data_link_split = data_link_split.reset_index(drop=True)\n data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(\n data_link_split)\n data_link_split = data_link_split.reset_index(drop=True)\n data_link_split[['link_id',\n 'link_time',\n 'link_ratio',\n 'link_current_status',\n 'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)\n data_link_split = data_link_split.drop(['link_info'], axis=1)\n data_link_split['link_ratio'] = data_link_split['link_ratio'].astype(float)\n data_link_split['link_time'] = data_link_split['link_time'].astype(float)\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)\n print('preprocess finish!')\n print('start feature engineering')\n 
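# assemble per-order features by merging each aggregated block onto an (order_id, distance) skeleton\n        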
feature = train_head[['order_id', 'distance']]\n ###################static fea#############################################\n #######################order link id count###############################\n df = data_link_split.groupby('order_id', as_index=False)\n tmp_linkid_agg = df['link_id'].agg({'zsl_order_link_id_count': 'count'})\n tmp_linkid_agg['zsl_order_link_id_count_bins'] = 0\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=75)&(tmp_linkid_agg['zsl_order_link_id_count']<100),'zsl_order_link_id_count_bins']=1\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=100)&(tmp_linkid_agg['zsl_order_link_id_count']<120),'zsl_order_link_id_count_bins']=2\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count']>=120),'zsl_order_link_id_count_bins']=3\n feature = feature.merge(tmp_linkid_agg,on='order_id',how='left')\n print('order link id count finish!')\n #######################order link id & distance###############################\n feature['zsl_order_is_highspeed'] = 0\n feature.loc[(feature['distance']>90000)&(feature['zsl_order_link_id_count']<300),'zsl_order_is_highspeed'] = 1\n print('order link id & distance finish!')\n #######################order link id & nextlinks centry###############################\n tmp = data_link_split[data_link_split['link_id'].isin(dc)]\n tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['link_id'].agg({'zsl_order_link_id_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count,on='order_id',how='left')\n feature['zsl_order_link_id_centry_count'] = feature['zsl_order_link_id_centry_count'].fillna(0)\n print('order link id & nextlinks centry finish!')\n #######################order link time sum mean max min var std###############################\n tmp_linktime_agg = df['link_time'].agg({'zsl_order_link_time_sum': 'sum','zsl_order_link_time_mean': 'mean',\n 'zsl_order_link_time_max': 'max','zsl_order_link_time_min': 'min',\n 'zsl_order_link_time_var': 'var','zsl_order_link_time_skew': 'skew'})\n feature = feature.merge(tmp_linktime_agg,on='order_id',how='left')\n print('order link time sum mean max min var std finish!')\n #######################order link current status mean nunique###############################\n tmp_linktime_agg = df['link_current_status'].agg({'zsl_link_current_status_mean': 'mean', 'zsl_link_current_status_nunique': 'nunique'})\n feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')\n print('order link current status mean nunique finish!')\n #######################order link current status count vector###############################\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(str)\n data_link_split.loc[data_link_split['link_current_status'].astype(int)<0,'link_current_status'] = '0'\n data_link_split.loc[data_link_split['link_current_status'].astype(int)>3,'link_current_status'] = '3'\n data = data_link_split.groupby('order_id')['link_current_status'].apply(lambda x: x.str.cat(sep=',')).reset_index()\n cv_encode = CountVectorizer(token_pattern=u'(?u)\\\\b\\\\w+\\\\b')\n train_x = cv_encode.fit_transform(data['link_current_status'])\n train_x = train_x.toarray()\n link_current_status = pd.DataFrame(train_x, columns=['zsl_link_current_status0', 'zsl_link_current_status1', 'zsl_link_current_status2',\n 'zsl_link_current_status3'])\n data = pd.concat([data[['order_id']],link_current_status],axis=1)\n feature = feature.merge(data, on='order_id', how='left')\n print('order link current status count 
vector finish!')\n #######################order distance/link_id_count###############################\n feature['zsl_distance_div_link_id_count'] = feature['distance']*10/feature['zsl_order_link_id_count']\n feature = feature.drop('distance', axis=1)\n print('order distance div link_id_count finish!')\n #######################order link ratio sum mean max min var std###############################\n tmp_linkratio_agg = df['link_ratio'].agg({'zsl_order_link_ratio_sum': 'sum', 'zsl_order_link_ratio_mean': 'mean',\n 'zsl_order_link_ratio_min': 'min',\n 'zsl_order_link_ratio_var': 'var', 'zsl_order_link_ratio_skew': 'skew'})\n feature = feature.merge(tmp_linkratio_agg, on='order_id', how='left')\n print('order link ratio sum mean max min var std finish!')\n #######################weather###################################################################\n weather = pd.read_csv(root_path+'weather.csv')\n weather_dict={'rainstorm':0,'heavy rain':1,'moderate rain':2,'cloudy':3,\n 'showers':4}\n weather['weather'] = weather['weather'].map(weather_dict)\n weather['date'] = weather['date'].astype(str)\n weather=weather[weather['date']==data_time]\n feature['weather'] = weather['weather'].values[0]\n feature['hightemp'] = weather['hightemp'].values[0]\n feature['lowtemp'] = weather['lowtemp'].values[0]\n print('weather finish!')\n ###################trend fea#############################################\n ###################trend link time#####################################\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)\n groupby = data_link_split.groupby(['order_id'])\n func = partial(trend_in_last_k_link_id_time, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_link_time_features, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_k_link_time_interval, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n print('trend link time finish!')\n ####################nextlinks graph embedding#######################\n data_link_split['link_id'] = data_link_split['link_id'].astype(int)\n data_link_split['link_id'] = data_link_split['link_id'].map(read_idkey)\n data_link_split['link_id'] = data_link_split['link_id'].fillna(0)\n data_link_split['link_id'] = data_link_split['link_id'].astype(int)\n data_link_split['link_id'] = data_link_split['link_id'].map(read_grapheb)\n data_link_split['link_id'] = data_link_split['link_id'].fillna('0')\n def replace_list(x):\n if isinstance(x, str):\n x = fill_list\n return x\n data_link_split['link_id'] = data_link_split['link_id'].apply(replace_list)\n link_id_col = ['zsl_link_id_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(link_id_col, ['mean'] * len(link_id_col)))\n link_id_array = np.array(data_link_split.pop('link_id').to_list())\n link_id_array = pd.DataFrame(link_id_array, columns=agg_col, dtype=np.float16)\n data_link_split = pd.concat([data_link_split, link_id_array], axis=1)\n tmp = data_link_split.groupby('order_id', as_index=False)\n tmp_linkid_agg = tmp.agg(agg_col)\n 
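# tmp_linkid_agg now holds the per-order mean of the 256-d link graph embeddings; attach it to the feature table\n        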
feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')\n\n feature.to_csv(root_path + 'feature/train/link_fea_order_id_level_{}.csv'.format(data_time), index=False)\n del train\n gc.collect()\n\n test = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)\n test_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n test_head['order_id'] = test_head['order_id'].astype(str)\n test_head['ata'] = test_head['ata'].astype(float)\n test_head['distance'] = test_head['distance'].astype(float)\n test_head['simple_eta'] = test_head['simple_eta'].astype(float)\n test_head['driver_id'] = test_head['driver_id'].astype(int)\n test_head['slice_id'] = test_head['slice_id'].astype(int)\n\n # link preprocess\n data_link = test[[1]]\n data_link['index'] = test_head.index\n data_link['order_id'] = test_head['order_id']\n data_link['ata'] = test_head['ata']\n data_link['distance'] = test_head['distance']\n data_link['simple_eta'] = test_head['simple_eta']\n data_link['slice_id'] = test_head['slice_id']\n\n # data_link['slice_id'] = data_link['slice_id'].apply(slice_id_change)\n gc.collect()\n data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()\n data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})\n # data_link_split = data_link_split.reset_index(drop=True)\n data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(\n data_link_split)\n data_link_split = data_link_split.reset_index(drop=True)\n data_link_split[['link_id',\n 'link_time',\n 'link_ratio',\n 'link_current_status',\n 'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)\n data_link_split = data_link_split.drop(['link_info'], axis=1)\n data_link_split['link_ratio'] = data_link_split['link_ratio'].astype(float)\n data_link_split['link_time'] = data_link_split['link_time'].astype(float)\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)\n print('preprocess finish!')\n print('start feature engineering')\n feature = test_head[['order_id', 'distance']]\n ###################static fea#############################################\n #######################order link id count###############################\n df = data_link_split.groupby('order_id', as_index=False)\n tmp_linkid_agg = df['link_id'].agg({'zsl_order_link_id_count': 'count'})\n tmp_linkid_agg['zsl_order_link_id_count_bins'] = 0\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 75) & (\n tmp_linkid_agg['zsl_order_link_id_count'] < 100), 'zsl_order_link_id_count_bins'] = 1\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 100) & (\n tmp_linkid_agg['zsl_order_link_id_count'] < 120), 'zsl_order_link_id_count_bins'] = 2\n tmp_linkid_agg.loc[(tmp_linkid_agg['zsl_order_link_id_count'] >= 120), 'zsl_order_link_id_count_bins'] = 3\n feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')\n print('order link id count finish!')\n #######################order link id & distance###############################\n feature['zsl_order_is_highspeed'] = 0\n feature.loc[\n (feature['distance'] > 90000) & (feature['zsl_order_link_id_count'] < 300), 'zsl_order_is_highspeed'] = 1\n print('order link id & distance finish!')\n #######################order link id & nextlinks centry###############################\n tmp = data_link_split[data_link_split['link_id'].isin(dc)]\n 
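# count how many of each order's links rank in the top 50000 by degree centrality (dc)\n    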
tmp = tmp.groupby('order_id', as_index=False)\n tmp_linkid_centry_count = tmp['link_id'].agg({'zsl_order_link_id_centry_count': 'count'})\n feature = feature.merge(tmp_linkid_centry_count, on='order_id', how='left')\n feature['zsl_order_link_id_centry_count'] = feature['zsl_order_link_id_centry_count'].fillna(0)\n print('order link id & nextlinks centry finish!')\n #######################order link time sum mean max min var std###############################\n tmp_linktime_agg = df['link_time'].agg({'zsl_order_link_time_sum': 'sum', 'zsl_order_link_time_mean': 'mean',\n 'zsl_order_link_time_max': 'max', 'zsl_order_link_time_min': 'min',\n 'zsl_order_link_time_var': 'var', 'zsl_order_link_time_skew': 'skew'})\n feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')\n print('order link time sum mean max min var std finish!')\n #######################order link current status mean nunique###############################\n tmp_linktime_agg = df['link_current_status'].agg(\n {'zsl_link_current_status_mean': 'mean', 'zsl_link_current_status_nunique': 'nunique'})\n feature = feature.merge(tmp_linktime_agg, on='order_id', how='left')\n print('order link current status mean nunique finish!')\n #######################order link current status count vector###############################\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(str)\n data_link_split.loc[data_link_split['link_current_status'].astype(int) < 0, 'link_current_status'] = '0'\n data_link_split.loc[data_link_split['link_current_status'].astype(int) > 3, 'link_current_status'] = '3'\n data = data_link_split.groupby('order_id')['link_current_status'].apply(lambda x: x.str.cat(sep=',')).reset_index()\n cv_encode = CountVectorizer(token_pattern=u'(?u)\\\\b\\\\w+\\\\b')\n test_x = cv_encode.fit_transform(data['link_current_status'])\n test_x = test_x.toarray()\n link_current_status = pd.DataFrame(test_x, columns=['zsl_link_current_status0', 'zsl_link_current_status1',\n 'zsl_link_current_status2',\n 'zsl_link_current_status3'])\n data = pd.concat([data[['order_id']], link_current_status], axis=1)\n feature = feature.merge(data, on='order_id', how='left')\n print('order link current status count vector finish!')\n #######################order distance/link_id_count###############################\n feature['zsl_distance_div_link_id_count'] = feature['distance'] * 10 / feature['zsl_order_link_id_count']\n feature = feature.drop('distance', axis=1)\n print('order distance div link_id_count finish!')\n #######################order link ratio sum mean max min var std###############################\n tmp_linkratio_agg = df['link_ratio'].agg({'zsl_order_link_ratio_sum': 'sum', 'zsl_order_link_ratio_mean': 'mean',\n 'zsl_order_link_ratio_min': 'min',\n 'zsl_order_link_ratio_var': 'var', 'zsl_order_link_ratio_skew': 'skew'})\n feature = feature.merge(tmp_linkratio_agg, on='order_id', how='left')\n print('order link ratio sum mean max min var std finish!')\n #######################weather###################################################################\n weather = pd.read_csv(root_path + 'weather.csv')\n weather_dict = {'rainstorm': 0, 'heavy rain': 1, 'moderate rain': 2, 'cloudy': 3,\n 'showers': 4}\n weather['weather'] = weather['weather'].map(weather_dict)\n weather['date'] = weather['date'].astype(str)\n weather = weather[weather['date'] == data_time]\n feature['weather'] = weather['weather'].values[0]\n feature['hightemp'] = weather['hightemp'].values[0]\n feature['lowtemp'] = 
weather['lowtemp'].values[0]\n print('weather finish!')\n ###################trend fea#############################################\n ###################trend link time#####################################\n data_link_split['link_current_status'] = data_link_split['link_current_status'].astype(int)\n groupby = data_link_split.groupby(['order_id'])\n func = partial(trend_in_last_k_link_id_time, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_link_time_features, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n func = partial(last_k_link_time_interval, periods=[2, 5, 7, 10, 15, 20, 30, 50, 80, 100, 100000000])\n g = parallel_apply_fea(groupby, func, index_name='order_id', num_workers=20, chunk_size=10000)\n feature = feature.merge(g, on='order_id', how='left')\n print('trend link time finish!')\n ####################nextlinks graph embedding#######################\n data_link_split['link_id'] = data_link_split['link_id'].astype(int)\n data_link_split['link_id'] = data_link_split['link_id'].map(read_idkey)\n data_link_split['link_id'] = data_link_split['link_id'].fillna(0)\n data_link_split['link_id'] = data_link_split['link_id'].astype(int)\n data_link_split['link_id'] = data_link_split['link_id'].map(read_grapheb)\n data_link_split['link_id'] = data_link_split['link_id'].fillna('0')\n def replace_list(x):\n if isinstance(x, str):\n x = fill_list\n return x\n data_link_split['link_id'] = data_link_split['link_id'].apply(replace_list)\n link_id_col = ['zsl_link_id_eb{}'.format(i) for i in range(embedding_k)]\n agg_col = dict(zip(link_id_col, ['mean'] * len(link_id_col)))\n link_id_array = np.array(data_link_split.pop('link_id').to_list())\n link_id_array = pd.DataFrame(link_id_array, columns=agg_col, dtype=np.float16)\n data_link_split = pd.concat([data_link_split, link_id_array], axis=1)\n tmp = data_link_split.groupby('order_id', as_index=False)\n tmp_linkid_agg = tmp.agg(agg_col)\n feature = feature.merge(tmp_linkid_agg, on='order_id', how='left')\n feature.to_csv(root_path+'feature/test/link_fea_order_id_level_20200901.csv',index=False)\n"
},
{
"alpha_fraction": 0.6142174601554871,
"alphanum_fraction": 0.645967960357666,
"avg_line_length": 25.559701919555664,
"blob_id": "101fe3405e3ff1552a0494a78570b8bd466e4b9e",
"content_id": "f491fd3388dbc54ef9a863e990166a0acbd30f1b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3669,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 134,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/MultivariateLinearRegression.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly\nimport plotly.graph_objs as go\n\nfrom linear_regression import LinearRegression\nplotly.offline.init_notebook_mode() # 在线显示图标,更多功能\n\ndata = pd.read_csv('../data/world-happiness-report-2017.csv')\n\ntrain_data = data.sample(frac=0.8)\ntest_data = data.drop(train_data.index)\n\n# 与单特征模型相比,只是多了一个特征列\ninput_param_name_1 = 'Economy..GDP.per.Capita.'\ninput_param_name_2 = 'Freedom'\noutput_param_name = 'Happiness.Score'\n# 双特征的loss为:0.08517538069974877\nx_train = train_data[[input_param_name_1, input_param_name_2]].values\n# 全特征的loss为:0.0019415807477718364\n# feat_list = list(train_data.columns.drop(['Happiness.Score','Country']))\n# x_train = train_data[feat_list].values\ny_train = train_data[[output_param_name]].values\n\nx_test = test_data[[input_param_name_1, input_param_name_2]].values\ny_test = test_data[[output_param_name]].values\n\n# Configure the plot with training dataset.\nplot_training_trace = go.Scatter3d(\n x=x_train[:, 0].flatten(),\n y=x_train[:, 1].flatten(),\n z=y_train.flatten(),\n name='Training Set',\n mode='markers',\n marker={\n 'size': 10,\n 'opacity': 1,\n 'line': {\n 'color': 'rgb(255, 255, 255)',\n 'width': 1\n },\n }\n)\n\nplot_test_trace = go.Scatter3d(\n x=x_test[:, 0].flatten(),\n y=x_test[:, 1].flatten(),\n z=y_test.flatten(),\n name='Test Set',\n mode='markers',\n marker={\n 'size': 10,\n 'opacity': 1,\n 'line': {\n 'color': 'rgb(255, 255, 255)',\n 'width': 1\n },\n }\n)\n\nplot_layout = go.Layout(\n title='Date Sets',\n scene={\n 'xaxis': {'title': input_param_name_1},\n 'yaxis': {'title': input_param_name_2},\n 'zaxis': {'title': output_param_name}\n },\n margin={'l': 0, 'r': 0, 'b': 0, 't': 0}\n)\n\nplot_data = [plot_training_trace, plot_test_trace]\n\nplot_figure = go.Figure(data=plot_data, layout=plot_layout)\n\nplotly.offline.plot(plot_figure)\n\nnum_iterations = 500\nlearning_rate = 0.01\npolynomial_degree = 0\nsinusoid_degree = 0\n\nlinear_regression = LinearRegression(x_train, y_train, polynomial_degree, sinusoid_degree)\n\n(theta, cost_history) = linear_regression.train(learning_rate, num_iterations)\n\nprint('开始时的损失:', cost_history[0])\nprint('训练后的损失:', cost_history[-1])\n\nplt.plot(range(num_iterations), cost_history)\nplt.xlabel('Iterations')\nplt.ylabel('Cost')\nplt.title('Gradient Descent')\nplt.show()\n\npredictions_num = 10\n\nx_min = x_train[:, 0].min()\nx_max = x_train[:, 0].max()\n\ny_min = x_train[:, 1].min()\ny_max = x_train[:, 1].max()\n\nx_axis = np.linspace(x_min, x_max, predictions_num)\ny_axis = np.linspace(y_min, y_max, predictions_num)\n\nx_predictions = np.zeros((predictions_num * predictions_num, 1))\ny_predictions = np.zeros((predictions_num * predictions_num, 1))\n\nx_y_index = 0\nfor x_index, x_value in enumerate(x_axis):\n for y_index, y_value in enumerate(y_axis):\n x_predictions[x_y_index] = x_value\n y_predictions[x_y_index] = y_value\n x_y_index += 1\n\nz_predictions = linear_regression.predict(np.hstack((x_predictions, y_predictions)))\n\nplot_predictions_trace = go.Scatter3d(\n x=x_predictions.flatten(),\n y=y_predictions.flatten(),\n z=z_predictions.flatten(),\n name='Prediction Plane',\n mode='markers',\n marker={\n 'size': 1,\n },\n opacity=0.8,\n surfaceaxis=2,\n)\n\nplot_data = [plot_training_trace, plot_test_trace, plot_predictions_trace]\nplot_figure = go.Figure(data=plot_data, layout=plot_layout)\nplotly.offline.plot(plot_figure)\n"
},
{
"alpha_fraction": 0.739393949508667,
"alphanum_fraction": 0.7696969509124756,
"avg_line_length": 54,
"blob_id": "fcbafa4bd1dae03d8b6bc2b223dee1cbfc74ad79",
"content_id": "7795a8b05de6801bea8439583d796589722eaf49",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 165,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 3,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/README.MD",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "node2vec \n1. Recompile from https://github.com/snap-stanford/snap and add node2vec to system path.\n2. To grant executable permission, run: chmod +x ./c_exe/node2vec\n"
},
{
"alpha_fraction": 0.6379032135009766,
"alphanum_fraction": 0.6601382493972778,
"avg_line_length": 42.838382720947266,
"blob_id": "3eeda76204cfbf69ceb139e176d9dfc6910db76c",
"content_id": "4360dcdec9d9aa6d94c327c5e97d2d218149997c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8680,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 198,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/WD_128544/wd_model/wd_model.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom tensorflow import keras\nimport tensorflow as tf\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nimport tensorflow.keras.layers as L\n# import tensorflow.keras.models as M\nimport tensorflow.keras.backend as K\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping\nfrom keras_radam.training import RAdamOptimizer\nfrom tensorflow.keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom tensorflow.keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, Conv1D\nfrom tensorflow.keras.layers import Input, Dense, Lambda, Layer\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.models import Model\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef gru_layer(hidden_dim, dropout):\n return L.Bidirectional(L.GRU(\n hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))\n\n\ndef lstm_layer(hidden_dim, dropout):\n return L.Bidirectional(L.LSTM(\n hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))\n\n\ndef preprocess(df, cate_cols, numeric_cols):\n for cl in cate_cols:\n le = LabelEncoder()\n df[cl] = le.fit_transform(df[cl])\n cols = cate_cols + numeric_cols\n X_train = df[cols]\n return X_train\n\n\ndef wd_model(link_size, cross_size, slice_size, input_deep_col, input_wide_col,\n link_nf_size, cross_nf_size, link_seqlen=170, cross_seqlen=12, pred_len=1,\n dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001,\n kernel_size1=3, kernel_size2=2, conv_size=128, conv='conv'):\n link_inputs = L.Input(shape=(link_seqlen, link_nf_size))\n cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size))\n deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')\n slice_input = L.Input(shape=(1,))\n wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_input')\n\n # link----------------------------\n categorical_fea1 = link_inputs[:, :, :1]\n numerical_fea1 = link_inputs[:, :, 1:5]\n\n embed = L.Embedding(input_dim=link_size, output_dim=embed_dim)(categorical_fea1)\n reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))\n #reshaped = L.SpatialDropout1D(sp_dropout)(reshaped)\n\n hidden = L.concatenate([reshaped, numerical_fea1], axis=2)\n hidden = L.SpatialDropout1D(sp_dropout)(hidden)\n \"\"\"\n categorical_ar_st = link_inputs[:, :, 5:6]\n categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)\n embed_ar_st = L.Embedding(input_dim=(-1,289), output_dim=8)(categorical_ar_st)\n reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))\n reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)\n\n categorical_ar_sl = link_inputs[:, :, 6:7]\n categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)\n embed_ar_sl = L.Embedding(input_dim=(-1, 289), output_dim=8)(categorical_ar_sl)\n reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))\n reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)\n hidden = L.concatenate([reshaped, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)\n \"\"\"\n for x in 
range(n_layers):\n hidden = lstm_layer(hidden_dim, dropout)(hidden)\n\n if conv=='conv':\n #x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)\n avg_pool1_gru = GlobalAveragePooling1D()(hidden)\n max_pool1_gru = GlobalMaxPooling1D()(hidden)\n truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])\n elif conv=='resnet50':\n truncated_link = ResNet50(include_top=False, pooling='max', weights=None)(hidden)\n else:\n truncated_link = hidden[:, :pred_len]\n truncated_link = L.Flatten()(truncated_link)\n\n # cross----------------------------\n categorical_fea2 = cross_inputs[:, :, :1]\n numerical_fea2 = cross_inputs[:, :, 1:]\n embed2 = L.Embedding(input_dim=cross_size, output_dim=embed_dim)(categorical_fea2)\n reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))\n #reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)\n\n hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)\n hidden2 = L.SpatialDropout1D(sp_dropout)(hidden2)\n for x in range(n_layers):\n hidden2 = lstm_layer(hidden_dim, dropout)(hidden2)\n\n if conv=='conv':\n #x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)\n avg_pool3_gru = GlobalAveragePooling1D()(hidden2)\n max_pool3_gru = GlobalMaxPooling1D()(hidden2)\n truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])\n elif conv=='resnet50':\n truncated_cross = ResNet50(include_top=False, pooling='max', weights=None)(hidden2)\n else:\n truncated_cross = hidden2[:, :pred_len]\n truncated_cross = L.Flatten()(truncated_cross)\n\n # slice----------------------------\n embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)\n embed_slice = L.Flatten()(embed_slice)\n\n # deep_inputs\n \"\"\"\n dense_hidden1 = L.Dense(256, activation=\"relu\")(deep_inputs)\n dense_hidden1 = L.Dropout(dropout)(dense_hidden1)\n dense_hidden2 = L.Dense(256, activation=\"relu\")(dense_hidden1)\n dense_hidden2 = L.Dropout(dropout)(dense_hidden2)\n dense_hidden3 = L.Dense(128, activation=\"relu\")(dense_hidden2)\n \"\"\"\n x = L.Dense(512, activation=\"relu\")(deep_inputs)\n x = L.BatchNormalization()(x)\n x = L.Lambda(tf.keras.activations.swish)(x)\n x = L.Dropout(0.25)(x)\n for i in range(2):\n x = L.Dense(256)(x)\n x = L.BatchNormalization()(x)\n x = L.Lambda(tf.keras.activations.swish)(x)\n x = L.Dropout(0.25)(x)\n dense_hidden3 = L.Dense(64,activation='linear')(x)\n # main-------------------------------\n truncated = L.concatenate([truncated_link, truncated_cross, dense_hidden3, wide_inputs, embed_slice]) # WD\n \"\"\"\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))\n \"\"\"\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(1024, activation='relu') (truncated))\n truncated = L.Dropout(dropout)(truncated)\n\n for i in range(2):\n truncated = L.Dense(512)(truncated)\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Lambda(tf.keras.activations.swish)(truncated)\n truncated = L.Dropout(dropout)(truncated)\n\n out = L.Dense(1, activation='linear')(truncated)\n\n\n model = tf.keras.Model(inputs=[link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],\n outputs=out) # WD\n print(model.summary())\n model.compile(loss='mape',\n 
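# MAPE is the competition metric and is used directly as the training loss; RAdamOptimizer comes from keras_radam\n                  # note: the conv=='resnet50' branches above reference ResNet50, which would additionally need 'from tensorflow.keras.applications import ResNet50'\n                  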
optimizer=RAdamOptimizer(learning_rate=1e-3),\n metrics=['mape'])\n\n return model\n\n\ndef get_mc_es_lr(model_name: str, patience=5, min_delta=1e-4):\n mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name)),\n es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',\n restore_best_weights=True, patience=patience)\n lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=patience, mode='min',\n min_delta=min_delta)\n\n return mc, es, lr\n\n\nclass Mish(tf.keras.layers.Layer):\n\n def __init__(self, **kwargs):\n super(Mish, self).__init__(**kwargs)\n self.supports_masking = True\n\n def call(self, inputs):\n return inputs * K.tanh(K.softplus(inputs))\n\n def get_config(self):\n base_config = super(Mish, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n\ndef mish(x):\n return tf.keras.layers.Lambda(lambda x: x*K.tanh(K.softplus(x)))(x)\n\n\ntf.keras.utils.get_custom_objects().update({'mish': tf.keras.layers.Activation(mish)})\n"
},
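The file above registers a `mish` activation with Keras via `get_custom_objects`. As a quick sanity check of the formula it wraps — mish(x) = x · tanh(softplus(x)) — here is a minimal sketch, assuming TensorFlow 2.x and NumPy; the variable names are illustrative, not from the file:

~~~python
import numpy as np
import tensorflow as tf

def mish_np(x):
    # mish(x) = x * tanh(softplus(x)), with softplus(x) = log(1 + exp(x))
    return x * np.tanh(np.log1p(np.exp(x)))

x = np.linspace(-3.0, 3.0, 7).astype("float32")
tf_out = (tf.constant(x) * tf.math.tanh(tf.math.softplus(tf.constant(x)))).numpy()
assert np.allclose(mish_np(x), tf_out, atol=1e-5)  # NumPy and TF agree
print(tf_out)
~~~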
{
"alpha_fraction": 0.5246946215629578,
"alphanum_fraction": 0.5292087197303772,
"avg_line_length": 32.03508758544922,
"blob_id": "a4903489cbee300295ee3aa388001a326454f33a",
"content_id": "3c31e095dc5448fb36f529d9e6b06422d4977c37",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4300,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 114,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/LinearRegression/linear_regression.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom util.features import prepare_for_training\n\n\nclass LinearRegression:\n def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):\n \"\"\":\n 1.对数据进行预处理操作\n 2.先得到所有的特征个数\n 3.初始化参数矩阵\n\n data:数据\n polynomial_degree: 是否做额外变换\n sinusoid_degree: 是否做额外变换\n normalize_data: 是否标准化数据\n \"\"\"\n (data_processed,\n features_mean,\n features_deviation) = prepare_for_training.prepare_for_training(data, polynomial_degree, sinusoid_degree,\n normalize_data)\n\n self.data = data_processed\n self.labels = labels\n self.features_mean = features_mean\n self.features_deviation = features_deviation\n self.polynomial_degree = polynomial_degree\n self.sinusoid_degree = sinusoid_degree\n self.normalize_data = normalize_data\n\n num_features = self.data.shape[1]\n self.theta = np.zeros((num_features, 1))\n\n def train(self, alpha, num_iterations=500):\n \"\"\"\n 训练模块,执行梯度下降得到theta值和损失值loss\n\n alpha: 学习率\n num_iterations: 迭代次数\n \"\"\"\n cost_history = self.gradient_descent(alpha, num_iterations)\n return self.theta, cost_history\n\n def gradient_descent(self, alpha, num_iterations):\n \"\"\"\n 实际迭代模块\n\n alpha: 学习率\n num_iterations: 迭代次数\n\n :return: 返回损失值 loss\n \"\"\"\n cost_history = [] # 收集每次的损失值\n for _ in range(num_iterations): # 开始迭代\n self.gradient_step(alpha) # 每次更新theta\n cost_history.append(self.cost_function(self.data, self.labels))\n return cost_history\n\n def gradient_step(self, alpha):\n \"\"\"\n 梯度下降参数更新计算方法,注意是矩阵运算\n\n alpha: 学习率\n \"\"\"\n num_examples = self.data.shape[0] # 当前样本个数\n # 根据当前数据和θ获取预测值\n prediction = LinearRegression.hypothesis(self.data, self.theta)\n delta = prediction - self.labels # 残差,即预测值减去真实值\n theta = self.theta\n # 依照小批量梯度下降法,写代码表示\n theta = theta - alpha * (1/num_examples)*(np.dot(delta.T, self.data)).T\n self.theta = theta # 计算完theta后更新当前theta\n\n def cost_function(self, data, labels):\n \"\"\"\n 损失计算方法,计算平均的损失而不是每个数据的损失值\n \"\"\"\n num_examples = data.shape[0]\n delta = LinearRegression.hypothesis(data, self.theta) - labels # 预测值-真实值 得到残差\n cost = np.dot(delta, delta.T) # 损失值\n return cost[0][0]\n\n @staticmethod\n def hypothesis(data, theta):\n \"\"\"\n 获取预测值\n\n :param data: 矩阵数据\n :param theta: 权重θ\n :return: 返回预测值\n \"\"\"\n predictions = np.dot(data, theta)\n return predictions\n\n def get_cost(self, data, labels):\n \"\"\"\n 得到当前损失\n \"\"\"\n data_processed = prepare_for_training.prepare_for_training(data,\n self.polynomial_degree,\n self.sinusoid_degree,\n self.normalize_data)[0]\n return self.cost_function(data_processed, labels)\n\n def predict(self, data):\n \"\"\"\n 用训练的参数模型,预测得到回归值的结果\n \"\"\"\n data_processed = prepare_for_training.prepare_for_training(data,\n self.polynomial_degree,\n self.sinusoid_degree,\n self.normalize_data)[0]\n predictions = LinearRegression.hypothesis(data_processed, self.theta)\n\n return predictions\n"
},
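The `gradient_step` above is plain batch gradient descent, theta ← theta − α·(1/m)·Xᵀ(Xθ − y). A self-contained NumPy sketch of the same update on synthetic data (the data, learning rate, and iteration count are made up for illustration):

~~~python
import numpy as np

rng = np.random.default_rng(0)
m = 100
X = np.hstack([np.ones((m, 1)), rng.normal(size=(m, 1))])  # bias column + one feature
true_theta = np.array([[2.0], [3.0]])
y = X @ true_theta + rng.normal(scale=0.1, size=(m, 1))    # noisy targets

theta = np.zeros((2, 1))
alpha = 0.1
for _ in range(500):
    delta = X @ theta - y                      # residuals, shape (m, 1)
    theta = theta - alpha * (X.T @ delta) / m  # the same update as gradient_step
print(theta.ravel())  # converges close to [2.0, 3.0]
~~~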
{
"alpha_fraction": 0.5609607696533203,
"alphanum_fraction": 0.7534204721450806,
"avg_line_length": 16.046632766723633,
"blob_id": "32c80607c82ad64775141ac505bbeb0af39fe1a2",
"content_id": "7624602987dea7b25358588e4ce7f23291316b7a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4229,
"license_type": "permissive",
"max_line_length": 143,
"num_lines": 193,
"path": "/NLP通用框架BERT项目实战/第二章——BERT源码解读与应用实例/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# README\n\n[BERT开源框架地址](<https://github.com/google-research/bert>),最好读下README,以下是预训练好的BERT模型,这里用到两个\n\n\n\n> 点击即可下载,里面内容如下\n\n\n\n\n\n> json:相关的参数\n>\n> vocab:语料库\n>\n> 其它:使用时的是加载文件,如训练好的权重等\n\n\n\n> 下载一个数据集,使用脚本命令下载,可能需要翻墙,可以访问我的云盘进行下载。链接:https://pan.baidu.com/s/18vPGelYCXGqp5OCWZWz36A \n> 提取码:de0f。这里只用到MRPC\n\n\n\n#### MRPC\n\n内容如下:\n\n\n\ntrain.csv:\n\n\n\n> 二分类任务:判断两句话是否说的是同一意思\n>\n> Quality:是否相同,相同为1\n\ntest.csv\n\n\n\n> 没有了Quality,需要进行预测\n\n\n\n#### download BERT\n\n把bert的code全部下载下来,并解压到指定目录\n\n\n\n\n\n#### 创建环境\n\nsettings一个虚拟环境,我使用的是pycharm\n\n\n\n我默认你是下载了某个IDE,至少是懂python的,Show All\n\n\n\n点击加号\n\n\n\n添加一个conda或者普通的python都可以,因为conda是专门做数据的,所以我选了conda,其实没什么区别\n\n\n\n点击OK即可。\n\n\n\n> 输入如下命令,进行安装\n\n~~~\n# python3.7,我的是window\npip install tensorflow==1.13.2 -i https://pypi.douban.com/simple\n\npip install numpy==1.16 -i https://pypi.douban.com/simple\n~~~\n\n\n\n#### 参数\n\n\n\n注意:是run_classifier.py文件\n\n\n\n~~~\n-task_name=MRPC\n-do_train=true\n-do_eval=true\n-data_dir=../GLUE/glue_data/MRPC\n-vocab_file=../GLUE/BERT_BASE_DIR/uncased_L-12_H-768_A-12/vocab.txt\n-bert_config_file=../GLUE/BERT_BASE_DIR/uncased_L-12_H-768_A-12/bert_config.json\n-init_checkpoint=../GLUE/BERT_BASE_DIR/uncased_L-12_H-768_A-12/bert_model.ckpt\n-max_seq_length=128\n-train_batch_size=8\n-learning_rate=2e-5\n--num_train_epochs=3.0\n-output_dir=../GLUE/output/\n~~~\n\n> task_name:运行的模块,在main里指定了名字对应的类\n>\n> do_train:是否训练\n>\n> do_eval:是否验证\n>\n> data_dir:数据地址\n>\n> vocab_file:词库表\n>\n> bert_config_file:bert参数\n>\n> init_checkpoint:初始化参数\n>\n> max_seq_length:最长字符限制\n>\n> train_batch_size:训练次数\n>\n> learning_rate:学习率\n>\n> num_train_epochs:循环训练次数\n>\n> output_dir:输出路径\n\n配置完成后,run该文件即可\n\n\n\n\n\nGoogle原版的\n\n\n\n\n\n#### 报错及解决办法\n\nclass AdamWeightDecayOptimizer(tf.optimizers.Optimizer): AttributeError: module 'tensorflow' has no attribute 'optimizers'\n\n> 如下内容\n\n~~~\ntf.optimizers.Optimizer改为tf.keras.optimizers.Optimizer\n~~~\n\n\n\nsuper(AdamWeightDecayOptimizer, self).__init__(False, name) TypeError: __ini\n\n~~~\nsuper(AdamWeightDecayOptimizer, self).__init__(False, name)\n 改成\nsuper(AdamWeightDecayOptimizer, self).__init__()\n~~~\n\n\n\ntensorflow/core/framework/op_kernel.cc:1401] OP_REQUIRES failed at save_restore_v2_ops.cc:109 : Not found: Failed to create a NewWriteableFile:\n\n> 路径过长,需要将整个项目移动到某盘下。要求满足:1.段路径,2.全英文\n\n\n\n#### 备选方案\n\n一直起不来的,可以直接使用我改好的代码文件,链接:https://pan.baidu.com/s/18vPGelYCXGqp5OCWZWz36A \n提取码:de0f\n\n我的路径如下:\n\n\n\n\n\n\n\n\n\n\n\noutput是自动生成的不需要管"
},
{
"alpha_fraction": 0.6366047859191895,
"alphanum_fraction": 0.6498673558235168,
"avg_line_length": 22.625,
"blob_id": "22a9d8356868a289a3dfaa7ba28f84464fb5a515",
"content_id": "3bbd2037a8cff05dc9ec666e52e591ee63614b5e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 16,
"path": "/机器学习算法原理及推导/其它/第二章——手写线性回归算法/util/features/generate_sinusoids.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\ndef generate_sinusoids(dataset, sinusoid_degree):\n \"\"\"\n sin(x).\n \"\"\"\n\n num_examples = dataset.shape[0]\n sinusoids = np.empty((num_examples, 0))\n\n for degree in range(1, sinusoid_degree+1):\n sinusoid_features = np.sin(degree * dataset)\n sinusoids = np.concatenate((sinusoids, sinusoid_features), axis=1)\n\n return sinusoids"
},
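For reference, a small usage sketch of the `generate_sinusoids` logic above (the loop is inlined here so the snippet runs standalone, since `util.features` is repo-internal); with `sinusoid_degree=2` it column-stacks sin(1·x) and sin(2·x). The sample inputs are made up:

~~~python
import numpy as np

dataset = np.array([[0.0], [np.pi / 2]])  # two samples, one feature
sinusoid_degree = 2

sinusoids = np.empty((dataset.shape[0], 0))
for degree in range(1, sinusoid_degree + 1):
    sinusoids = np.concatenate((sinusoids, np.sin(degree * dataset)), axis=1)

# rows: samples; columns: sin(1*x), sin(2*x)
print(sinusoids.round(6))  # [[0. 0.], [1. 0.]]
~~~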
{
"alpha_fraction": 0.6403345465660095,
"alphanum_fraction": 0.7462825179100037,
"avg_line_length": 17.894737243652344,
"blob_id": "aeea293bee9310a80d43fa6816aa6cf19b132031",
"content_id": "4311ba43c2019cf962a60483ef7a7abe7c310fd2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1600,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 57,
"path": "/机器学习算法原理及推导/李宏毅——异常检测/1. 异常检测——概述.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 1. 异常检测——概述\n\n### 问题描述\n\nProblem Formulation\n\n- 有一批训练数据如:{x1,x2,...,xn}\n- 我们想从这批输入数据中分出类似的,或者不类似的\n\n\n\n> 类似上图,找出数据中anomaly的数据,这个anomaly并不表示它是有问题,只是说它跟大多数数据不一样。有可能是特别好的,有可能是特别坏的。\n\n\n\n### 什么是异常\n\nWhat is Anomaly?\n\n什么是异常取决于大部分是什么\n\n\n\n> 你给它看很多雷丘,那么皮卡丘就是异常\n>\n> 你给它看很多皮卡丘,那么雷丘就是异常\n>\n> 你给它看很多神奇宝贝,那么数码宝贝就是异常\n\n\n\n### 异常检测的应用\n\nApplications\n\n- Fraud Detection(诈欺检测)\n - Training data:正常刷卡行为,x:盗刷?\n - Ref: https://www.kaggle.com/ntnu-testimon/paysim1/home\n - Ref: https://www.kaggle.com/mlg-ulb/credicardraud/home\n- Network Intrusion Detection(入侵检测)\n - Training data:正常连接,x:攻击行为?\n - Ref:http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html\n- Cancer Detection(细胞检测)\n - Training data:正常细胞,x:癌细胞\n - Ref:http://kdd.ics.uci.edu/uciml/breast-cancer-wisconsin-data/home\n\n\n\n### 如何分类\n\nBinary Classification?\n\n- Given normal data \n- Given anomaly \n- Then training a binary classifier ......\n\n如上,给它正常数据和异常数据,然后自动分成Class1和Class2"
},
{
"alpha_fraction": 0.5885772705078125,
"alphanum_fraction": 0.8125854730606079,
"avg_line_length": 18.104576110839844,
"blob_id": "797062457904223fb12f92aeb22a81653fb0b049",
"content_id": "0af1cedc8ff1c03e424c29090b2b641748fefd2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5814,
"license_type": "permissive",
"max_line_length": 158,
"num_lines": 153,
"path": "/深度学习入门/第四章——递归神经网络与词向量原理解读.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第四章——递归神经网络与词向量原理解读\n\n#### RNN网络架构解读\n\n常规神经网络并不能考虑时间序列的特征(比如前天+昨天+今天或者带有前后关联的特征),现在每个特征都是独立考虑的,那么如果有这样的特征,网络应该怎么学呢\n\n\n\n> 而递归递归网络hidden这里的转回箭头,表示训练完第一个X后,再拿回来去训练第二个X,即前一次训练的结果对后一次的训练结果产生影响。\n\n\n\n> 类似现在有X0、X1、X2 ... Xt,假设X0就是本月的1号,X1就是2号以此类推,Xt就是昨天,这样是不是就是一个时间序列。\n>\n> X输入后有了h,h是中间的结果,每个h保证能联合前一个的h。\n\n\n\n#### LSTM网络\n\nRNN的问题在于,每一次的h只考虑前一个,当h到最后的时候,它只考虑n-1的h,这样对吗?或者说越后面的时间的数据一定越重要吗?我们是不是应该考虑每个时间的数据\n\n\n\n- C:控制参数,决定什么样的信息会被保留什么样的会被遗忘。\n\n- 门:一种让信息选择式通过的方法\n\n- 每次计算的结果和前一轮的结果进行比较,选择要更新的信息\n\n \n\n\n\n#### 词向量Word2Vec模型通俗解释\n\n先考虑第一个问题:如何将文本向量化\n\n比如描述一个人,只用身高或体重,还是综合各项指标?如下\n\n\n\n只要有了向量,就可以用不同的方法来计算相似度。如下\n\n\n\n通常,数据的维度越高,能提供的信息也就越多,从而计算结果的可靠性就更值得信赖了。如下\n\n\n\n如何描述语言的特征呢?通常都在词的层面上构建特征。Word2Vec就是把词转成向量:\n\n\n\n假设现在已经拿到一份训练好的词向量,其中每个词都表示50维的向量:\n\n\n\n如果在热度图中显示,结果如下:\n\n\n\n从结果中可以发现,相似的词在特征表达中比较相似,也就是说明词的特征是有实际意义的!\n\n\n\n> 如上图的男人和男孩有相当部分的区域颜色是相似的,只是有的浅了点,有的深了点。同样的地方,对比水,它们之间相差的就非常远,颜色基本没有关联。\n\n\n\n#### 训练数据构建\n\n输入数据从哪来?首先我们得理解的是,文字单词转换成数值,它并不是仅仅针对该次任务或者该数据的,Thou或者shalt在这里是有这些意思,转成对应的数值,在其它文本其它数据中也是同样的意思,所以只要是符合逻辑的文本,我们都可以拿来训练。目前有Google的预训练bert模型也是这个原理。一般我们会维护一下语料库大表。\n\n\n\n如何训练\n\n\n\n> 我们选中三个单词,以thou shalt 来预测not,也可以不断的往后滑动,如以shalt not来预测make\n\n\n\n> 也就是可以是无监督学习,但文字的前后一定是有先后顺序的,不能没有说话逻辑。\n\n\n\n#### CBOW与Skipgram模型\n\nCBOW:根据上下文预测中间内容\n\n\n\nSkipgram:根据中间内容预测上下文\n\n\n\n两模型输入输出如下:\n\n\n\n> 这两个模型都存在gensim里,后续调用该工具包即可。\n\n最终预测的词也会带有概率,而前面我们讲到如SoftMax用到会将最大的值提取出来,如果我们的语料库非常大,比如5万字,那么要给出5万字的概率,而SoftMax也得计算这5万,可以说是非常耗时的,怎么解决这个问题呢?\n\n\n\n#### 负采样方案\n\n**初始方案:**\n\n输入两个单词,看它们是不是前后对应的输入和输出,也就相当于一个二分类任务\n\n\n\n出发点是好的,但是此时训练集构建出来的标签全为1,无法进行较好的训练\n\n\n\n> 如上图,这样相当于告诉模型,只要往1预测,那么一定能效果好,类似风控场景中,正负样本非常悬殊甚至达到1:1万的情况。我们是不能直接训练的,我们会“构造”一些正样本,而这里是1太多,需要构建一些负样本。\n\n**改进方案:**\n\n加入一些负样本\n\n\n\n> 1个1添加多少个0呢?gensim工具包给出的默认参数是1个1和5个0,即正负样本比1:5\n\n正常情况下的Skipgram训练集\n\n\n\n**大致流程如下:**\n\n**1.初始化词向量矩阵**\n\n\n\n> 最左边Embedding是语料库大表,所有的词都在里面。\n>\n> Context是本次的预测的文本\n>\n> dataset是负采样完成后的结果\n\n\n\n**2.通过神经网络返回传播来计算更新,此时不光更新权重参数矩阵W,也会更新输入数据**\n\n\n\n> 给出损失函数Error的结果,并更新到输入和输出数据中。更新输出数据是为了输出本次的预测结果,而更新输入数据,是为了维护我们将一直使用的语料库大表,使得我们的语料库的词向量等越来越准确。\n\n"
},
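The chapter notes that both CBOW and Skip-gram ship in the gensim toolkit and that its default negative-sampling ratio is one positive to five negatives. A minimal training sketch, assuming gensim >= 4 (where the older `size` argument became `vector_size`); the toy corpus is invented:

~~~python
from gensim.models import Word2Vec

# Invented toy corpus; each sentence is a list of tokens.
sentences = [["thou", "shalt", "not", "make", "a", "machine"],
             ["in", "the", "likeness", "of", "a", "human", "mind"]]

model = Word2Vec(
    sentences,
    vector_size=50,  # dimensionality of the word vectors (like the 50-dim table above)
    window=2,        # context words considered on each side of the center word
    sg=1,            # 1 = skip-gram (center predicts context); 0 = CBOW
    negative=5,      # five negative samples per positive pair, the 1:5 ratio above
    min_count=1,     # keep every token in this tiny corpus
)
print(model.wv["thou"].shape)  # (50,)
~~~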
{
"alpha_fraction": 0.5531680583953857,
"alphanum_fraction": 0.7509641647338867,
"avg_line_length": 11.699300765991211,
"blob_id": "8500700db70b166af49d9260e177c4550e2bef39",
"content_id": "6cb7e2ecf7b6f5f442499d2f5ec1b39506f3432c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3361,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 143,
"path": "/机器学习算法原理及推导/李航——统计学习方法/3.K近邻——物以类聚.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 3.K近邻——物以类聚\n\nK-nearest neighbor\n\n### 知识树\n\n\n\n\n\n### 怎么区分红豆绿豆?\n\nHow to distinguish red beans and green beans?\n\n\n\n之前我们构造了一个超平面来解决这个问题,既然超平面可以切分,是不是红豆之间和绿豆之间有着某种关联。即:物以类聚。\n\n如果一个豆过来自然而然的到红豆堆,我们有理由认为它大概率是红豆。\n\n1. 同一标签的样本通常有很多相似的特征。\n2. 没进来一个样本,查看它周边的样本是什么类别,那么它就很有可能属于该类别。\n\n那么某个点与其它点距离怎么计算。\n\n\n\n### 距离度量\n\nDistance measure\n\n首先令\n\n\n\n度量的方法有:\n\n欧式距离(也称二范数):\n\n\n\n> xi里的x减去对应位置的xj里的x,然后全部平方,再求和,然后开根号。\n>\n> 如果两个点之间的距离很远,那么值就会很大\n\n曼哈顿距离(也称一范数/也称城市街区距离):\n\n\n\n> 相对上面欧式距离,不需要平方-相加-开根号,只要拿它的绝对值-相加即可\n\nP范数:\n\n\n\n> 引出P范数,p=1则是一范数,p=2则是二范数\n\n还有3范数(也称切比雪夫距离/棋盘距离)\n\n最常用的是欧式距离>曼哈顿距离>切比雪夫距离\n\n\n\n### 总结\n\nSummarization\n\n1. K近邻思想:物以类聚\n2. K近邻没有显式的训练过程\n 1. 不需要先训练再预测,直接得到结果\n3. 距离度量\n 1. 欧式距离:两点之间直线\n 2. 曼哈顿距离:城市街区距离\n 3. 切比雪夫距离:棋盘距离\n\n\n\n### K值的选择\n\nHow to chose K\n\n**选择较小的K值**\n\n\t用较小的邻域进行预测。预测结果对邻近的实例点非常敏感。如果邻近的实例点恰好是噪声,预测就会出错。\n\n**选择较大的K值**:\n\n\t用较大的邻域进行预测。对于输入实例较远(已经不太相似)的样本点也会对预测起作用,使预测发生错误。\n\n**在应用中**:\n\n\t先取一个较小的K值,再通过交叉验证法来选取最优的K值\n\n\n\n### 分数表决规则\n\nMajority voting rule\n\n分类决策规则:多数表决\n\n损失函数:\n\n\n\n实心圆内都判断为红色的损失值\n\n\n\n\n\n实心圆内都判断为蓝色的损失值\n\n\n\n\n\n### K近邻算法\n\nK-nearest neighbor\n\n输入:训练数据T = [(x1, y1),...,(xn,yn)]\n\n实例特征向量x。\n\n1. 根据给定的距离度量,在训练集中找到与x最近的k个点,涵盖这k个点的邻域记作Nk(x)\n\n2. 在Nk(x)中根据分类决策规则(如多少表决)决定x的类别y\n\n 输出实例x所属的类别y\n\n\n\n\n\n### 总结\n\nSummarization\n\n1. K近邻的思想:物以类聚\n2. K近邻没有显式的训练过场\n3. 距离度量:欧式距离、曼哈顿距离、切比雪夫距离\n4. 分类方式:多数表决规则"
},
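A NumPy sketch of the three distance measures and the majority-voting rule summarized above; the points, labels, query, and k are synthetic:

~~~python
import numpy as np
from collections import Counter

def euclidean(a, b):  # 2-norm
    return np.sqrt(np.sum((a - b) ** 2))

def manhattan(a, b):  # 1-norm / city-block distance
    return np.sum(np.abs(a - b))

def chebyshev(a, b):  # infinity-norm / chessboard distance
    return np.max(np.abs(a - b))

X = np.array([[0, 0], [0, 1], [1, 0], [5, 5], [5, 6]])
y = np.array(["red", "red", "red", "green", "green"])
query = np.array([0.5, 0.5])

k = 3
dists = [euclidean(query, p) for p in X]
nearest = np.argsort(dists)[:k]                   # indices of the k closest points
print(Counter(y[nearest]).most_common(1)[0][0])   # majority vote -> "red"
~~~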
{
"alpha_fraction": 0.6771488189697266,
"alphanum_fraction": 0.7243186831474304,
"avg_line_length": 22.292682647705078,
"blob_id": "b021b1fd44715dcd7a0a6a9b88638069f6387f76",
"content_id": "41eef413da606c7a606c8d25f00ed5c6d7c41699",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1216,
"license_type": "permissive",
"max_line_length": 83,
"num_lines": 41,
"path": "/机器学习竞赛实战_优胜解决方案/Indoor Location & Navigation/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# README\n\n19th/Top2%,提供答疑\n\n\n\n<https://www.kaggle.com/c/indoor-location-navigation>\n\n### 推荐notebook工具\n\n- kaggle自带的notebook\n- [智能钛Notebook-2.4.0-tf](https://console.cloud.tencent.com/tione/notebook/instance)\n\n\n\n### How to run the code\n\n1. 下载数据到input文件夹\n2. floor预测代码\n part1 数据预处理\n 运行code/wifi-features.ipynb \n 运行code/create-unified-wifi-features-example.ipynb\n part2 深度学习模型\n 运行code/floor-model-blstm.ipynb\n3. 坐标预测代码\n part1 数据预处理\n 运行code/wifi-label-encode.ipynb\n 运行code/data_abstract_sensor.ipynb\n 运行code/data_abstract_wifi.ipynb\n 运行code/gen_accl.ipynb\n part2 深度学习模型\n 运行code/lstm-wifi-encode-wifi.ipynb 仅使用wifi数据预测\n 运行code/lstm-wifi-encode-wifi-sensor.ipynb 使用wifi+sensor数据预测\n4. 结果融合\n 运行code/combine_v1.ipynb 模型线性融合\n5. 后处理\n 运行code/post_process.ipynb\n6. 规则预测代码\n 运行code/rules_infer.ipynb\n7. 结果融合\n 运行code/combine_v2.ipynb 模型线性融合得到最终final.csv预测文件"
},
{
"alpha_fraction": 0.6043733358383179,
"alphanum_fraction": 0.6358399987220764,
"avg_line_length": 44.28985595703125,
"blob_id": "0ad177ad7926a6a76bc8b32fef869c8636c76b1a",
"content_id": "ec37c37d890952a3ece0265ee0af6891bcc669e3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9437,
"license_type": "permissive",
"max_line_length": 144,
"num_lines": 207,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/4_single_model.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nimport lightgbm as lgb\nfrom utils import reduce_mem_usage,reduce_mem_usage_parallel\nimport os\nimport gc\nimport warnings\nimport time\nwarnings.filterwarnings(\"ignore\")\ndef slice_id_change(x):\n hour = x * 5 / 60\n hour = np.floor(hour)\n hour += 8\n if hour >= 24:\n hour = hour - 24\n return hour\n# 评估指标\ndef MAPE(true, pred):\n diff = np.abs(np.array(pred) - np.array(true))\n return np.mean(diff / true)\n# 自定义lgb评估指标\ndef lgb_score_mape(train_data,preds):\n labels = train_data\n diff = np.abs(np.array(preds) - np.array(labels))\n result = np.mean(diff / labels)\n return 'mape',result, False\nhead_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']\nresult = []\nresult_time_weight = []\nresult_dis_weight = []\ncount = 0\ndf = []\nnrows=None\nroot_path = '../data/giscup_2021/'\ndata_list = ['20200818', '20200819', '20200820', '20200821', '20200822', '20200823', '20200824',\n '20200825', '20200826', '20200827', '20200828', '20200829', '20200830', '20200831']\n#######################################本地验证#######################################\nfor name in os.listdir(root_path+'train/'):\n data_time = name.split('.')[0]\n if data_time not in data_list:\n continue\n train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)\n feature_cross = pd.read_csv(root_path+'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)\n feature_link = pd.read_csv(root_path+'feature/train/link_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)\n feature_head = pd.read_csv(root_path+'feature/train/head_link_{}.csv'.format(data_time),nrows=nrows)\n feature_sqe = pd.read_csv(root_path + 'feature/train/{}.csv'.format(data_time),nrows=nrows)\n\n\n feature_cross['order_id'] = feature_cross['order_id'].astype(str)\n feature_link['order_id'] = feature_link['order_id'].astype(str)\n feature_head['order_id'] = feature_head['order_id'].astype(str)\n feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)\n\n print(\"开始处理\", data_time)\n # train.columns = ['head','link','cross']\n # train['head'] = train['head'].apply(lambda x:x.split(' '))\n train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n train_head['order_id'] = train_head['order_id'].astype(str)\n train_head['ata'] = train_head['ata'].astype(float)\n train_head['distance'] = train_head['distance'].astype(float)\n train_head['simple_eta'] = train_head['simple_eta'].astype(float)\n train_head['driver_id'] = train_head['driver_id'].astype(int)\n train_head['slice_id'] = train_head['slice_id'].astype(int)\n train_head['date_time'] = int(data_time)\n\n train_head = train_head.merge(feature_cross,on='order_id',how='left')\n train_head = train_head.merge(feature_link,on='order_id',how='left')\n\n feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',\n 'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',\n 'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',\n 'len_tmp',\n 'link_time_mean', 'link_time_std'],\n axis=1)\n feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)\n train_head = train_head.merge(feature_sqe, on='order_id', 
how='left')\n train_head = train_head.merge(feature_head, on='order_id', how='left')\n\n print('merge finish!')\n train_head = reduce_mem_usage_parallel(train_head,28)\n df.append(train_head.drop('order_id',axis=1))\n del train\n gc.collect()\n count +=1\ndf = pd.concat(df,axis=0)\n\ntest = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)\ntest_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\ntest_head['order_id'] = test_head['order_id'].astype(str)\ntest_head['ata'] = test_head['ata'].astype(float)\ntest_head['distance'] = test_head['distance'].astype(float)\ntest_head['simple_eta'] = test_head['simple_eta'].astype(float)\ntest_head['driver_id'] = test_head['driver_id'].astype(int)\ntest_head['slice_id'] = test_head['slice_id'].astype(int)\n\n\nfeature_cross = pd.read_csv(root_path + 'feature/test/cross_fea_order_id_level_{}.csv'.format('20200901'),nrows=nrows)\nfeature_link = pd.read_csv(root_path + 'feature/test/link_fea_order_id_level_{}.csv'.format('20200901'), nrows=nrows)\nfeature_head = pd.read_csv(root_path + 'feature/test/head_link_{}.csv'.format('20200901'),nrows=nrows)\nfeature_sqe = pd.read_csv(root_path + 'feature/test/{}.csv'.format('20200901'),nrows=nrows)\n\ntest_head['date_time'] = 20200901\n\nfeature_cross['order_id'] = feature_cross['order_id'].astype(str)\nfeature_link['order_id'] = feature_link['order_id'].astype(str)\nfeature_head['order_id'] = feature_head['order_id'].astype(str)\nfeature_sqe['order_id'] = feature_sqe['order_id'].astype(str)\n\ntest_head = test_head.merge(feature_cross, on='order_id', how='left')\n\ntest_head = test_head.merge(feature_link,on='order_id',how='left')\n\nfeature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',\n 'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',\n 'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',\n 'len_tmp',\n 'link_time_mean', 'link_time_std'],\n axis=1)\nfeature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)\ntest_head = test_head.merge(feature_sqe, on='order_id', how='left')\ntest_head = test_head.merge(feature_head, on='order_id', how='left')\n\ntest_head = reduce_mem_usage_parallel(test_head,28)\ndel feature_cross,feature_link\ngc.collect()\n\nX_train = df.drop('ata',axis=1)\ny_train = df['ata']\nX_test = test_head.drop(['order_id','ata'],axis=1)\n\nfolds = 5\nskf = KFold(n_splits=folds, shuffle=True, random_state=2021)\ntrain_mean = np.zeros(shape=[1,folds])\ntest_predict = np.zeros(shape=[X_test.shape[0], folds],dtype=float)\nk_fold_mape = []\nfeature_importance_df = pd.DataFrame()\n# Display/plot feature importance\ndef display_importances(feature_importance_df_):\n feature_importance_df_.to_csv('feature_importances.csv',index=False)\n cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:100].index\n best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n best_features = best_features.groupby('feature',as_index = False)['importance'].mean()\n best_features = best_features.sort_values(by = 'importance',ascending=False)\n plt.figure(figsize=(8, 10))\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n plt.title('LightGBM Features (avg over folds)')\n plt.tight_layout()\n plt.savefig('feature_importances.jpg')\n # 
plt.show()\n\nscores = 0\nthreshold = 0\nprint('start training......')\nprint('训练集维度:',X_train.shape)\nprint('测试集维度:',X_test.shape)\nfor i, (trn_idx, val_idx) in enumerate(skf.split(X_train, y_train)):\n    clf = lgb.LGBMRegressor(\n        boosting_type='gbdt',\n        objective='regression',\n        n_estimators=10000,\n        learning_rate=0.1,\n        num_leaves=170,\n        max_bin=63,\n        max_depth=-1,\n        random_state = 2021,\n        subsample_for_bin=200000,\n        feature_fraction=0.84,\n        bagging_fraction=0.86,\n        bagging_freq=7,\n        min_child_samples=89,\n        lambda_l1=0.006237830242067111,\n        lambda_l2=2.016472023736186e-05,\n        metric=None,\n        n_jobs = 30,\n        # device='gpu'\n    )\n    clf.fit(X_train.iloc[trn_idx], y_train.iloc[trn_idx], eval_set=[(X_train.iloc[trn_idx], y_train.iloc[trn_idx])\n        , (X_train.iloc[val_idx], y_train.iloc[val_idx])],\n            eval_metric=lambda y_true, y_pred:[lgb_score_mape(y_true, y_pred)],\n            verbose=100, early_stopping_rounds=100)\n\n    fold_importance_df = pd.DataFrame()\n    fold_importance_df[\"feature\"] = X_train.columns\n    fold_importance_df[\"importance\"] = clf.feature_importances_\n    fold_importance_df[\"fold\"] = i + 1\n    feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)\n    print('predicting')\n    val_predict = clf.predict(X_train.iloc[val_idx], num_iteration=clf.best_iteration_)\n    test_predict[:,i] = clf.predict(X_test, num_iteration=clf.best_iteration_)\n\n    k_fold_mape.append(MAPE(y_train.iloc[val_idx],val_predict))\n    print(\"kfold_{}_mape_score:{} \".format(i, k_fold_mape[i]))\n\nprint('Train set kfold {} mean mape:'.format(folds), np.mean(k_fold_mape))\ndisplay_importances(feature_importance_df)\ntest_head['result'] = np.mean(test_predict,axis=1)\ntest_head['id'] = test_head['order_id']\ntest_head[['id','result']].to_csv('submission.csv',index=False)\n"
},
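The script scores folds with its `MAPE` helper and feeds the same formula to LightGBM through `lgb_score_mape`. A self-contained check of the metric on made-up numbers:

~~~python
import numpy as np

def mape(true, pred):
    # mean absolute percentage error, as in the MAPE helper above
    diff = np.abs(np.array(pred) - np.array(true))
    return np.mean(diff / true)

y_true = np.array([100.0, 200.0, 400.0])
y_pred = np.array([110.0, 180.0, 400.0])
print(mape(y_true, y_pred))  # (0.1 + 0.1 + 0.0) / 3 ≈ 0.0667
~~~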
{
"alpha_fraction": 0.45051318407058716,
"alphanum_fraction": 0.6898826956748962,
"avg_line_length": 122.95454406738281,
"blob_id": "d423e0f85f6aa6c88a28f30af25ed2dfbb11e4cd",
"content_id": "0492fcb51e3fd2b87b0bd6f70f79f1c45b0f64f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3300,
"license_type": "permissive",
"max_line_length": 342,
"num_lines": 22,
"path": "/机器学习竞赛实战_优胜解决方案/机器学习实战小项目/特征工程建模/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# README\n\n### 难度由上至下递增\n\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E4%BF%A1%E7%94%A8%E5%8D%A1%E6%AC%BA%E8%AF%88%E6%A3%80%E6%B5%8B'>信用卡欺诈检测(含数据集)</a>\n - 相关信息:逻辑回归、分类任务、预测0/1\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%B7%A5%E4%B8%9A%E5%8C%96%E5%B7%A5%E7%94%9F%E4%BA%A7%E9%A2%84%E6%B5%8B'>工业化工生产预测(含数据集)</a>\n - 相关信息:xgboost、细粒度处理、冠军方案\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%99%BA%E6%85%A7%E5%9F%8E%E5%B8%82-%E9%81%93%E8%B7%AF%E9%80%9A%E8%A1%8C%E6%97%B6%E9%97%B4%E9%A2%84%E6%B5%8B'>智慧城市-道路通行时间预测(含数据集)</a>\n - 相关信息:时间分片、特征交叉、预测道路通行时间\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BB%BA%E7%AD%91%E8%83%BD%E6%BA%90%E5%88%A9%E7%94%A8%E7%8E%87%E9%A2%84%E6%B5%8B'>建筑能源利用率预测(含数据集)</a>\n - 相关信息:模型流程、多模型比较及可视化、预测得分\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E4%BA%AC%E4%B8%9C%E7%94%A8%E6%88%B7%E8%B4%AD%E4%B9%B0%E6%84%8F%E5%90%91%E9%A2%84%E6%B5%8B'>京东用户购买意向预测(含数据集)</a>\n - 相关信息:待补充\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BF%AB%E6%89%8B%E7%9F%AD%E8%A7%86%E9%A2%91%E7%94%A8%E6%88%B7%E6%B4%BB%E8%B7%83%E5%BA%A6%E5%88%86%E6%9E%90'>快手用户活跃预测(含数据集)</a>\n - 相关信息:RNN网络、时间序数据、冠军方案\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E7%89%B9%E5%BE%81%E5%B7%A5%E7%A8%8B%E5%BB%BA%E6%A8%A1'>特征工程建模(含数据集)</a>\n - 相关信息:银行违约、寻找疾病原因及解释、模型解释工具\n- <a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%B8%B8%E7%94%A8%E7%89%B9%E5%BE%81%E6%9E%84%E5%BB%BA%E6%96%B9%E6%B3%95'>常用特征构建方法(含数据集)</a>\n - 相关信息:图像、文本及数据特征的处理方法\n\n最后两个为技能补充。\n\n"
},
{
"alpha_fraction": 0.6835699677467346,
"alphanum_fraction": 0.7525355219841003,
"avg_line_length": 13.114285469055176,
"blob_id": "851eb8b7d00810f3da7197aad1e5ac17a39cbbcc",
"content_id": "9b405c22b27f33c555a29821b5f67c01863bd5fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1099,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 35,
"path": "/机器学习算法原理及推导/李航——统计学习方法/8.提升方法——三个臭皮匠,顶个诸葛亮.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 8.提升方法——三个臭皮匠,顶个诸葛亮\n\nboosting\n\n多个弱分类器组合\n\n\n\n### 知识树\n\nKnowledge tree\n\n\n\n\n\n### 提升方法\n\nBoosting\n\n- 基本思路:\n - 将弱可学习算法提升为强可学习算法\n - 其中提升方法是集成学习的一种\n - 集成学习两个主要类别\n - 序列方法\n - 并行方法\n\nAdaboost算法\n\n- 解决分类问题 y ∈ [-1,+1]\n- 在训练数据上训练得到模型,查看模型在整体数据和单个数据的分类效果\n- 在整体数据上分类效果较好,则该模型在最后的模型中占较大比例,反之。\n - 比如:有5个小模型,预测一个数据是正1或者负1,但是不同模型的整体效果不一样,好的模型乘以更大的概率,最后再全部求和的sigmoid得出是正1还是负1\n- 在单个数据上分类效果较好,那么在训练下个模型时调小单个数据的权值,反之。\n- 在上面过程迭代N次之后,直到最后的分类结果达到预期目标。将所有的模型组合,得到强可学习模型。"
},
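A NumPy sketch of the two AdaBoost updates this note describes: a learner with lower weighted error gets a larger say in the final vote, and correctly classified samples get their weights shrunk before the next round. The labels and the weak learner's predictions are made up:

~~~python
import numpy as np

y = np.array([1, 1, -1, -1, 1])            # true labels
h = np.array([1, -1, -1, -1, 1])           # one weak learner's predictions
w = np.ones_like(y, dtype=float) / len(y)  # sample weights, initially uniform

err = np.sum(w[h != y])                # weighted error (here 0.2)
alpha = 0.5 * np.log((1 - err) / err)  # this learner's vote weight (~0.693)
w = w * np.exp(-alpha * y * h)         # correct samples shrink, wrong ones grow
w = w / w.sum()                        # renormalize to a distribution
print(alpha, w)  # the misclassified sample now carries weight 0.5
~~~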
{
"alpha_fraction": 0.6146308183670044,
"alphanum_fraction": 0.7800820469856262,
"avg_line_length": 19.031963348388672,
"blob_id": "bb00defc73aea26d0b05ef7f3163e923f97b7e5a",
"content_id": "93bb21d91b3bf0fbcd97445c621d0d0624730abd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 8175,
"license_type": "permissive",
"max_line_length": 426,
"num_lines": 219,
"path": "/NLP通用框架BERT项目实战/第一章——NLP通用框架BERT原理解读.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第一章——NLP通用框架BERT原理解读\n\n#### 传统解决方案遇到的问题\n\n传统的RNN网络\n\n\n\n- 训练速度:无法加速训练,并行等\n- Self-Attention机制(注意力),一段话中,不是每个词都重要,我们只需要关注重要的部分。如:等下我们把这些活干完,下班后我们一起去吃饭吧,我请客。是不是对于听的人来说主要是“一起吃饭,我请客”。\n\n- word2vec:训练好词向量就永久不变了,不同的语境相同的词相同的向量,但这合理吗?就想我们在生气的时候说的傻子,和开心的时候说傻子,意思是完全不一样的,\n\n\n\n#### Transformer整体架构如下\n\n\n\n#### 注意力机制的作用\n\n- 对于输入的数据,我们的关注点是什么?\n- 如何才能让计算机关注到这些有价值的信息?\n\n\n\n> 如上,传入一段文本,如果我们没有强调注意什么,那么词向量结果可能是平行的,如果我们强调“eating”,那么“eating”这个词的词向量就会有所不同。\n\n如果是人为的加权,告诉计算机哪个重要,这显然是不合实际的,应该让计算机自己发现哪些重要。\n\n\n\n> “it”在第一句中是指代“animal”,表示它太累了没有过去。\n>\n> “it”在第二句中指代“street”,表示路太窄了没有过去。\n>\n> 这里关注的是“animal”,我们希望即使是第二句,“animal”对结果的影响越大。\n\n\n\n#### Self-Attention计算\n\n- 输入经过编码后得到的向量。\n- 得到当前词语上下文的关系,可以当做是加权。\n- 构建三个矩阵分别查询当前词跟其它词的关系,以及特征向量的表达。\n\n如下图:\n\n\n\n> 先转换成向量,构建三个矩阵Q、K、V,求出来第一个词编码的时候怎么找到上下文。右边的W就是权重。\n\n这三个矩阵具体做什么:\n\n- Q: query,要去查询的\n- K: key,等着被查的\n- V: value,实际的特征信息\n\n\n\n> X是输入内容,结果W后,形成Q、K、V不同矩阵的特征向量。\n\n\n\n> q与k的内积表示有多匹配,如果Xa与Xb之间无关的时候,那么其在坐标系上的表示是垂直的;如果有关系,则非垂直,则有夹角有内积,相关性越大,则夹角越小,内积越大。\n\n\n\n#### 特征分配与softmax机制\n\n- 最终的的得分值经过softmax就是最终上下文结果\n\n > 加上dk是因为计算内积会因为向量维度越长,向量维度越大,越大影响就越大,影响不应该根据维度的长宽改变,所以应该除掉向量维度的影响。softemax的计算不理解的可以跳转到[深度学习入门第一章](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E4%B8%80%E7%AB%A0%E2%80%94%E2%80%94%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%BF%85%E5%A4%87%E7%9F%A5%E8%AF%86%E7%82%B9.md#softmax%E5%88%86%E7%B1%BB%E5%99%A8%E5%88%86%E7%B1%BB%E4%BB%BB%E5%8A%A1)\n\n- Scaled Dot-Product Attention 不能让分值随着向量维度的增大而增加\n\n\n这样就能计算出来了结果,这个就是Self-Attention机制,内积大的就重要,小的就不重要。\n\n每个词的Q会跟整个序列中每个K计算得分,然后基于得分再分配特征\n\n\n\n> 这样就解决了word2vec训练完后词向量不变的问题,每次都跟其它组合项进行计算,从而改变权重和词向量。\n\n**再看一遍整体流程**\n\n\n\n- input~Score:每个词的Q会跟每个K计算得分。\n- Divide by dk:除掉维度的影响。\n- Softmax:得到最大的概率值。\n- Softmax × Value:求得词向量的编码。\n\n这样第一个词就编码完成,后面的第二、第N个词也是如此操作。\n\n总结:Attention依靠内积来求得每个词和每个K的得分,且并行求。\n\n\n\n#### Multi/head的作用\n\n之前卷积中,我们利用fillter(w)来将X映射成更多的特征表达,再取最大的进行降维,在这里也是同样道理。\n\n\n\n- 通过不同的head得到多个特征表达\n- 将所有特征拼接在一起\n- 可以通过再一层全连接来降维\n\nMulti-Head架构图如下\n\n\n\n> 不同的注意力结果,得到的特征向量表达也是不同的。\n\n堆叠多层:\n\n\n\n> 由于输入输出都是向量,也就是可以堆叠更多层,计算方法都是相同的,只是增加了多层。\n\n\n\n#### 位置编码与多层堆叠\n\n> 位置信息:在self-attention中,每个词都会考虑整个序列的加权,所以其出现位置并不会对结果产生什么影响,相当于放哪都无所谓,但是这跟实际就有些不符合了,我们希望模型能对位置有额外的认识。\n\n\n\n> POSITIONAL ENCODING:将余弦和正弦的周期表达信号,当作位置信息。\n\n**Add与Normalize**\n\n\n\n- 归一化:\n\n > Batch Size:将其一行,让其均值为0,标准差为1\n >\n > Layer:让其一列,变成均值为0,标准差为1\n\n- 连接:基本的残差连接方式\n\n > 残差连接:X如果处理完残差变小,则使用该X,如果残差反而大了,则使用原有的X。也就是堆叠的层数中,我们保证了堆叠的过程中,结果一定不会比原来差。\n\n\n\n#### Decoder\n\n- Attention计算不同:只需要用Q去查encode的K,V即可\n- 加入MASK机制:遮挡部分值,不使用它,比如内积会将每个相乘相加,而在计算内积的过程不能让它知道全部信息,比如下面的I am a student,遮挡掉student,否则相当于透题,相当于分类中训练的标签。\n\n\n\n最终输出结果\n\n- 得到最终预测结果\n- 损失函数cross-entropy即可\n\n\n\n\n\n#### Transformer整体架构\n\n- Self-Attention\n- Multi-Head\n- 多层堆叠,位置编码\n- 并行加速训练\n\n\n\n**Encode端**\n\n- input:输入X1、X2、...、Xn;\n- 初始化:初始化词编码,先随机出词的编码再进行调整,但现在一般使用预训练好的模型,也就是没有了这个步骤;\n- Positional Encoding:加入位置编码;\n- Nx:N层堆叠Self-Attention;\n- Multi-Head:多头Self-Attention;\n- 残差连接:随着堆叠层数,可能结果会变差,残差连接则能保证学习至少不比原来差;\n\n**Decode端**\n\n- MASK机制:遮挡掉部分,不使用后面的结果,相当于分类中训练的标签;\n- 获取K,V的结果;\n- Nx:N层堆叠;\n- Linear:得到所有的编码,Linear成向量;\n- Softmax:将向量进行分类;\n\n这就是Transformer的整体结构。而Transformer和BERT的关系是...,我们接着往下看\n\n\n\n#### BERT模型训练方法\n\n- Bidirectonal Encoder Representations from Transformers\n- transformer的encoder部分\n- 并不需要标签,有语料就能训练了\n\n也就是下图的部分\n\n\n\n如何训练BERT\n\n- 方法1:句子中有15%的词汇被随机mask掉\n- 较给模型去预测被mask的是什么\n- 词语的可能性太多了,中文一般是字\n- 如果BERT训练的向量好,那分类自然好\n\n\n\n> MASK的部分就相当于分类中的训练标签\n\n- 
方法2:预测两个句子是否应该连在一起\n- [sep]:两个句子之间的连接符,[cls]:表示要做分类的向量\n\n\n\n"
},
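A NumPy sketch of the scaled dot-product attention the chapter walks through: scores = QKᵀ/√d_k, a row-wise softmax, then a weighted sum of V. Shapes and values are illustrative only:

~~~python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))  # stabilized softmax
    return e / e.sum(axis=axis, keepdims=True)

seq_len, d_k = 4, 8
rng = np.random.default_rng(0)
Q = rng.normal(size=(seq_len, d_k))  # queries
K = rng.normal(size=(seq_len, d_k))  # keys
V = rng.normal(size=(seq_len, d_k))  # values

scores = Q @ K.T / np.sqrt(d_k)      # divide by sqrt(d_k) so scores don't grow with dimension
weights = softmax(scores, axis=-1)   # each word's attention distribution over the sequence
out = weights @ V                    # new encoding: attention-weighted mix of the values
print(out.shape)  # (4, 8)
~~~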
{
"alpha_fraction": 0.6049124002456665,
"alphanum_fraction": 0.6267949342727661,
"avg_line_length": 44.33956527709961,
"blob_id": "00a2a26e648f8d8ad9727c8ea0c6d005ebd108b6",
"content_id": "e7a4345e96cd2f39964053959e76813ebdaa1abb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 29259,
"license_type": "permissive",
"max_line_length": 138,
"num_lines": 642,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/DCN蒸馏_12953/dcn_model/dcn_model.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nfrom tensorflow import keras\nimport tensorflow as tf\nfrom sklearn.preprocessing import StandardScaler, LabelEncoder\nimport tensorflow.keras.layers as L\n# import tensorflow.keras.models as M\nimport tensorflow.keras.backend as K\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping\n#from keras_radam import RAdam\nfrom keras_radam.training import RAdamOptimizer\nfrom tensorflow.keras import initializers, regularizers, constraints, optimizers, layers, callbacks\nfrom tensorflow.keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D, Conv1D\nfrom tensorflow.keras.layers import Input, Dense, Lambda, Layer\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.models import Model\nimport os\nfrom tensorflow.keras.losses import mean_absolute_percentage_error\n#from tensorflow.contrib.opt import AdamWOptimizer\n\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"]='3'\ngamma = 2.0\nalpha=.25\nepsilon = K.epsilon()\n\n\ndef mape_2(y_true, y_pred):\n y_true = y_true[:, :1]\n y_pred = y_pred[:, :1]\n return tf.py_function(mean_absolute_percentage_error, (y_true, y_pred), tf.float32)\n\ndef mape_3(y_true, y_pred):\n y_true = y_true[:, :1]\n y_pred = y_pred[:, 1:]\n return tf.py_function(mean_absolute_percentage_error, (y_true, y_pred), tf.float32)\n\n\ndef knowledge_distillation_loss_withFL(y_true, y_pred, beta=0.1):\n\n # Extract the groundtruth from dataset and the prediction from teacher model\n y_true, y_pred_teacher = y_true[: , :1], y_true[: , 1:]\n \n # Extract the prediction from student model\n y_pred, y_pred_stu = y_pred[: , :1], y_pred[: , 1:]\n\n loss = beta*focal_loss(y_true,y_pred) + (1-beta)*mean_absolute_percentage_error(y_pred_teacher, y_pred_stu)\n\n return loss\n\n\ndef focal_loss(y_true, y_pred):\n pt_1 = y_pred * y_true\n pt_1 = K.clip(pt_1, epsilon, 1-epsilon)\n CE_1 = -K.log(pt_1)\n FL_1 = alpha* K.pow(1-pt_1, gamma) * CE_1\n \n pt_0 = (1-y_pred) * (1-y_true)\n pt_0 = K.clip(pt_0, epsilon, 1-epsilon)\n CE_0 = -K.log(pt_0)\n FL_0 = (1-alpha)* K.pow(1-pt_0, gamma) * CE_0\n \n loss = K.sum(FL_1, axis=1) + K.sum(FL_0, axis=1)\n return loss\n\n\ndef knowledge_distillation_loss_withBE(y_true, y_pred, beta=0.6):\n\n # Extract the groundtruth from dataset and the prediction from teacher model\n y_true, y_pred_teacher = y_true[: , :1], y_true[: , 1:]\n \n # Extract the prediction from student model\n y_pred, y_pred_stu = y_pred[: , :1], y_pred[: , 1:]\n\n loss = beta*mean_absolute_percentage_error(y_true,y_pred) + (1-beta)*mean_absolute_percentage_error(y_pred_teacher, y_pred_stu)\n\n return loss\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef gru_layer(hidden_dim, dropout):\n return L.Bidirectional(L.GRU(\n hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))\n\n\ndef lstm_layer(hidden_dim, dropout):\n return L.Bidirectional(L.LSTM(\n hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'))\n\n\nclass FeatureDictionary(object):\n def __init__(self, df=None, numeric_cols=[], ignore_cols=[], cate_cols=[]):\n self.df = df\n self.cate_cols = cate_cols\n self.numeric_cols = numeric_cols\n self.ignore_cols = ignore_cols\n self.gen_feat_dict() # feat_dict 获取cate feature每一列的字典长度。\n\n def gen_feat_dict(self):\n self.feat_cate_len = 
{}\n        tc = 0\n        for col in self.cate_cols:\n            # 获取每一列的类别\n            us = self.df[col].unique()\n            us_len = len(us)\n            # 获取每一列的类别对应的维度\n            self.feat_cate_len[col] = us_len\n\n\ndef embedding_layers(fd):\n    # 该函数主要是定义输入和embedding输入的网络层\n    embeddings_tensors = []\n    continus_tensors = []\n    cate_feature = fd.feat_cate_len\n    numeric_feature = fd.numeric_cols\n    for ec in cate_feature:\n        layer_name = ec + '_inp'\n        # for categorical features, embedding特征在维度保持在6×(category cardinality)**(1/4)\n        embed_dim = cate_feature[ec] if int(6 * np.power(cate_feature[ec], 1 / 4)) > cate_feature[ec] else int(\n            6 * np.power(cate_feature[ec], 1 / 4))\n        t_inp, t_embedding = embedding_input(layer_name, cate_feature[ec], embed_dim)\n        embeddings_tensors.append((t_inp, t_embedding))\n        del (t_inp, t_embedding)\n    for cc in numeric_feature:\n        layer_name = cc + '_in'\n        t_inp, t_build = continus_input(layer_name)\n        continus_tensors.append((t_inp, t_build))\n        del (t_inp, t_build)\n    # category feature的输入 这里的输入特征顺序要与xu\n    inp_layer = [et[0] for et in embeddings_tensors]\n    inp_embed = [et[1] for et in embeddings_tensors]\n    # numeric feature的输入\n    inp_layer += [ct[0] for ct in continus_tensors]\n    inp_embed += [ct[1] for ct in continus_tensors]\n\n    return inp_layer, inp_embed\n\n\ndef embedding_input(name, input_dim, output_dim):\n    inp = L.Input(shape=(1,), dtype='int64', name=name)\n    embeddings = L.Embedding(input_dim, output_dim, input_length=1)(inp)\n    return inp, embeddings\n\n\ndef continus_input(name):\n    inp = L.Input(shape=(1,), dtype='float32', name=name)\n    return inp, L.Reshape((1, 1))(inp)\n\n\nclass CrossLayer(L.Layer):\n    def __init__(self, output_dim, num_layer, **kwargs):\n        self.output_dim = output_dim\n        self.num_layer = num_layer\n        super(CrossLayer, self).__init__(**kwargs)\n\n    def get_config(self):\n        # serialize the layer's actual constructor arguments\n        config = super().get_config().copy()\n        config.update({\n            'output_dim': self.output_dim,\n            'num_layer': self.num_layer,\n        })\n        return config\n\n    def build(self, input_shape):\n        self.input_dim = input_shape[2]\n        self.W = []\n        self.bias = []\n        for i in range(self.num_layer):\n            self.W.append(\n                self.add_weight(shape=[1, self.input_dim], initializer='glorot_uniform', name='w_{}'.format(i),\n                                trainable=True))\n            self.bias.append(\n                self.add_weight(shape=[1, self.input_dim], initializer='zeros', name='b_{}'.format(i), trainable=True))\n        self.built = True\n\n    def call(self, input):\n        for i in range(self.num_layer):\n            if i == 0:\n                cross = L.Lambda(lambda x: K.batch_dot(K.dot(x, K.transpose(self.W[i])), x) + self.bias[i] + x)(input)\n            else:\n                cross = L.Lambda(lambda x: K.batch_dot(K.dot(x, K.transpose(self.W[i])), input) + self.bias[i] + x)(\n                    cross)\n        return L.Flatten()(cross)\n\n    def compute_output_shape(self, input_shape):\n        return None, self.output_dim\n\n\ndef preprocess(df, cate_cols, numeric_cols):\n    for cl in cate_cols:\n        le = LabelEncoder()\n        df[cl] = le.fit_transform(df[cl])\n    cols = cate_cols + numeric_cols\n    X_train = df[cols]\n    return X_train\n\n\ndef DCN_model(inp_layer, inp_embed, link_size, cross_size, slice_size, input_deep_col, input_wide_col,\n              link_nf_size, cross_nf_size, encoder, link_seqlen=170, cross_seqlen=12, pred_len=1,\n              dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001, \n              kernel_size1=3, kernel_size2=2, conv_size=128, conv=False, have_knowledge=True):\n    inp = L.concatenate(inp_embed, axis=-1)\n    link_inputs = L.Input(shape=(link_seqlen, link_nf_size), name='link_inputs')\n    
cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size), name='cross_inputs')\n deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')\n slice_input = L.Input(shape=(1,), name='slice_input')\n wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_inputs')\n\n # link----------------------------\n categorical_link = link_inputs[:, :, :1]\n embed_link = L.Embedding(input_dim=link_size, output_dim=embed_dim, mask_zero=True)(categorical_link)\n reshaped_link = tf.reshape(embed_link, shape=(-1, embed_link.shape[1], embed_link.shape[2] * embed_link.shape[3]))\n reshaped_link = L.SpatialDropout1D(sp_dropout)(reshaped_link)\n \n \"\"\"\n categorical_slice = link_inputs[:, :, 5:6]\n embed_slice = L.Embedding(input_dim=289, output_dim=16, mask_zero=True)(categorical_slice)\n reshaped_slice = tf.reshape(embed_slice, shape=(-1, embed_slice.shape[1], embed_slice.shape[2] * embed_slice.shape[3]))\n reshaped_slice = L.SpatialDropout1D(sp_dropout)(reshaped_slice)\n\n categorical_hightemp = link_inputs[:, :, 6:7]\n embed_hightemp = L.Embedding(input_dim=33, output_dim=8, mask_zero=True)(categorical_hightemp)\n reshaped_hightemp = tf.reshape(embed_hightemp, shape=(-1, embed_hightemp.shape[1], embed_hightemp.shape[2] * embed_hightemp.shape[3]))\n reshaped_hightemp = L.SpatialDropout1D(sp_dropout)(reshaped_hightemp)\n\n categorical_weather = link_inputs[:, :, 7:8]\n embed_weather = L.Embedding(input_dim=7, output_dim=8, mask_zero=True)(categorical_weather)\n reshaped_weather = tf.reshape(embed_weather, shape=(-1, embed_weather.shape[1], embed_weather.shape[2] * embed_weather.shape[3]))\n reshaped_weather = L.SpatialDropout1D(sp_dropout)(reshaped_weather)\n \n numerical_fea1 = link_inputs[:, :, 1:5]\n numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)\n hidden = L.concatenate([reshaped_link, numerical_fea1, reshaped_slice, reshaped_hightemp, reshaped_weather], axis=2)\n \n \"\"\"\n if have_knowledge:\n numerical_fea1 = link_inputs[:, :, 1:5]\n numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)\n \n \n categorical_ar_st = link_inputs[:, :, 5:6]\n categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)\n embed_ar_st = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_st)\n reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))\n reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)\n\n categorical_ar_sl = link_inputs[:, :, 6:7]\n categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)\n embed_ar_sl = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_sl)\n reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))\n reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)\n hidden = L.concatenate([reshaped_link, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)\n \n #hidden = L.concatenate([reshaped_link, numerical_fea1],axis=2)\n else:\n numerical_fea1 = link_inputs[:, :, 1:5]\n numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1) \n \n categorical_arrival = link_inputs[:, :, 5:6]\n categorical_arrival = L.Masking(mask_value=-1, name='categorical_arrival')(categorical_arrival)\n embed_ar = L.Embedding(input_dim=5, output_dim=16)(categorical_arrival)\n reshaped_ar = tf.reshape(embed_ar, shape=(-1, embed_ar.shape[1], embed_ar.shape[2] * embed_ar.shape[3]))\n reshaped_ar = 
L.SpatialDropout1D(sp_dropout)(reshaped_ar)\n \n categorical_ar_st = link_inputs[:, :, 6:7]\n categorical_ar_st = L.Masking(mask_value=-1, name='categorical_ar_st')(categorical_ar_st)\n embed_ar_st = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_st)\n reshaped_ar_st = tf.reshape(embed_ar_st, shape=(-1, embed_ar_st.shape[1], embed_ar_st.shape[2] * embed_ar_st.shape[3]))\n reshaped_ar_st = L.SpatialDropout1D(sp_dropout)(reshaped_ar_st)\n\n categorical_ar_sl = link_inputs[:, :, 7:8]\n categorical_ar_sl = L.Masking(mask_value=-1, name='categorical_ar_sl')(categorical_ar_sl)\n embed_ar_sl = L.Embedding(input_dim=289, output_dim=8)(categorical_ar_sl)\n reshaped_ar_sl = tf.reshape(embed_ar_sl, shape=(-1, embed_ar_sl.shape[1], embed_ar_sl.shape[2] * embed_ar_sl.shape[3]))\n reshaped_ar_sl = L.SpatialDropout1D(sp_dropout)(reshaped_ar_sl)\n hidden = L.concatenate([reshaped_link, reshaped_ar, reshaped_ar_st, reshaped_ar_sl, numerical_fea1],axis=2)\n \n #hidden = L.concatenate([reshaped_link, reshaped_ar, numerical_fea1],axis=2)\n #hidden = L.Masking(mask_value=0)(hidden)\n for x in range(n_layers):\n hidden = gru_layer(hidden_dim, dropout)(hidden)\n\n if conv:\n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\n #x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden)\n #avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\n #max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\n truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])\n else:\n truncated_link = hidden[:, :pred_len]\n truncated_link = L.Flatten()(truncated_link)\n\n # truncated_link = Attention(256)(hidden)\n # CROSS----------------------------\n categorical_fea2 = cross_inputs[:, :, :1]\n embed2 = L.Embedding(input_dim=cross_size, output_dim=16, mask_zero=True)(categorical_fea2)\n reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))\n reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)\n\n numerical_fea2 = cross_inputs[:, :, 1:]\n numerical_fea2 = L.Masking(mask_value=0, name='numerical_fea2')(numerical_fea2)\n hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)\n # hidden2 = L.Masking(mask_value=0)(hidden2)\n for x in range(n_layers):\n hidden2 = gru_layer(hidden_dim, dropout)(hidden2)\n\n if conv:\n x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)\n avg_pool3_gru = GlobalAveragePooling1D()(x_conv3)\n max_pool3_gru = GlobalMaxPooling1D()(x_conv3)\n #x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden2)\n #avg_pool4_gru = GlobalAveragePooling1D()(x_conv4)\n #max_pool4_gru = GlobalMaxPooling1D()(x_conv4)\n truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])\n else:\n truncated_cross = hidden2[:, :pred_len]\n truncated_cross = L.Flatten()(truncated_cross)\n \n # truncated_cross = Attention(256)(hidden2)\n # SLICE----------------------------\n embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)\n embed_slice = L.Flatten()(embed_slice)\n\n # DEEP_INPUS\n x = encoder(deep_inputs)\n x = L.Concatenate()([x, deep_inputs]) # use both raw and encoded features\n x = L.BatchNormalization()(x)\n x = L.Dropout(0.25)(x)\n \n for i in range(3):\n x = L.Dense(256)(x)\n x = L.BatchNormalization()(x)\n x = 
L.Lambda(tf.keras.activations.swish)(x)\n x = L.Dropout(0.25)(x)\n dense_hidden3 = L.Dense(64,activation='linear')(x)\n\n # DCN\n cross = CrossLayer(output_dim=inp.shape[2], num_layer=8, name=\"cross_layer\")(inp)\n\n\n # MAIN-------------------------------\n truncated = L.concatenate([truncated_link, truncated_cross, cross, dense_hidden3, wide_inputs, embed_slice])\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))\n\n if have_knowledge:\n out = L.Dense(2, activation='linear', name='out')(truncated)\n model = tf.keras.Model(inputs=[inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],\n outputs=out)\n print(model.summary())\n model.compile(loss=knowledge_distillation_loss_withBE,\n optimizer=RAdamOptimizer(learning_rate=1e-3), # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)\n #metrics={'out':'mape'} # AdamWOptimizer(weight_decay=1e-4)\n metrics=[mape_2,mape_3]\n )\n else:\n out = L.Dense(1, activation='linear', name='out')(truncated)\n model = tf.keras.Model(inputs=[inp_layer, link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],\n outputs=out)\n print(model.summary())\n model.compile(loss=['mape'],\n optimizer=RAdamOptimizer(learning_rate=1e-3), # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)\n #metrics={'out':'mape'}\n metrics=['mape']\n )\n\n return model\n\n\ndef arrival_model(inp_layer, inp_embed, link_size, cross_size, slice_size, input_deep_col, input_wide_col,\n link_nf_size, cross_nf_size, link_seqlen=170, cross_seqlen=12, pred_len=1,\n dropout=0.25, sp_dropout=0.1, embed_dim=64, hidden_dim=128, n_layers=3, lr=0.001,\n kernel_size1=3, kernel_size2=2, conv_size=128, conv=False):\n inp = L.concatenate(inp_embed, axis=-1)\n link_inputs = L.Input(shape=(link_seqlen, link_nf_size), name='link_inputs')\n cross_inputs = L.Input(shape=(cross_seqlen, cross_nf_size), name='cross_inputs')\n deep_inputs = L.Input(shape=(input_deep_col,), name='deep_input')\n slice_input = L.Input(shape=(1,), name='slice_input')\n wide_inputs = keras.layers.Input(shape=(input_wide_col,), name='wide_inputs')\n\n # link----------------------------\n categorical_link = link_inputs[:, :, :1]\n embed_link = L.Embedding(input_dim=link_size, output_dim=embed_dim, mask_zero=True)(categorical_link)\n reshaped_link = tf.reshape(embed_link, shape=(-1, embed_link.shape[1], embed_link.shape[2] * embed_link.shape[3]))\n reshaped_link = L.SpatialDropout1D(sp_dropout)(reshaped_link)\n \"\"\" \n categorical_slice = link_inputs[:, :, 5:6]\n embed_slice = L.Embedding(input_dim=289, output_dim=16, mask_zero=True)(categorical_slice)\n reshaped_slice = tf.reshape(embed_slice, shape=(-1, embed_slice.shape[1], embed_slice.shape[2] * embed_slice.shape[3]))\n reshaped_slice = L.SpatialDropout1D(sp_dropout)(reshaped_slice)\n\n categorical_hightemp = link_inputs[:, :, 6:7]\n embed_hightemp = L.Embedding(input_dim=33, output_dim=8, mask_zero=True)(categorical_hightemp)\n reshaped_hightemp = tf.reshape(embed_hightemp, shape=(-1, embed_hightemp.shape[1], embed_hightemp.shape[2] * embed_hightemp.shape[3]))\n reshaped_hightemp = L.SpatialDropout1D(sp_dropout)(reshaped_hightemp)\n\n categorical_weather = link_inputs[:, :, 7:8]\n embed_weather = L.Embedding(input_dim=7, output_dim=8, mask_zero=True)(categorical_weather)\n reshaped_weather = tf.reshape(embed_weather, shape=(-1, embed_weather.shape[1], 
embed_weather.shape[2] * embed_weather.shape[3]))\n reshaped_weather = L.SpatialDropout1D(sp_dropout)(reshaped_weather)\n \n numerical_fea1 = link_inputs[:, :, 1:5]\n numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)\n hidden = L.concatenate([reshaped_link, numerical_fea1, reshaped_slice, reshaped_hightemp, reshaped_weather], axis=2)\n \"\"\"\n numerical_fea1 = link_inputs[:, :, 1:]\n numerical_fea1 = L.Masking(mask_value=0, name='numerical_fea1')(numerical_fea1)\n hidden = L.concatenate([reshaped_link, numerical_fea1],axis=2)\n \n #hidden = L.Masking(mask_value=0)(hidden)\n for x in range(n_layers):\n hidden = gru_layer(hidden_dim, dropout)(hidden)\n if conv:\n x_conv1 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden)\n avg_pool1_gru = GlobalAveragePooling1D()(x_conv1)\n max_pool1_gru = GlobalMaxPooling1D()(x_conv1)\n #x_conv2 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden)\n #avg_pool2_gru = GlobalAveragePooling1D()(x_conv2)\n #max_pool2_gru = GlobalMaxPooling1D()(x_conv2)\n truncated_link = concatenate([avg_pool1_gru, max_pool1_gru])\n else:\n truncated_link = hidden[:, :pred_len]\n truncated_link = L.Flatten()(truncated_link)\n\n # truncated_link = Attention(256)(hidden)\n # CROSS----------------------------\n categorical_fea2 = cross_inputs[:, :, :1]\n embed2 = L.Embedding(input_dim=cross_size, output_dim=16, mask_zero=True)(categorical_fea2)\n reshaped2 = tf.reshape(embed2, shape=(-1, embed2.shape[1], embed2.shape[2] * embed2.shape[3]))\n reshaped2 = L.SpatialDropout1D(sp_dropout)(reshaped2)\n\n numerical_fea2 = cross_inputs[:, :, 1:]\n numerical_fea2 = L.Masking(mask_value=0, name='numerical_fea2')(numerical_fea2)\n hidden2 = L.concatenate([reshaped2, numerical_fea2], axis=2)\n # hidden2 = L.Masking(mask_value=0)(hidden2)\n for x in range(n_layers):\n hidden2 = gru_layer(hidden_dim, dropout)(hidden2)\n\n if conv:\n x_conv3 = Conv1D(conv_size, kernel_size=kernel_size1, padding='valid', kernel_initializer='he_uniform')(hidden2)\n avg_pool3_gru = GlobalAveragePooling1D()(x_conv3)\n max_pool3_gru = GlobalMaxPooling1D()(x_conv3)\n #x_conv4 = Conv1D(conv_size, kernel_size=kernel_size2, padding='valid', kernel_initializer='he_uniform')(hidden2)\n #avg_pool4_gru = GlobalAveragePooling1D()(x_conv4)\n #max_pool4_gru = GlobalMaxPooling1D()(x_conv4)\n truncated_cross = concatenate([avg_pool3_gru, max_pool3_gru])\n else:\n truncated_cross = hidden2[:, :pred_len]\n truncated_cross = L.Flatten()(truncated_cross)\n\n # truncated_cross = Attention(256)(hidden2)\n # SLICE----------------------------\n embed_slice = L.Embedding(input_dim=slice_size, output_dim=1)(slice_input)\n embed_slice = L.Flatten()(embed_slice)\n\n # DEEP_INPUS\n x = L.BatchNormalization()(deep_inputs)\n x = L.Dropout(0.25)(x)\n\n for i in range(3):\n x = L.Dense(256)(x)\n x = L.BatchNormalization()(x)\n x = L.Lambda(tf.keras.activations.swish)(x)\n x = L.Dropout(0.25)(x)\n dense_hidden3 = L.Dense(64,activation='linear')(x)\n\n # DCN\n cross = CrossLayer(output_dim=inp.shape[2], num_layer=8, name=\"cross_layer\")(inp)\n truncated = L.concatenate([truncated_link, truncated_cross, cross, dense_hidden3, wide_inputs, embed_slice])\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(512, activation='relu') (truncated))\n truncated = L.BatchNormalization()(truncated)\n truncated = L.Dropout(dropout)(L.Dense(256, activation='relu') (truncated))\n\n arrival_0 = 
L.Dense(1, activation='linear', name='arrival_0')(truncated)\n    arrival_1 = L.Dense(1, activation='linear', name='arrival_1')(truncated)\n    arrival_2 = L.Dense(1, activation='linear', name='arrival_2')(truncated)\n    arrival_3 = L.Dense(1, activation='linear', name='arrival_3')(truncated)\n    arrival_4 = L.Dense(1, activation='linear', name='arrival_4')(truncated)\n\n    model = tf.keras.Model(inputs=[inp_layer,link_inputs, cross_inputs, deep_inputs, wide_inputs, slice_input],\n                           outputs=[arrival_0,arrival_1,arrival_2,arrival_3,arrival_4])\n    print(model.summary())\n    model.compile(loss='mse',\n                  optimizer=RAdamOptimizer(learning_rate=1e-3)  # 'adam' RAdam(warmup_proportion=0.1, min_lr=1e-7)\n                  )\n    \n    return model\n\n\ndef get_mc_es_lr(model_name: str, patience=5, min_delta=1e-4):\n    # no trailing comma after ModelCheckpoint: it would turn `mc` into a 1-element tuple\n    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))\n    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',\n                                          restore_best_weights=True, patience=patience)\n    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=patience-1, mode='min',\n                                              min_delta=min_delta)\n\n    return mc, es, lr\n\n\ndef get_mc_es_lr_for_student(model_name: str, patience=5, min_delta=1e-4):\n    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))\n    es = tf.keras.callbacks.EarlyStopping(monitor='val_mape_2', mode='min',\n                                          restore_best_weights=True, patience=patience)\n    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_mape_2', factor=0.8, patience=patience, mode='min',\n                                              min_delta=min_delta)\n\n    return mc, es, lr\n\n\n\ndef create_autoencoder(input_dim, output_dim, noise=0.05):\n    i = L.Input(input_dim)\n    encoded = L.BatchNormalization()(i)\n    encoded = L.GaussianNoise(noise)(encoded)\n    encoded = L.Dense(128, activation='relu')(encoded)\n    decoded = L.Dropout(0.2)(encoded)\n    decoded = L.Dense(input_dim,name='decoded')(decoded)\n    x = L.Dense(64, activation='relu')(decoded)\n    x = L.BatchNormalization()(x)\n    x = L.Dropout(0.2)(x)\n    x = L.Dense(output_dim, activation='linear', name='ata_output')(x)\n    \n    encoder = keras.models.Model(inputs=i, outputs=decoded)\n    autoencoder = keras.models.Model(inputs=i, outputs=[decoded, x])\n    \n    autoencoder.compile(optimizer=RAdamOptimizer(learning_rate=1e-3), loss={'decoded':'mse', 'ata_output': 'mape'})\n    return autoencoder, encoder\n\n\nclass Attention(L.Layer):\n    def __init__(self, step_dim,\n                 W_regularizer=None, b_regularizer=None,\n                 W_constraint=None, b_constraint=None,\n                 bias=True, **kwargs):\n        self.supports_masking = True\n        self.init = initializers.get('glorot_uniform')\n\n        self.W_regularizer = regularizers.get(W_regularizer)\n        self.b_regularizer = regularizers.get(b_regularizer)\n\n        self.W_constraint = constraints.get(W_constraint)\n        self.b_constraint = constraints.get(b_constraint)\n\n        self.bias = bias\n        self.step_dim = step_dim\n        self.features_dim = 0\n        super(Attention, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        assert len(input_shape) == 3\n\n        self.W = self.add_weight((input_shape[-1],),\n                                 initializer=self.init,\n                                 name='{}_W'.format(self.name),\n                                 regularizer=self.W_regularizer,\n                                 constraint=self.W_constraint)\n        self.features_dim = input_shape[-1]\n\n        if self.bias:\n            self.b = self.add_weight((input_shape[1],),\n                                     initializer='zero',\n                                     name='{}_b'.format(self.name),\n                                     regularizer=self.b_regularizer,\n                                     constraint=self.b_constraint)\n        else:\n            self.b = None\n\n        self.built = True\n\n    def compute_mask(self, input, input_mask=None):\n        return None\n\n    def call(self, x, mask=None):\n        features_dim = 
self.features_dim\n step_dim = self.step_dim\n\n eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),\n K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n\n if self.bias:\n eij += self.b\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n if mask is not None:\n a *= K.cast(mask, K.floatx())\n\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n a = K.expand_dims(a)\n weighted_input = x * a\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], self.features_dim\n\nclass CustomMultiLossLayer(Layer):\n def __init__(self, nb_outputs=2, **kwargs):\n self.nb_outputs = nb_outputs\n self.is_placeholder = True\n super(CustomMultiLossLayer, self).__init__(**kwargs)\n \n def build(self, input_shape=None):\n # initialise log_vars\n self.log_vars = []\n for i in range(self.nb_outputs):\n self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),\n initializer=Constant(0.), trainable=True)]\n super(CustomMultiLossLayer, self).build(input_shape)\n\n def multi_loss(self, ys_true, ys_pred):\n assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs\n loss = 0\n for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):\n precision = K.exp(-log_var[0])\n loss += K.sum(precision * (y_true - y_pred)**2. + log_var[0], -1)\n return K.mean(loss)\n\n def call(self, inputs):\n ys_true = inputs[:self.nb_outputs]\n ys_pred = inputs[self.nb_outputs:]\n loss = self.multi_loss(ys_true, ys_pred)\n self.add_loss(loss, inputs=inputs)\n # We won't actually use the output.\n return K.concatenate(inputs, -1)\n\n\n"
},
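A note on the Keras snippet in the record above: `CustomMultiLossLayer` is the homoscedastic-uncertainty multi-task weighting of Kendall et al., and the five `arrival_i` heads make this a multi-output regression. One genuine bug worth flagging: in both `get_mc_es_lr` helpers, the trailing comma after `tf.keras.callbacks.ModelCheckpoint(...)` turns `mc` into a one-element tuple, which `model.fit(callbacks=[mc, es, lr])` will reject. A minimal corrected sketch (assuming TensorFlow 2.x; monitor, factor, and patience values mirror the original):

```python
import tensorflow as tf

def get_mc_es_lr(model_name: str, patience=5, min_delta=1e-4):
    # No trailing comma here: mc must be a callback object, not a 1-tuple
    mc = tf.keras.callbacks.ModelCheckpoint('../model_h5/model_{}.h5'.format(model_name))
    es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min',
                                          restore_best_weights=True, patience=patience)
    lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.8,
                                              patience=patience - 1, mode='min',
                                              min_delta=min_delta)
    return mc, es, lr
```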
{
"alpha_fraction": 0.5652634501457214,
"alphanum_fraction": 0.5774424076080322,
"avg_line_length": 48.69078826904297,
"blob_id": "6147d07453c2a0bedcbac705a2c47f6de2f4d1c6",
"content_id": "85fba36688569607282c7b7cfbe541c395d75585",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7554,
"license_type": "permissive",
"max_line_length": 117,
"num_lines": 152,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/1_sdne_embedding_allnext.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport numpy as np\nimport networkx as nx\nimport pandas as pd\nfrom gem.embedding.node2vec import node2vec\nimport os\nfrom utils import parallel_apply\nfrom functools import partial\nimport gc\ndef link_id_find(gr):\n gr_ = gr.copy()\n tmp = list(gr_['link_id'])\n link_id_tuple = []\n for i in range(len(tmp)-1):\n link_id_tuple.append([tmp[i],tmp[i+1]])\n return link_id_tuple\n\nif __name__ == '__main__':\n root_path = '../data/giscup_2021/'\n nrows = None\n ######################################nextlinks #######################################\n nextlinks = pd.read_csv(root_path + 'nextlinks.txt', sep=' ', header=None)\n nextlinks.columns = ['from_id', 'to_id']\n nextlinks['to_id'] = nextlinks['to_id'].astype('str')\n nextlinks['to_id'] = nextlinks['to_id'].apply(lambda x: x.split(\",\"))\n nextlinks = pd.DataFrame({'from_id': nextlinks.from_id.repeat(nextlinks.to_id.str.len()),\n 'to_id': np.concatenate(nextlinks.to_id.values)})\n nextlinks['from_id'] = nextlinks['from_id'].astype(int)\n nextlinks['to_id'] = nextlinks['to_id'].astype(int)\n from_id = nextlinks['from_id'].unique()\n # nextlinks.to_csv('../data/giscup_2021/nextlink_all.csv',index=False)\n # nextlinks = pd.read_csv('../data/giscup_2021/nextlink_all.csv')\n\n ######################################nextlinks #######################################\n if 'nextlinks_allday.csv' in os.listdir(root_path):\n nextlinks = pd.read_csv(root_path + 'nextlinks_allday.csv')\n else:\n nextlinks_new = []\n for name in os.listdir(root_path + 'train/'):\n data_time = name.split('.')[0]\n if data_time == '20200803':\n continue\n train = pd.read_csv(root_path + 'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)\n train_head = pd.DataFrame(train[0].str.split(' ').tolist(),\n columns=['order_id', 'ata', 'distance', 'simple_eta', 'driver_id', 'slice_id'])\n train_head['order_id'] = train_head['order_id'].astype(str)\n train_head['ata'] = train_head['ata'].astype(float)\n train_head['distance'] = train_head['distance'].astype(float)\n train_head['simple_eta'] = train_head['simple_eta'].astype(float)\n train_head['driver_id'] = train_head['driver_id'].astype(int)\n train_head['slice_id'] = train_head['slice_id'].astype(int)\n data_link = train[[1]]\n print(\"flag:\", 1)\n data_link['index'] = train_head.index\n data_link['order_id'] = train_head['order_id']\n print(\"flag:\", 2)\n data_link['ata'] = train_head['ata']\n data_link['distance'] = train_head['distance']\n data_link['simple_eta'] = train_head['simple_eta']\n print(\"flag:\", 3)\n data_link['slice_id'] = train_head['slice_id']\n print(\"flag:\", 4)\n data_link_split = data_link[1].str.split(' ', expand=True).stack().to_frame()\n print(\"flag:\", 5)\n data_link_split = data_link_split.reset_index(level=1, drop=True).rename(columns={0: 'link_info'})\n print(\"flag:\", 6)\n data_link_split = data_link[['order_id', 'index', 'ata', 'distance', 'simple_eta', 'slice_id']].join(\n data_link_split)\n print(\"flag:\", 7)\n data_link_split = data_link_split.reset_index(drop=True)\n data_link_split[['link_id',\n 'link_time',\n 'link_ratio',\n 'link_current_status',\n 'link_arrival_status']] = data_link_split['link_info'].str.split(':|,', 5, expand=True)\n print(\"flag:\", 8)\n data_link_split = data_link_split[['order_id','link_id']]\n data_link_split['link_id'] = data_link_split['link_id'].astype(int)\n features = pd.DataFrame({'order_id': data_link_split['order_id'].unique()})\n 
groupby = data_link_split.groupby(['order_id'])\n func = partial(link_id_find)\n g = parallel_apply(groupby, func, index_name='order_id', num_workers=5, chunk_size=10000)\n g = pd.DataFrame(g,columns=['from_id','to_id'])\n g = g.drop_duplicates()\n nextlinks_new.append(g)\n nextlinks_new = pd.concat(nextlinks_new, axis=0)\n nextlinks_new = nextlinks_new.drop_duplicates()\n nextlinks_new = nextlinks_new.sort_values(by='from_id').reset_index(drop=True)\n nextlinks = pd.concat([nextlinks,nextlinks_new],axis=0)\n nextlinks = nextlinks.drop_duplicates()\n nextlinks = nextlinks.sort_values(by='from_id').reset_index(drop=True)\n print('save all csv')\n nextlinks.to_csv(root_path+'nextlinks_allday.csv',index=False)\n print('calcute weight')\n nextlinks = nextlinks.sort_values(by='from_id').reset_index(drop=True)\n nextlinks = nextlinks.drop_duplicates()\n from_id_weight = nextlinks['from_id'].value_counts()\n from_id_weight = from_id_weight.to_frame()\n from_id_weight['index'] = from_id_weight.index\n from_id_weight.columns = ['weight', 'from_id']\n nextlinks = pd.merge(nextlinks, from_id_weight, 'left', on=['from_id'])\n print('calcute weight finish!')\n nextlinks['to_id'] = nextlinks['to_id'].astype(int)\n nextlinks['from_id'] = nextlinks['from_id'].astype(int)\n id_key = list(set(nextlinks['from_id'].unique().tolist() + nextlinks['to_id'].unique().tolist()))\n id_key_to_connected = dict(zip(id_key, range(len(id_key))))\n nextlinks['from_id'] = nextlinks['from_id'].map(id_key_to_connected)\n nextlinks['to_id'] = nextlinks['to_id'].map(id_key_to_connected)\n np.save(root_path + 'id_key_to_connected_allday.npy', id_key_to_connected)\n print('id key save finish!')\n print('start creating graph')\n G = nx.DiGraph()\n from_id = nextlinks['from_id'].to_list()\n to_id = nextlinks['to_id'].to_list()\n weight = nextlinks['weight'].to_list()\n edge_tuple = list(zip(from_id, to_id,weight))\n # edge_tuple = tuple(from_id,to_id,weight)\n print('adding')\n G.add_weighted_edges_from(edge_tuple)\n G = G.to_directed()\n print('finish create graph!')\n print('start train n2v')\n look_back = list(G.nodes())\n embeddings = {}\n models = []\n models.append(node2vec(d=128, max_iter=10, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))\n for embedding in models:\n Y, t = embedding.learn_embedding(graph=G, edge_f=None,\n is_weighted=True, no_python=True)\n for i, embedding in enumerate(embedding.get_embedding()):\n embeddings[look_back[i]] = embedding\n np.save(root_path+'graph_embeddings_retp1.npy', embeddings)\n print('nextlink graph embedding retp 1 finish!') # displays \"world\"\n del models\n gc.collect()\n\n look_back = list(G.nodes())\n embeddings = {}\n models = []\n models.append(node2vec(d=128, max_iter=10, walk_len=80, num_walks=10, con_size=10, ret_p=0.5, inout_p=1))\n for embedding in models:\n Y, t = embedding.learn_embedding(graph=G, edge_f=None,\n is_weighted=True, no_python=True)\n for i, embedding in enumerate(embedding.get_embedding()):\n embeddings[look_back[i]] = embedding\n np.save(root_path + 'graph_embeddings_retp05.npy', embeddings)\n print('nextlink graph embedding retp 0.5 finish!')\n\n"
},
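The core of the embedding script above is turning the `nextlinks` adjacency table into a weighted directed graph before fitting GEM's `node2vec` (weights are simply the out-degree counts of each `from_id`, and raw link ids are remapped to a contiguous range). A condensed sketch of that graph-construction step, using toy data rather than the GISCUP files:

```python
import pandas as pd
import networkx as nx

# Toy adjacency table standing in for nextlinks.txt
nextlinks = pd.DataFrame({'from_id': [10, 10, 20, 30],
                          'to_id':   [20, 30, 30, 10]})

# Out-degree of each from_id is used as the edge weight, as in the script
weight = nextlinks['from_id'].value_counts().rename('weight')
nextlinks = nextlinks.merge(weight, left_on='from_id', right_index=True)

# Remap raw link ids to a contiguous 0..n-1 range (saved as id_key_to_connected)
ids = pd.unique(nextlinks[['from_id', 'to_id']].values.ravel())
id_map = {k: i for i, k in enumerate(ids)}
nextlinks['from_id'] = nextlinks['from_id'].map(id_map)
nextlinks['to_id'] = nextlinks['to_id'].map(id_map)

# Directed weighted graph, ready for a random-walk embedder such as node2vec
G = nx.DiGraph()
G.add_weighted_edges_from(nextlinks[['from_id', 'to_id', 'weight']].itertuples(index=False))
print(G.edges(data=True))
```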
{
"alpha_fraction": 0.4818830192089081,
"alphanum_fraction": 0.7115549445152283,
"avg_line_length": 16.102439880371094,
"blob_id": "686b041503f0c1ff0a5cd8282213fbeb38b44710",
"content_id": "dca104d4ecc48ae762327dd4ba56287798c82f33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6558,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 205,
"path": "/深度学习入门/第一章——深度学习必备知识点.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第一章——深度学习必备知识点\n\n#### 深度学习要解决的问题\n\n\n\n> 人工智能、机器学习、深度学习的区别于联系\n\n\n\n机器学习的流程:\n\n- 数据提取\n- 特征工程\n- 建立模型\n- 评估与应用\n\n> 特征工程可以说是建模过程中,最重要的部分。\n\n既然特征工程是最重要的,常规我们会做各种各样的特征,如聚合统计、交叉等,那有没有一种方法,它可以**自动的**去选择重要的特征。\n\n而深度学习可以说是最接近人工智能这一概念的,因为它解决了机器学习中“人工的”问题,如人工的选择特征、选择算法等。\n\n深度学习最大的亮点,就是解决特征工程的人工问题。\n\n特征工程的作用:\n\n- 数据特征决定了模型的上限\n- 预处理和特征提取是最核心的\n- 算法与参数选择决定了如何逼近这个上限\n\n特征如何提取,数值类的相对容易,那么文本类、图像类的呢?\n\n传统特征提取方法:\n\n\n\n> 各种计算可以说是非常麻烦的。\n\n为什么需要深度学习:\n\n\n\n> 如上图,将一个图像,分解成多个维度并变换成数值,变成机器可认识的。\n\n\n\n#### 深度学习应用领域\n\n自动驾驶:\n\n\n\n人脸识别:\n\n\n\n医学检测:\n\n\n\n缺点:计算大量数据导致速度慢。\n\n数据规模越大,越适合深度学习\n\n\n\n\n\n#### 计算机视觉任务\n\n如图像分类任务,如何把一张猫的图片,分到猫的类别(有一些列的标签:人、笔、飞机、猫 等等)\n\n图像表示:计算机眼中的图像,一张图片被表示成三维数组的形式,每个像素的值从0到255。\n\n\n\n> 可能的维度是`300*100*3`,假设高度h是300,宽度w是100,3则是颜色通道RBG。图像中数值越大表示该点越亮,反之则越暗。\n\n\n\n**计算机视觉面临的挑战**:\n\n还是上面这张图,如照射角度:\n\n\n\n其形状可能改变成如下的样子:\n\n\n\n或者部分遮蔽:\n\n\n\n背景混入:\n\n\n\n这些都是可能遇到得问题,如何解决可以给一些遮蔽的样本,这些都是数据量可以解决的。\n\n\n\n#### 得分函数\n\n线性函数\n\n- 从输入——>输出的映射\n\n \n\n 每个像素点对结果产生影响,而每个像素点有对应的权重。\n\n- 数学表示\n\n \n\n `32*32*3`=3072个X(像素点),假设需要分类的类别一共有10个,那么这3072个X在这10个W(类别)中有着不同的权重得分,还有b偏值项,最终最高 得分 的就是预测的分类。\n\n- 计算方法\n\n \n\n 为了计算简单,假设这种猫只有4个像素点,分别是56、231、24、2,分成3个类别(猫、狗、船),其中W有3行对应3个类别,且Wi有4个值对应4个像素点,我们来计算第一行,`W*X+b = `0.2*56+(-0.5)*231+0.1*24+2.0*2+1.1`=-97.9+1.1=-96.8,这样就得到了结果。\n\n 当权重Wi比较大时,说明该像素点对结果的影响比较重要,正值代表促进作用,负值代表抑制作用。\n\n - W矩阵怎么来的:一开始是假设了一个,然后根据结果不断优化,就想上面的预测是把猫预测成了狗437.9,那么神经网络会不断迭代W矩阵中的值。\n\n 现在我们知道上面的结果不够好,那么我们应该怎样让它变好。\n\n\n\n#### 损失函数\n\n- 如何衡量分类的结果\n- 怎么明确模型当前的效果是好是坏。\n\n##### 其中一种损失函数(回归任务):\n\n实验:\n\n\n\n假设我们运行完模型,得到上面这么个结果,第一张图评定为猫3.2为车5.1明显不合理,如果利用上面的公式,就是5.1-3.2=1.9,值越大表明预测越离谱,小于0则表示没有误差。\n\n而+1是为了防止一种情况是,如果预测的car不是5.1而是3.15,那么3.15-3.2也小于0,但是能明确的说预测没问题吗?或许不能吧,因为只有一点偏差可能是“刚刚好”预测对了,那么+1就可以防止这种情况,预测对的值一定要远大于预测错的值。\n\n计算:\n\ncat = max(0, 5.1 - 3.2 + 1) + max(0, -1.7 - 3.2 + 1)\n\n = max(0, 2.9) + max(0, -3.9)\n\n = 2.9 + 0 = 2.9\n\n> max(0, x)的意思是x大于0取x,小于0则取0\n\ncar = max(0, 1.3 - 4.9 + 1) + max(0, 2.0 - 4.9 + 1)\n\n = max(0, -2.6) + max(0, -1.9)\n\n = 0 + 0 = 0\n\nfrog = max(0, 2.2 - (-3.1) + 1) + max(0, 2.5 - (-3.1) + 1)\n\n = max(0, 5.3) + max(0, 5.6)\n\n = 5.3 + 5.6 = 10.9\n\n其中car预测最好,frog的预测最差\n\n\n\n损失函数的值相同,意味着两个模型一样吗?\n\n- 还是这个公式:\n\n- 输入数据:X = [1, 1, 1, 1]\n - 模型A:w1 = [1, 0, 0, 0]\n - 模型B:w2 = [0.25, 0.25, 0.25, 0.25]\n- 两者的结果都等于1,那两者的可以说一样吗?,是不是模型B的W更可靠。那么损失函数还得加入一个东西\n\n损失函数 = 数据损失 + 正则化惩罚项\n\n- 正则化惩罚项:\n- 正则化惩罚项关注权重w,w越大则该公式的结果越大,我们希望模型不要太复杂,过拟合的模型是没用的。往往还有个 λ 。\n\n回归任务的解决了,还有分类任务的。\n\n\n\n##### Softmax分类器(分类任务)\n\n- 归一化:where\n\n- 计算损失值:\n\n举例:\n\n\n\n> 其中3.2exp表示e^3.2=24.5,24.5normalize表示24.5 / (24.5+164+0.18) = 0.13,这就是概率,同时我们再做-log(0.13)可以得到0.89这个损失值,log(1)的时候为0,即没有损失,表示概率值越接近于1损失的越少。\n\n这些都是前向传播,一步步的得到损失值,那么如何以损失值来更新W更新模型呢,这就是反向传播。"
},
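The worked numbers in the chapter above can be checked in a few lines. With the full `max(0, s_j - s_correct + 1)` margin, the frog image actually gives `6.3 + 6.6 = 12.9` (the text's evaluation drops the `+1` there, yielding 10.9), while the cat and car results of 2.9 and 0 match. The softmax loss of 0.89 is reproduced with a base-10 log, as the chapter uses. A quick check with the chapter's scores:

```python
import numpy as np

def hinge_loss(scores, correct):
    # Multiclass SVM loss: sum of max(0, s_j - s_correct + 1) over wrong classes
    margins = np.maximum(0, scores - scores[correct] + 1)
    margins[correct] = 0
    return margins.sum()

# Per-image scores for the classes (cat, car, frog)
print(hinge_loss(np.array([3.2, 5.1, -1.7]), 0))   # cat image  -> 2.9
print(hinge_loss(np.array([1.3, 4.9, 2.0]), 1))    # car image  -> 0.0
print(hinge_loss(np.array([2.2, 2.5, -3.1]), 2))   # frog image -> 12.9 (text: 10.9)

# Softmax classifier loss for the cat image, base-10 log as in the text
scores = np.array([3.2, 5.1, -1.7])
probs = np.exp(scores) / np.exp(scores).sum()
print(probs[0], -np.log10(probs[0]))               # ~0.13, ~0.89
```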
{
"alpha_fraction": 0.46256864070892334,
"alphanum_fraction": 0.6756913065910339,
"avg_line_length": 144.8309783935547,
"blob_id": "a3febba56898c678112b80422d7e15172afae1e1",
"content_id": "8717215b8018044534af62ce7c7917fb0baf40fc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 11285,
"license_type": "permissive",
"max_line_length": 357,
"num_lines": 71,
"path": "/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# AiLearning-Theory-Applying\n\n快速上手Ai理论及应用实战:基础知识Basic knowledge、机器学习MachineLearning、深度学习DeepLearning2、自然语言处理BERT,持续更新中。含大量注释及数据集,力求每一位能看懂并复现。\n\n\n\n## 学习章节:\n\n<ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md'>必备数学基础Basic knowledge</a>\n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E9%AB%98%E7%AD%89%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80'>高等数学基础</a>\n </li>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E5%BE%AE%E7%A7%AF%E5%88%86'>微积分</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E6%B3%B0%E5%8B%92%E5%85%AC%E5%BC%8F'>泰勒公式</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E7%BA%BF%E6%80%A7%E4%BB%A3%E6%95%B0%E5%9F%BA%E7%A1%80'>线性代数基础</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E9%9A%8F%E6%9C%BA%E5%8F%98%E9%87%8F'>随机变量</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E6%A6%82%E7%8E%87%E8%AE%BA%E5%9F%BA%E7%A1%80'>概率论基础</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E6%95%B0%E6%8D%AE%E7%A7%91%E5%AD%A6%E7%9A%84%E5%87%A0%E7%A7%8D%E5%88%86%E5%B8%83'>数据科学的几种分布</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E6%A0%B8%E5%87%BD%E6%95%B0'>核函数</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E7%86%B5%E5%92%8C%E6%BF%80%E6%B4%BB%E5%87%BD%E6%95%B0'>熵和激活函数</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E5%9B%9E%E5%BD%92%E5%88%86%E6%9E%90'>回归分析</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E5%81%87%E8%AE%BE%E6%A3%80%E9%AA%8C%E7%AB%A0%E8%8A%82'>假设检验</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E7%9B%B8%E5%85%B3%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82'>相关分析</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/notebook_%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80/%E7%9B%B8%E5%85%B3%E5%88%86%E6%9E%90%E7%AB%A0%E8%8A%82'>方差分析</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#kmeans%E7%AE%97%E6%B3%95'>KMEANS算法</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E5%BF%85%E5%A4%87%E6%95%B0%E5%AD%A6%E5%9F%BA%E7%A1%80.md#%E8%B4%9D%E5%8F%B6%E6%96%AF%E5%88%86%E6%9E%90'>贝叶斯分析</a>\n </ul>\n <li><a 
href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88'>机器学习MachineLearning</a> \n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E4%BF%A1%E7%94%A8%E5%8D%A1%E6%AC%BA%E8%AF%88%E6%A3%80%E6%B5%8B'>信用卡欺诈检测(含数据集)</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%B7%A5%E4%B8%9A%E5%8C%96%E5%B7%A5%E7%94%9F%E4%BA%A7%E9%A2%84%E6%B5%8B'>工业化工生产预测(含数据集)</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%99%BA%E6%85%A7%E5%9F%8E%E5%B8%82-%E9%81%93%E8%B7%AF%E9%80%9A%E8%A1%8C%E6%97%B6%E9%97%B4%E9%A2%84%E6%B5%8B'>智慧城市-道路通行时间预测(含数据集)</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BB%BA%E7%AD%91%E8%83%BD%E6%BA%90%E5%88%A9%E7%94%A8%E7%8E%87%E9%A2%84%E6%B5%8B'>建筑能源利用率预测(含数据集)</a>\n\t\t<li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E5%BF%AB%E6%89%8B%E7%9F%AD%E8%A7%86%E9%A2%91%E7%94%A8%E6%88%B7%E6%B4%BB%E8%B7%83%E5%BA%A6%E5%88%86%E6%9E%90'>快手用户活跃预测(含数据集)</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/Indoor%20Location%20%26%20Navigation'>Indoor Location & Navigation(数据集在kaggle)</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP'>ACM SIGSPATIAL 2021 GISCUP-预估到达时间</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%AE%9E%E6%88%98%E5%B0%8F%E9%A1%B9%E7%9B%AE'>机器学习实战小项目(含数据集)</a>\n\t</ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8'>深度学习入门DeepLearning</a> \n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E4%B8%80%E7%AB%A0%E2%80%94%E2%80%94%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%BF%85%E5%A4%87%E7%9F%A5%E8%AF%86%E7%82%B9.md'>深度学习必备知识点</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E4%BA%8C%E7%AB%A0%E2%80%94%E2%80%94%E8%B5%B0%E8%BF%9B%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E7%9A%84%E4%B8%96%E7%95%8C%20%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E6%A8%A1%E5%9E%8B.md'>走进深度学习的世界 神经网络模型</a>\n 
<li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E4%B8%89%E7%AB%A0%E2%80%94%E2%80%94%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C.md'>卷积神经网络</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E5%9B%9B%E7%AB%A0%E2%80%94%E2%80%94%E9%80%92%E5%BD%92%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E4%B8%8E%E8%AF%8D%E5%90%91%E9%87%8F%E5%8E%9F%E7%90%86%E8%A7%A3%E8%AF%BB.md'>递归神经网络与词向量原理解读</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8/%E7%AC%AC%E4%BA%94%E7%AB%A0%E2%80%94%E2%80%94LSTM%E7%BD%91%E7%BB%9C%E6%9E%B6%E6%9E%84%E4%B8%8E%E6%83%85%E6%84%9F%E5%88%86%E6%9E%90%E5%BA%94%E7%94%A8%E5%AE%9E%E4%BE%8B'>LSTM网络架构与情感分析应用实例</a>\n </ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/NLP%E9%80%9A%E7%94%A8%E6%A1%86%E6%9E%B6BERT%E9%A1%B9%E7%9B%AE%E5%AE%9E%E6%88%98'>NLP通用框架BERT项目实战</a>\n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/NLP%E9%80%9A%E7%94%A8%E6%A1%86%E6%9E%B6BERT%E9%A1%B9%E7%9B%AE%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%B8%80%E7%AB%A0%E2%80%94%E2%80%94NLP%E9%80%9A%E7%94%A8%E6%A1%86%E6%9E%B6BERT%E5%8E%9F%E7%90%86%E8%A7%A3%E8%AF%BB.md'>NLP通用框架BERT原理解读</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/NLP%E9%80%9A%E7%94%A8%E6%A1%86%E6%9E%B6BERT%E9%A1%B9%E7%9B%AE%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%BA%8C%E7%AB%A0%E2%80%94%E2%80%94BERT%E6%BA%90%E7%A0%81%E8%A7%A3%E8%AF%BB%E4%B8%8E%E5%BA%94%E7%94%A8%E5%AE%9E%E4%BE%8B'>BERT源码解读与应用实例</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/NLP%E9%80%9A%E7%94%A8%E6%A1%86%E6%9E%B6BERT%E9%A1%B9%E7%9B%AE%E5%AE%9E%E6%88%98/%E7%AC%AC%E4%B8%89%E7%AB%A0%E2%80%94%E2%80%94%E5%9F%BA%E4%BA%8EBERT%E7%9A%84%E4%B8%AD%E6%96%87%E6%83%85%E6%84%9F%E5%88%86%E6%9E%90%E5%AE%9E%E6%88%98.md'>基于BERT的中文情感分析实战.md</a>\n </ul> \n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC'>机器学习算法原理及推导</a>\n <ul>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E8%88%AA%E2%80%94%E2%80%94%E7%BB%9F%E8%AE%A1%E5%AD%A6%E4%B9%A0%E6%96%B9%E6%B3%95'>李航——统计学习方法</a>\n <li><a href='https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AE%97%E6%B3%95%E5%8E%9F%E7%90%86%E5%8F%8A%E6%8E%A8%E5%AF%BC/%E6%9D%8E%E5%AE%8F%E6%AF%85%E2%80%94%E2%80%94%E5%BC%82%E5%B8%B8%E6%A3%80%E6%B5%8B'>李宏毅——异常检测</a>\n </ul>\n\n\n\n## 说明\n\n<p> 本专题并不用于商业用途,转载请注明本专题地址,如有侵权,请务必邮件通知作者。\n<p> 如有文字、代码等遗漏或错误的地方,望不吝赐教,万分感谢。\n<p> Email:[email protected]\n\n\n## License\n\n本文使用的许可见 [LICENSE](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/LICENSE)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5065974593162537,
"alphanum_fraction": 0.7250702977180481,
"avg_line_length": 46.64948272705078,
"blob_id": "f7f0a580c4bf97c2a88f2e1f1975f45d8f447bc2",
"content_id": "feb07c8fd22b8da0d8133d63127bcefeec879a2d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5433,
"license_type": "permissive",
"max_line_length": 342,
"num_lines": 97,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/README.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "# 2021滴滴预估到达时间大赛\n\n[竞赛地址](https://www.biendata.xyz/competition/didi-eta/)\n\n持续更新中...\n\n\n\n**7th/Top1%,提供答疑**\n\n\n\n**也能做到前5,但是没必要**\n\n\n\n\n\n### 1.解题思路\n\n[预估到达时间解题思路.pdf](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/%E9%A2%84%E4%BC%B0%E5%88%B0%E8%BE%BE%E6%97%B6%E9%97%B4%E8%A7%A3%E9%A2%98%E6%80%9D%E8%B7%AF.pdf)\n\n<img src=\"assets/1628668115968.png\" width=\"700\" align=\"middle\" />\n\n\n\n### 2. 数据说明\n\n- 由于滴滴数据保密协议,博主也无法找到可开放数据及数据地址,故无法提供。\n- 数据来自滴滴出行,英文(Data source: Didi Chuxing),数据出处:[https://gaia.didichuxing.com](https://gaia.didichuxing.com/)\n\n### 3. 特征说明\n\n\n\n\n\n- max_order_xt:head级别的特征,如同一sample_eta、distinct等\n- max_170_link_sqe_for_order:link序列特征,如右格式:[link_id_1, link_id_3, link_id_20...]\n- cross_data_dir:cross序列特征\n- link_data_other_dir:link统计特征,如某link_id前6小时的均值、求和等\n- head_data_dir:历史同星期的全天的统计特征\n- win_order_data_dir:订单的滑窗特征,如当前订单时间点的前段时间的统计特征\n- arrival_data_dir:历史到达路况状态的统计特征\n- zsl_arrival_data_dir:同上,不同人进行构建\n- arrival_sqe_data_dir:到达时刻的序列特征,提供给DCN的T模型进行蒸馏给S模型\n- pre_arrival_sqe_dir:利用树模型预测的到达时刻特征\n- zsl_link_data_dir:link统计特征,不同人构建\n\n\n\n### 4. 模型说明\n\n- [DCN蒸馏模型](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/DCN%E8%92%B8%E9%A6%8F_12953)\n - \n- [WDR模型](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/WD_128544)\n - \n- [LGB模型](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/LGB_13700)\n - \n\n\n\n### 5. 推荐服务器\n\n- [智能钛Notebook-2.4.0-tf](https://console.cloud.tencent.com/tione/notebook/instance)\n- [腾讯云服务器](https://console.cloud.tencent.com/cvm/instance/index)\n\n\n\n### 6. 环境配置和所需依赖库\n\n- scikit-learn\n- tqdm\n- pandarallel\n- joblib\n- lightgbm\n- pandas\n- numpy\n- keras_radam\n- tensorFlow-gpu=2.4.0 \n\n### 7. 
文件说明\n\n- [DCN蒸馏_12953](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/%E6%BB%B4%E6%BB%B4%E2%80%94%E2%80%94%E9%A2%84%E4%BC%B0%E5%88%B0%E8%BE%BE%E6%97%B6%E9%97%B4/DCN_12953)\n - DCN蒸馏模型(利用“未来”数据),线上分数0.12953\n - dcn_model/[dcn_model.py](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/DCN%E8%92%B8%E9%A6%8F_12953/dcn_model/dcn_model.py):模型代码\n - dcn_model/[main.py](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/DCN%E8%92%B8%E9%A6%8F_12953/dcn_model/main.py):主函数,训练和预测\n - dcn_model/[process.py](https://github.com/ben1234560/AiLearning-Theory-Applying/blob/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/DCN%E8%92%B8%E9%A6%8F_12953/dcn_model/process.py):特征预处理\n - dcn_model/[model_h5](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/DCN%E8%92%B8%E9%A6%8F_12953/model_h5):存放处理信息,不影响模型结果\n- [WD_128544](https://github.com/ben1234560/AiLearning-Theory-Applying/tree/master/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E7%AB%9E%E8%B5%9B%E5%AE%9E%E6%88%98_%E4%BC%98%E8%83%9C%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88/ACM%20SIGSPATIAL%202021%20GISCUP/WD_128544)\n - WD模型,线上分数0.128544\n - 其他同上\n\n### 8. 其他说明\n\n- 代码属于公司所有,不能提供最优代码\n- 感谢[@xbder](https://github.com/xbder)、[@AiIsBetter](https://github.com/AiIsBetter)\n\n"
},
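Several of the feature groups listed in the README above (`win_order_data_dir`, `link_data_other_dir`) are time-windowed aggregates, e.g. the mean and sum of a link's travel time over the preceding six hours. A minimal pandas sketch of such a trailing-window statistic — the column names `link_id`, `ts`, `link_time` are illustrative, not the actual GISCUP schema:

```python
import pandas as pd

# Illustrative per-link observations with a timestamp and a travel time
df = pd.DataFrame({'link_id': [1, 1, 1, 2, 2],
                   'ts': pd.to_datetime(['2020-08-18 08:00', '2020-08-18 10:00',
                                         '2020-08-18 13:30', '2020-08-18 09:00',
                                         '2020-08-18 12:00']),
                   'link_time': [10.0, 12.0, 9.0, 30.0, 28.0]})

# Mean/sum of link_time over a trailing 6-hour window, per link_id
df = df.sort_values('ts').set_index('ts')
rolled = (df.groupby('link_id')['link_time']
            .rolling('6H')
            .agg(['mean', 'sum'])
            .reset_index())
print(rolled)
```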
{
"alpha_fraction": 0.5193896889686584,
"alphanum_fraction": 0.7269548773765564,
"avg_line_length": 16.98285675048828,
"blob_id": "d569396799839da69f788bb11ff6d0ed545c5f6c",
"content_id": "99885e60fa6b11b23de7535e346f2fed181d9618",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6186,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 175,
"path": "/深度学习入门/第三章——卷积神经网络.md",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "### 第三章——卷积神经网络\n\n#### 卷积神经网络应用领域\n\nCV领域发展:\n\n\n\n> 比赛中预测错误率的百分比,每年逐步下降。Human是人类肉眼的识别能力,2016年开始已经远高于人类肉眼死别能力,后面就取消了该方向的比赛了。\n\n检测任务:\n\n\n\n分类与检索:\n\n\n\n> 分类:将图片分到对应类别。\n>\n> 检索:找到相似的图片。\n\n还有图片重构、无人驾驶、人脸识别\n\n\n\n#### 卷积的作用\n\n卷积网络与传统网络的区别:\n\n\n\n> 输出的数据直接是三维的,还多了深度\n\n整体架构:\n\n输入层、卷积层、池化层、全连接层\n\n\n\n> 这里只有卷积层和池化层我们没有了解过\n\n卷积做了什么事:\n\n\n\n> 比如一个猫的图,眼睛鼻子特征和周围环境特征的重要性明显不一样,需要区别对待。先把图像进行分割(最左边),分割成多个小区域,提取其中一个小区域(第二个5×5×3),蓝色图的右下角3×3矩阵,大字体的值是X的值,而小字体的值是w权重,会不断循环获取最优的w权重和对应的值,并输出右边绿色的14(特征值)。\n\n如果看了不理解,下面的具体计算方法一定能帮助你理解。\n\n\n\n#### 卷积的计算方法\n\n\n\n> input是输入,W0是第一层的权重,W1是第二层的权重,Output是输出\n\n我们以input三个块(RGB三颜色)左上角3×3矩阵值 和 第二层W1来计算,内积是乘法再相加。\n\n- 先来第一个(R颜色)左上角3×3:[0,0,0],[0,0,1],[0,0,1] 和 权重 [-1,-1,0],[-1,1,0],[-1,1,0]\n\n `(0*(-1) + 0*(-1) + 0*0)` + `(0*(-1) + 0*1 + 1*0)` + `(0*(-1) + 0*1 + 1*0)` = 0 得出R颜色的左上角矩阵的值为0;\n\n- 第二个(G颜色) 左上角3×3矩阵值 和 第二层W1来计算\n\n `(0*1 + 0*(-1) + 0*0)` + `(0*(-1) + 1*0 + 1*(-1))` + `(0*(-1) + 0*0 + 2*0)` = -1\n\n- 第三个(B颜色) 左上角3×3矩阵值 和 第二层W1来计算\n\n `((-1)*0 + 0*0 + 1)` + `(0*1 + 2*0 + 0*1)` + `(0*0 + 0*(-1) + 0*0)` = 0\n\n- 最后再把三者结果相加并加上bias b1(偏值b)\n\n 0 + (-1) + 0 + 0 = -1\n\n 这级得到了output(0[:,:,1])中左上角的结果 -1。\n\n\n\n#### 卷积层的参数\n\n卷积层涉及参数:滑动窗口步长、卷积核尺寸、边缘填充、卷积核个数\n\n\n\n- 步长(移动多少个单元格):\n\n - 步长为1的卷积:\n\n \n\n 移动一个步长,得到红色绿色特征值,移动的步长越小,特征值越多\n\n - 步长为2的卷积:\n\n \n\n 移动两个步长,得到特征值越少\n\n - 一般图像识别的用步长为1的,\n\n- 卷积核尺寸:选择区域的大小,如上面是3×3的矩阵,可以选择4×4、5×5的,核尺寸越小,越细粒度提取,特征越多。\n\n- 边缘填充:\n\n \n\n +pad表示+1边缘,原本数据只有蓝色背景的部分(中间部分),而周围都是边缘增加的0,为什么这么做,滑动窗口时,边缘数据点明显滑动少,中间多,那能说明中间的就重要吗,为了使边缘的数据点也滑动多几次,就增加了这个边缘填充。文本分类中,有的文本有100个字,有的120字,不同长度无法训练,所以也会对其填充20个字,变成同样120的长度。\n\n- 卷积核个数:最终计算的结果,要得到多少个特征图,个数多少,特征图多少。\n\n \n\n- 卷积参数共享:即input使用的W部分的参数是共享的,卷积网络比之前的全连接大大减少了参数,不再需要每个对应的W。\n\n\n\n#### 池化层的作用\n\n将原始input的所有数据,进行压缩\n\n\n\n> 减少长宽以减少数据量的体积。\n\n最大池化MAX POOLING:\n\n\n\n- 如上图,从可选中,选出最大的值。为什么选择最大的值,因为前面是有权重W相乘的,还记得前面的W0和W1吗,如果计算完成得到的结果最大,那说明该结果是最重要的,所以这里选最大的,即挑最重要的。\n\n- 体积也从上图的2×2矩阵变成4×4的矩阵\n\n- 除了最大池化还有平均池化,不过平均池化基本没人用,既然有最好的结果,就应该拿最好的。\n\n- 池化层没有结果任何计算,只是选最大的\n\n\n\n#### 整体网络架构\n\n只有带参数的才能算层,Relu和池化不算\n\n\n\n将一张图,通过不断卷积、池化、最后变成一条向量,接上全连接层,进行分类。\n\n\n\n#### 残差网络ResNet\n\n深度网络遇到的问题:越大的层数理论上意味着越好,但实际是这样吗?下面是一组很早前测试的图\n\n\n\n> 左边的训练集和右边的预测集都是20层的反而比56层的好,那么说明多出的36层起负作用。\n\n解决方案:\n\n\n\n> 我们还是跟原来一样增加层数,但在此基础上增加残差,也就是如果多的一层网络效果并不比上一层好,那么依然使用上一层的结果,可以看到X直接跳过了两层,这样就能保证了效果一定是越来越好的。\n\n传统神经网络和Resnet的对比\n\n\n\n> ResNet是层数越多效果越好。\n\n下图是某个比赛中,冠军方案使用ResNet的层数是152层,第二名的22层有6.7的残差,而第一名的152层只有3.57的残差,相差近一倍的效果\n\n\n\n> 当然层数越多,可能收益越少,但是保证残差一定是小于等于之前的低层数,也就是结果一定是向好的发展。"
},
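The stride/padding discussion in the chapter above boils down to the standard output-size formula `(N + 2P - F) / S + 1`, and each output cell is exactly the inner product the text walks through: a sum over the three colour channels of patch times kernel, plus the bias. A small numpy sketch (random data rather than the exact figures from the slides):

```python
import numpy as np

def conv_output_size(n, f, pad, stride):
    # (N + 2P - F) / S + 1, e.g. 7x7 input, 3x3 kernel, pad 1, stride 1 -> 7
    return (n + 2 * pad - f) // stride + 1

print(conv_output_size(7, 3, 1, 1))   # 7
print(conv_output_size(7, 3, 1, 2))   # 4

# One output value: sum over the 3 colour channels of patch * kernel, plus bias
rng = np.random.default_rng(0)
patch = rng.integers(0, 3, size=(3, 3, 3))    # 3x3 window of an RGB image
kernel = rng.integers(-1, 2, size=(3, 3, 3))  # one 3x3x3 filter (like W1)
b = 1
print((patch * kernel).sum() + b)
```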
{
"alpha_fraction": 0.6031417846679688,
"alphanum_fraction": 0.6353865265846252,
"avg_line_length": 47.219268798828125,
"blob_id": "68b72bea3509d9304f14ad123ed9b504e54872d1",
"content_id": "2b8dd89768c3bacf25e9a110346aa1a03076ba3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14550,
"license_type": "permissive",
"max_line_length": 232,
"num_lines": 301,
"path": "/机器学习竞赛实战_优胜解决方案/ACM SIGSPATIAL 2021 GISCUP/LGB_13700/5_model_final.py",
"repo_name": "iakirago/AiLearning-Theory-Applying",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\n\"\"\"\nAuthor: Aigege\nCode: https://github.com/AiIsBetter\n\"\"\"\n# date 2021.08.01\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import KFold\nimport lightgbm as lgb\nfrom utils import reduce_mem_usage,reduce_mem_usage_parallel,lgb_score_mape,MAPE\nimport gc\nimport warnings\nimport os,random,pickle\nimport optuna\nwarnings.filterwarnings(\"ignore\")\ndef slice_id_change(x):\n hour = x * 5 / 60\n hour = np.floor(hour)\n hour += 8\n if hour >= 24:\n hour = hour - 24\n return hour\ndef optuna_print(tr_x, tr_y, te_x,te_y):\n def objective(trial,tr_x, tr_y, te_x,te_y):\n dtrain = lgb.Dataset(tr_x, label=tr_y)\n dvalid = lgb.Dataset(te_x, label=te_y)\n param = {\n \"objective\": \"regression\",\n \"metric\": \"mape\",\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n 'min_split_gain': 0,\n 'random_state':2021,\n 'max_bin':trial.suggest_int('max_bin',63,250),\n 'subsample_for_bin': trial.suggest_int('subsample_for_bin', 40000, 300000),\n \"lambda_l1\": trial.suggest_loguniform(\"lambda_l1\", 1e-8, 10.0),\n \"lambda_l2\": trial.suggest_loguniform(\"lambda_l2\", 1e-8, 10.0),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_uniform(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_uniform(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n }\n # Add a callback for pruning.\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial, \"mape\")\n gbm = lgb.train(\n param, dtrain, valid_sets=[dvalid], verbose_eval=False, callbacks=[pruning_callback]\n )\n\n preds = gbm.predict(te_x)\n pred_labels = np.rint(preds)\n mape = MAPE(te_y, pred_labels)\n return mape\n study = optuna.create_study(\n pruner=optuna.pruners.MedianPruner(n_warmup_steps=10), direction=\"minimize\"\n )\n study.optimize(lambda trial: objective(trial, tr_x, tr_y, te_x, te_y),\n n_trials=100)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n print(\"Best trial:\")\n trial = study.best_trial\n print(\" Value: {}\".format(trial.value))\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\nhead_columns = ['order_id', 'ata', 'distance', 'simple_eta', 'driver_id','slice_id']\nresult = []\nresult_time_weight = []\nresult_dis_weight = []\ncount = 0\ndf = []\nnrows=None\nroot_path = '../data/giscup_2021/'\ndata_list = ['20200818', '20200819', '20200820', '20200821', '20200822', '20200823', '20200824',\n '20200825', '20200826', '20200827', '20200828', '20200829', '20200830', '20200831']\nfor name in os.listdir(root_path+'train/'):\n data_time = name.split('.')[0]\n if data_time not in data_list:\n continue\n train = pd.read_csv(root_path+'train/{}'.format(name),sep= ';;',header=None,nrows=nrows)\n feature_cross = pd.read_csv(root_path+'feature/train/cross_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)\n feature_link = pd.read_csv(root_path+'feature/train/link_fea_order_id_level_{}.csv'.format(data_time),nrows=nrows)\n feature_head = pd.read_csv(root_path+'feature/train/head_link_{}.csv'.format(data_time),nrows=nrows)\n feature_sqe = pd.read_csv(root_path + 'feature/train/{}.csv'.format(data_time),nrows=nrows)\n feature_cross['order_id'] = feature_cross['order_id'].astype(str)\n feature_link['order_id'] = feature_link['order_id'].astype(str)\n 
feature_head['order_id'] = feature_head['order_id'].astype(str)\n feature_sqe['order_id'] = feature_sqe['order_id'].astype(str)\n\n print(\"开始处理\", data_time)\n # train.columns = ['head','link','cross']\n # train['head'] = train['head'].apply(lambda x:x.split(' '))\n train_head = pd.DataFrame(train[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\n train_head['order_id'] = train_head['order_id'].astype(str)\n train_head['ata'] = train_head['ata'].astype(float)\n train_head['distance'] = train_head['distance'].astype(float)\n train_head['simple_eta'] = train_head['simple_eta'].astype(float)\n train_head['driver_id'] = train_head['driver_id'].astype(int)\n train_head['slice_id'] = train_head['slice_id'].astype(int)\n train_head['date_time'] = int(data_time)\n train_head = train_head.merge(feature_cross,on='order_id',how='left')\n train_head = train_head.merge(feature_link,on='order_id',how='left')\n\n feature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',\n 'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',\n 'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',\n 'len_tmp',\n 'link_time_mean', 'link_time_std'],\n axis=1)\n feature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], axis=1)\n train_head = train_head.merge(feature_sqe, on='order_id', how='left')\n train_head = train_head.merge(feature_head, on='order_id', how='left')\n print('merge finish!')\n train_head = reduce_mem_usage_parallel(train_head,28)\n df.append(train_head.drop('order_id',axis=1))\n del train\n gc.collect()\n count +=1\ndf = pd.concat(df,axis=0)\ntest = pd.read_csv(root_path+'20200901_test.txt',sep= ';;',header=None,nrows=nrows)\ntest_head = pd.DataFrame(test[0].str.split(' ').tolist(),columns = ['order_id', 'ata', 'distance','simple_eta', 'driver_id', 'slice_id'])\ntest_head['order_id'] = test_head['order_id'].astype(str)\ntest_head['ata'] = test_head['ata'].astype(float)\ntest_head['distance'] = test_head['distance'].astype(float)\ntest_head['simple_eta'] = test_head['simple_eta'].astype(float)\ntest_head['driver_id'] = test_head['driver_id'].astype(int)\ntest_head['slice_id'] = test_head['slice_id'].astype(int)\n\nfeature_cross = pd.read_csv(root_path + 'feature/test/cross_fea_order_id_level_{}.csv'.format('20200901'),nrows=nrows)\nfeature_link = pd.read_csv(root_path + 'feature/test/link_fea_order_id_level_{}.csv'.format('20200901'), nrows=nrows)\nfeature_head = pd.read_csv(root_path + 'feature/test/head_link_{}.csv'.format('20200901'),nrows=nrows)\nfeature_sqe = pd.read_csv(root_path + 'feature/test/{}.csv'.format('20200901'),nrows=nrows)\ntest_head['date_time'] = 20200901\n\nfeature_cross['order_id'] = feature_cross['order_id'].astype(str)\nfeature_link['order_id'] = feature_link['order_id'].astype(str)\nfeature_head['order_id'] = feature_head['order_id'].astype(str)\nfeature_sqe['order_id'] = feature_sqe['order_id'].astype(str)\n\ntest_head = test_head.merge(feature_cross, on='order_id', how='left')\ntest_head = test_head.merge(feature_link,on='order_id',how='left')\nfeature_head = feature_head.drop(['ata', 'distance', 'simple_eta', 'driver_id', 'slice_id', 'index',\n 'date_time', 'link_count', 'link_time_sum', 'link_ratio_sum',\n 'date_time_dt', 'weekday', 'hour', 'weather', 'hightemp', 'lowtemp',\n 'len_tmp',\n 'link_time_mean', 'link_time_std'],\n axis=1)\nfeature_sqe = feature_sqe.drop(['pre_arrival_status', 'arrive_slice_id', 'slice_id'], 
axis=1)\ntest_head = test_head.merge(feature_sqe, on='order_id', how='left')\ntest_head = test_head.merge(feature_head, on='order_id', how='left')\n\ntest_head = reduce_mem_usage_parallel(test_head,28)\ndel feature_cross,feature_link\ngc.collect()\n\nX_train = df.drop('ata',axis=1)\ny_train = df['ata']\nX_test = test_head.drop(['order_id','ata'],axis=1)\n#调参\n#tr_x, te_x,tr_y,te_y = train_test_split(X_train,y_train,test_size=0.2,random_state=2021)\n#optuna_print(tr_x, tr_y, te_x,te_y)\n#del tr_x, te_x,tr_y,te_y\n#gc.collect()\n\nfolds = 5\nskf = KFold(n_splits=folds, shuffle=True, random_state=2021)\ntrain_mean = np.zeros(shape=[1,folds])\ntest_predict = np.zeros(shape=[X_test.shape[0], folds],dtype=float)\nk_fold_mape = []\nfeature_importance_df = pd.DataFrame()\n# Display/plot feature importance\ndef display_importances(feature_importance_df_):\n feature_importance_df_.to_csv('feature_importances.csv',index=False)\n cols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False)[:100].index\n best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\n best_features = best_features.groupby('feature',as_index = False)['importance'].mean()\n best_features = best_features.sort_values(by = 'importance',ascending=False)\n plt.figure(figsize=(8, 10))\n sns.barplot(x=\"importance\", y=\"feature\", data=best_features.sort_values(by=\"importance\", ascending=False))\n plt.title('LightGBM Features (avg over folds)')\n plt.tight_layout()\n plt.savefig('feature_importances.jpg')\n # plt.show()\n#use single model feature importance as best_feature_importances\nfeature_importance_df_ = pd.read_csv('best_feature_importances.csv')\ncols = feature_importance_df_[[\"feature\", \"importance\"]].groupby(\"feature\").mean().sort_values(by=\"importance\", ascending=False).index\nbest_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]\nbest_features = best_features.groupby('feature',as_index = False)['importance'].mean()\nbest_features = best_features.sort_values(by = 'importance',ascending=False)\ndata=best_features.sort_values(by=\"importance\", ascending=False)\nfeature_select = list(data['feature'].values)\nfeature_cols = feature_select\n\nrandom_seed = list(range(2021))\nmax_depth = [4,4,4,4,5,5,5,5,6,6,6,6,7,7,7]\nlambd1 = np.arange(0, 1, 0.0001)\nlambd2 = np.arange(0, 1, 0.0001)\nbagging_fraction = [i / 1000.0 for i in range(700, 800)]\nfeature_fraction = [i / 1000.0 for i in range(700, 800)]\nmin_child_weight = [i / 100.0 for i in range(150, 250)]\nn_feature = [i / 100.0 for i in range(1, 32,2)]\nmax_bin = list(range(130, 240))\nsubsample_for_bin = list(range(50000, 220000,10000))\nbagging_freq = [1,2,3,4,5,6,7,8,9,10,1,2,3,4,5]\nnum_leaves = list(range(130, 250))\n\n\nrandom.shuffle(random_seed)\nrandom.shuffle(max_depth)\nrandom.shuffle(lambd1)\nrandom.shuffle(lambd2)\nrandom.shuffle(bagging_fraction)\nrandom.shuffle(feature_fraction)\nrandom.shuffle(min_child_weight)\nrandom.shuffle(max_bin)\nrandom.shuffle(subsample_for_bin)\nrandom.shuffle(bagging_freq)\nrandom.shuffle(num_leaves)\nrandom.shuffle(n_feature)\n\n\nwith open('params.pkl', 'wb') as f:\n pickle.dump((random_seed, max_depth, lambd1,lambd2, bagging_fraction, feature_fraction, min_child_weight, max_bin,subsample_for_bin,bagging_freq,num_leaves,n_feature), f)\nfor iter in range(15):\n print('max_depth:',max_depth[iter],'random_seed:',random_seed[iter],'feature_fraction:',feature_fraction[iter],\n 
'bagging_fraction:',bagging_fraction[iter],'min_child_weight:',min_child_weight[iter],\n 'lambd1:',lambd1[iter],'lambd2:',lambd2[iter],'max_bin:',max_bin[iter],'num_leaves:',num_leaves[iter]\n ,'subsample_for_bin:',subsample_for_bin[iter],'bagging_freq:',bagging_freq[iter],'n_feature:',n_feature[iter])\nnround = 5000\nfor iter in range(15):\n if max_depth[iter]==4:\n nround = 10000\n elif max_depth[iter]==5:\n nround = 8000\n elif max_depth[iter]==6:\n nround = 6000\n elif max_depth[iter] == 7:\n nround = 5000\n X_train_r = X_train[feature_cols[:int(len(feature_cols)*0.7)]+\n feature_cols[int(len(feature_cols)*0.7):int(len(feature_cols)*0.7)+int(len(feature_cols)*n_feature[iter])]]\n X_test_r = X_test[feature_cols[:int(len(feature_cols) * 0.7)] +\n feature_cols[int(len(feature_cols) * 0.7):int(len(feature_cols) * 0.7) + int(\n len(feature_cols) * n_feature[iter])]]\n scores = 0\n threshold = 0\n print('start training......')\n print('训练集维度:',X_train_r.shape)\n print('测试集维度:',X_test_r.shape)\n for i, (trn_idx, val_idx) in enumerate(skf.split(X_train_r, y_train)):\n clf = lgb.LGBMRegressor(\n boosting_type='gbdt',\n objective='regression',\n n_estimators=nround,\n learning_rate=0.08,\n num_leaves=num_leaves[iter],\n max_bin=max_bin[iter],\n max_depth=max_depth[iter],\n random_state=random_seed[iter],\n subsample_for_bin=subsample_for_bin[iter],\n feature_fraction=feature_fraction[iter],\n bagging_fraction=bagging_fraction[iter],\n bagging_freq=bagging_freq[iter],\n min_child_weight=min_child_weight[iter],\n lambda_l1=lambd1[iter],\n lambda_l2=lambd2[iter],\n metric=None,\n n_jobs=30,\n device='gpu'\n )\n clf.fit(X_train_r.iloc[trn_idx], y_train.iloc[trn_idx], eval_set=[(X_train_r.iloc[trn_idx], y_train.iloc[trn_idx]), (X_train_r.iloc[val_idx], y_train.iloc[val_idx])],eval_metric='mape',verbose=100, early_stopping_rounds=200)\n\n print('predicting')\n val_predict = clf.predict(X_train_r.iloc[val_idx], num_iteration=clf.best_iteration_)\n test_predict[:,i] = clf.predict(X_test_r, num_iteration=clf.best_iteration_)\n k_fold_mape.append(MAPE(y_train.iloc[val_idx],val_predict))\n print(\"kfold_{}_mape_score:{} \".format(i, k_fold_mape[i]))\n\n print('Train set kfold {} mean mape:'.format(i), np.mean(k_fold_mape))\n #display_importances(feature_importance_df)\n test_head['result'] = np.mean(test_predict,axis=1)\n test_head['id'] = test_head['order_id']\n test_head[['id','result']].to_csv('random_result/submission_{}.csv'.format(iter),index=False)\n del X_train_r,X_test_r\n gc.collect()\n#merge\ncount = 0\nresult = 1\nfor name in os.listdir('random_result/'):\n tmp = pd.read_csv('random_result/'+name)\n if count == 0:\n result = tmp[['id']]\n tmp = tmp.rename(columns={'result':'result{}'.format(count)})\n result = result.merge(tmp,on='id',how='left')\n count += 1\nresult['result'] = result.drop('id',axis=1).sum(axis=1)\nresult['result'] = result['result']/count\nresult[['id','result']].to_csv('submission_merge.csv',index=False)\n"
}
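The training script above imports `MAPE` from a `utils` module that is not included in this dump; the standard definition it presumably matches, together with the per-seed prediction averaging performed in the final merge step, looks like this (a sketch, not the actual `utils` code):

```python
import numpy as np

def MAPE(y_true, y_pred):
    # Mean absolute percentage error, the competition metric
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return np.mean(np.abs((y_true - y_pred) / y_true))

# Blending: each of the 15 random-seed runs writes a submission; the final
# prediction is the row-wise mean across runs
preds = np.stack([np.array([100.0, 200.0]),    # run 0
                  np.array([110.0, 190.0])])   # run 1
print(preds.mean(axis=0))                      # [105. 195.]
print(MAPE([100, 200], preds.mean(axis=0)))    # 0.0375
```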
] | 44 |
txedo/isi-practicas-5 | https://github.com/txedo/isi-practicas-5 | 9dd4c8e87d7e7b95aca5ddd7c1087b3661fe97a2 | dd36cbe5459c7f29db2f24eacb7d78315f4ef63c | 8428faa5d708945dcc810d2b81248626690cda3f | refs/heads/master | 2016-09-06T05:35:29.202891 | 2015-03-29T16:01:32 | 2015-03-29T16:01:32 | 33,072,738 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.795918345451355,
"alphanum_fraction": 0.795918345451355,
"avg_line_length": 19.77777862548828,
"blob_id": "54aa4e0728fffe0d900942573a565b51851dcf7f",
"content_id": "d0e3b35ab5522d71c83bb7a07949b8846d2611a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 9,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/EventosCanales/MensajeMapaListener.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones.EventosCanales;\r\n\r\nimport java.util.EventListener;\r\n\r\npublic interface MensajeMapaListener extends EventListener {\r\n\t\r\n\tpublic void MensajeMapa(MensajeMapaEvent evt);\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.712455153465271,
"alphanum_fraction": 0.7206560969352722,
"avg_line_length": 46.36585235595703,
"blob_id": "8f5d0e85bd2adf6b4ef88b54835748148d2d4e9c",
"content_id": "aa5a681bdcd3bdf07b1a54b2d40c43580e623445",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1951,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 41,
"path": "/mya-ia/kohonen/main.py",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "import psyco\npsyco.full()\n\nfrom kohonen import *\nfrom fileHandler import *\nfrom abalone import *\n\nFACTOR_APRENDIZAJE_INICIAL = 0.9\nDIM = 4\n\nif __name__==\"__main__\":\n mapa = Kohonen()\n fileHandler = File_Handler()\n # Inicializamos cada neurona con el vector definido en el fichero\n datosPesosIniciales = fileHandler.read_lines_file(\"./ficheroPesos.txt\")\n mapa.inicializarNeuronas(datosPesosIniciales) \n # Establecemos el radio inicial a DIM-1, para afectar a todo el mapa\n radio = DIM -1 \n # Establecemos el numero de iteracion\n iteracion = 0\n # Establecemos el factor de aprendizaje inicial a 0.9, que es el maximo\n factorAprendizaje = FACTOR_APRENDIZAJE_INICIAL\n # Leemos todas las lineas del fichero de datos de los abalones y vamos reorganizando el mapa de Kohonen\n # Cada linea representa un abalone con los datos normalizados y separados los valores de los atributos por espacios\n datosAbalone = fileHandler.read_lines_file(\"./abalonesNormalizados_EntradaKohonen.txt\")\n # Evaluamos cada vector de entrada (abalone) con el mapa\n for d in datosAbalone:\n valoresAtributos = d.split()\n abalone = Abalone(float(valoresAtributos[0]), float(valoresAtributos[1]), float(valoresAtributos[2]), float(valoresAtributos[3]), float(valoresAtributos[4]), float(valoresAtributos[5]), float(valoresAtributos[6]))\n # Se evalua la similitud, obteniendo una neurona ganadora\n mapa.evaluarSimilitud(abalone)\n # Se cambian los pesos de la vecindad de la neurona ganadora\n mapa.variarPesoNeuronas(radio, abalone, factorAprendizaje)\n # Se varia el radio de vecindad y el factor de aprendizaje para la siguiente iteracion\n radio = mapa.variarGradoVecindad(iteracion)\n factorAprendizaje = mapa.variarFactorAprendizaje(iteracion)\n # Se aumenta la iteracion\n iteracion = iteracion + 1\n\n for i in mapa.neuronas:\n print (str(i))\n \n"
},
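The `Kohonen` class driven by `main.py` above lives in `kohonen.py`, which is not part of this dump. The loop it runs is the classic self-organising-map recipe: find the best-matching neuron, pull its neighbourhood toward the input, then decay the radius and learning rate each iteration. A minimal numpy sketch of one update step — the 4x4 grid matches `DIM = 4` and `lr=0.9` matches `FACTOR_APRENDIZAJE_INICIAL`, but the internals are an assumption, not the original class:

```python
import numpy as np

rng = np.random.default_rng(0)
DIM, N_FEATURES = 4, 7                 # 4x4 map, 7 abalone attributes
weights = rng.random((DIM, DIM, N_FEATURES))

def som_step(weights, x, radius, lr):
    # Best-matching unit: neuron with the smallest Euclidean distance to x
    d = np.linalg.norm(weights - x, axis=2)
    bmu = np.unravel_index(d.argmin(), d.shape)
    # Move every neuron within `radius` grid steps of the BMU toward x
    for i in range(DIM):
        for j in range(DIM):
            if max(abs(i - bmu[0]), abs(j - bmu[1])) <= radius:
                weights[i, j] += lr * (x - weights[i, j])
    return bmu

x = rng.random(N_FEATURES)             # one normalised abalone vector
print(som_step(weights, x, radius=DIM - 1, lr=0.9))
```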
{
"alpha_fraction": 0.6609907150268555,
"alphanum_fraction": 0.7012383937835693,
"avg_line_length": 32,
"blob_id": "b7ec0116e6927966b420b6ea6c63c8e85e52bef8",
"content_id": "a9be124d8bf446c72d684a54ef3f7b2c7edc2b58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 646,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 19,
"path": "/multimedia/faq-manager/install.sql",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "DROP DATABASE IF EXISTS mhrv;\r\n\r\nCREATE DATABASE mhrv CHARACTER SET utf8 COLLATE utf8_bin;\r\nUSE mhrv;\r\n\r\nCREATE TABLE H10_12_categoria_faq (\r\n id INT NOT NULL AUTO_INCREMENT,\r\n categoria VARCHAR(255) UNIQUE NOT NULL default '', \r\n PRIMARY KEY (id)\r\n) TYPE=InnoDB AUTO_INCREMENT=1;\r\n\r\nCREATE TABLE H10_12_faq (\r\n id INT NOT NULL AUTO_INCREMENT,\r\n pregunta VARCHAR(255) UNIQUE NOT NULL default '', \r\n respuesta VARCHAR(4096) NOT NULL default '',\r\n idCategoria INT,\r\n PRIMARY KEY (id),\r\n FOREIGN KEY (idCategoria) REFERENCES H10_12_categoria_faq (id) ON DELETE SET NULL ON UPDATE CASCADE\r\n) TYPE=InnoDB AUTO_INCREMENT=1;\r\n"
},
{
"alpha_fraction": 0.6695652008056641,
"alphanum_fraction": 0.6756521463394165,
"avg_line_length": 56.04999923706055,
"blob_id": "4d716b45b3df52a7969c4358a1e3f7a5855b5dd4",
"content_id": "e08cf46de343b76039d6fe150aca87d8e9acbbc3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "no_license",
"max_line_length": 311,
"num_lines": 20,
"path": "/mya-ia/kohonen/abalone.py",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "from math import *\nimport psyco\npsyco.full()\n\n# Quitamos el sexo para evitar mucha distancia entre vectores. Se tendra en cuenta a la hora de sacar el volumen \nclass Abalone:\n def __init__(self, length, diameter, height, whole_weight, shucked_weight, viscera_weight, shell_weight):\n self.length = length\n self.diameter = diameter\n self.height = height\n self.whole_weight = whole_weight\n self.shucked_weight = shucked_weight\n self.viscera_weight = viscera_weight\n self.shell_weight = shell_weight\n \n def obtenerSimilitud(self, other):\n return sqrt(pow(self.length-other.length, 2)+pow(self.diameter-other.diameter, 2)+pow(self.height-other.height, 2)+pow(self.whole_weight-other.whole_weight, 2)+pow(self.shucked_weight-other.shucked_weight, 2)+pow(self.viscera_weight-other.viscera_weight, 2)+pow(self.shell_weight-other.shell_weight, 2))\n\n def __str__ (self):\n return str(self.length) + \" \" + str(self.diameter) + \" \" + str(self.height) + \" \" + str(self.whole_weight) + \" \" + str(self.shucked_weight) + \" \" + str(self.viscera_weight) + \" \" + str(self.shell_weight)\n \n"
},
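`obtenerSimilitud` in the record above spells out a seven-term Euclidean distance by hand (and the `psyco` import there is a Python 2-era JIT that modern Python no longer needs or supports). An equivalent vectorised form, assuming the same seven attributes packed into arrays:

```python
import numpy as np

def similitud(a, b):
    # Same Euclidean distance as Abalone.obtenerSimilitud, but vectorised
    return np.linalg.norm(np.asarray(a) - np.asarray(b))

print(similitud([0.5, 0.4, 0.1, 0.6, 0.3, 0.2, 0.2],
                [0.4, 0.4, 0.2, 0.5, 0.3, 0.1, 0.2]))
```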
{
"alpha_fraction": 0.6990646719932556,
"alphanum_fraction": 0.7230581641197205,
"avg_line_length": 25.728260040283203,
"blob_id": "f06b8e8389632ce0d7d6a105b12b1cf792b9d11e",
"content_id": "e84f5aad626715ad5058e8810163fdfcd2a25edd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2459,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 92,
"path": "/sistemas-aprendizaje/aplicacionMineria/src/interfaz/JResultados.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package interfaz;\nimport com.rapidminer.gui.processeditor.ResultDisplay;\nimport com.rapidminer.operator.IOContainer;\n\nimport java.awt.BorderLayout;\nimport java.awt.event.ActionEvent;\nimport java.awt.event.ActionListener;\nimport javax.swing.JButton;\nimport javax.swing.JFrame;\nimport javax.swing.JPanel;\n\nimport javax.swing.WindowConstants;\nimport javax.swing.SwingUtilities;\n\n\n/**\n* This code was edited or generated using CloudGarden's Jigloo\n* SWT/Swing GUI Builder, which is free for non-commercial\n* use. If Jigloo is being used commercially (ie, by a corporation,\n* company or business for any purpose whatever) then you\n* should purchase a license for each developer using Jigloo.\n* Please visit www.cloudgarden.com for details.\n* Use of Jigloo implies acceptance of these licensing terms.\n* A COMMERCIAL LICENSE HAS NOT BEEN PURCHASED FOR\n* THIS MACHINE, SO JIGLOO OR THIS CODE CANNOT BE USED\n* LEGALLY FOR ANY CORPORATE OR COMMERCIAL PURPOSE.\n*/\npublic class JResultados extends javax.swing.JDialog {\n\n\t{\n\t\t//Set Look & Feel\n\t\ttry {\n\t\t\tjavax.swing.UIManager.setLookAndFeel(\"com.sun.java.swing.plaf.windows.WindowsLookAndFeel\");\n\t\t} catch(Exception e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\n\tprivate JPanel jPanel1;\n\tprivate ResultDisplay resultDisplay1;\n\tprivate JButton jButton1;\n\t\n\tpublic JResultados() {\n\t\tsuper();\n\t\tinitGUI();\n\t}\n\t\n\tprivate void initGUI() {\n\t\ttry {\n\t\t\tsetDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);\n\t\t\tgetContentPane().setLayout(null);\n\t\t\t{\n\t\t\t\tjPanel1 = new JPanel();\n\t\t\t\tgetContentPane().add(jPanel1, \"Center\");\n\t\t\t\tjPanel1.setLayout(null);\n\t\t\t\tjPanel1.setBounds(0, 0, 1233, 894);\n\t\t\t\t{\n\t\t\t\t\tresultDisplay1 = new ResultDisplay();\n\t\t\t\t\tjPanel1.add(resultDisplay1);\n\t\t\t\t\tresultDisplay1.setBounds(10, 11, 1170, 717);\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tjButton1 = new JButton();\n\t\t\t\t\tjPanel1.add(jButton1);\n\t\t\t\t\tjButton1.setText(\"Cerrar\");\n\t\t\t\t\tjButton1.setBounds(1105, 739, 75, 23);\n\t\t\t\t\tjButton1.addActionListener(new ActionListener() {\n\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\tjButton1ActionPerformed(evt);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t\t\n\t\t\tthis.setPreferredSize(new java.awt.Dimension(1200, 810));\n\t\t\tthis.setTitle(\"Resultados\");\n\t\t\tpack();\n\t\t} catch (Exception e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\t\n\tpublic void setResultados(IOContainer contenedor) {\n\t\tresultDisplay1.setData(contenedor, null);\n\t\tresultDisplay1.setVisible(true);\n\t}\n\t\n\tprivate void jButton1ActionPerformed(ActionEvent evt) {\n\t\tthis.dispose();\n\t}\n\n}\n"
},
{
"alpha_fraction": 0.6697761416435242,
"alphanum_fraction": 0.7052238583564758,
"avg_line_length": 17.214284896850586,
"blob_id": "97919a8c7701ffc91a31590c75615672e974651a",
"content_id": "983438284ad88e4e0cfa1742ee71e8e597d2ce83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 28,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/ClienteJSDT.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones;\r\nimport com.sun.media.jsdt.*;\r\n\r\nimport dominio.conocimiento.Roles;\r\n\r\npublic class ClienteJSDT implements Client {\r\n\r\n\tprivate static final long serialVersionUID = 4706016223824672736L;\r\n\tprivate String nombre;\r\n\tprivate Roles rol;\r\n\r\n\tpublic ClienteJSDT(String nombre, Roles rol) {\r\n\t\tthis.nombre = nombre;\r\n\t\tthis.rol = rol;\r\n\t}\r\n\r\n\tpublic Object authenticate(AuthenticationInfo info) {\r\n\t\treturn null;\r\n\t}\r\n\r\n\tpublic String getName() {\r\n\t\treturn nombre;\r\n\t}\r\n\t\r\n\tpublic Roles getRol() {\r\n\t\treturn rol;\r\n\t}\r\n}"
},
{
"alpha_fraction": 0.6944444179534912,
"alphanum_fraction": 0.7147147059440613,
"avg_line_length": 24.078432083129883,
"blob_id": "d74443bc6fd567ded311f6164571cfb088f98ef4",
"content_id": "f8c99c9dd7007bc12b166d8cfe590f31dbaac033",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 51,
"path": "/sistemas-colaboracion/plancode/app/src/presentacion/auxiliares/panelConImagenFondo.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package presentacion.auxiliares;\r\n\r\nimport java.awt.Dimension;\r\nimport java.awt.Graphics;\r\nimport java.awt.Graphics2D;\r\nimport java.awt.Rectangle;\r\nimport java.awt.TexturePaint;\r\nimport java.net.URL;\r\n\r\nimport javax.swing.ImageIcon;\r\nimport javax.swing.JPanel;\r\n\r\npublic class panelConImagenFondo extends JPanel{\r\n\t\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = -2643504940011956438L;\r\n\tprivate TexturePaint fondo;\r\n\t \r\n\tpublic panelConImagenFondo() {\r\n\t\tsuper();\r\n\t}\r\n\t\t\r\n\tpublic void setMapaFondoRecibido(ImageIcon fondo) {\r\n\t\t// Se carga el mapa recibido\r\n\t\tthis.fondo = handlerImagenFondoPanel.cargaImageIcon(fondo, this);\r\n\t}\r\n\t\r\n\tpublic void setMapaFondoLocal(URL urlMapa) {\r\n\t\t// Se crea el mapa a partir de la ruta local y se pone como fondo\r\n\t\thandlerImagenFondoPanel.creaImageURL(urlMapa, this);\r\n\t\tthis.fondo = handlerImagenFondoPanel.cargaTextura(this);\r\n\t}\r\n\t\r\n\tpublic void paintComponent(Graphics g)\r\n\t{\r\n\t\tsuper.paintComponent(g);\r\n\t\tGraphics2D g2d = (Graphics2D)g;\r\n\t\tDimension d = getSize();\r\n\t\tg2d.setPaint(fondo);\r\n\t\tg2d.fill(new Rectangle(0,0,d.width,d.height));\r\n\t}\r\n\t\r\n\tprotected void finalize() throws Throwable {\r\n\t\tfondo = null;\r\n\t\tSystem.gc(); // lanza el colector de basura\r\n\t\tSystem.runFinalization(); // para ejecutar los \"finalize\" del resto de clases\r\n\t\tsuper.finalize();\r\n\t}\r\n}\r\n\r\n"
},
{
"alpha_fraction": 0.6159999966621399,
"alphanum_fraction": 0.6171428561210632,
"avg_line_length": 21.078947067260742,
"blob_id": "af75b70ba4dee9b4025f5958194d74f7827c5e49",
"content_id": "a63b1f59ee34f9a5cfc965daa982f0bb4149d9c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 876,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 38,
"path": "/multimedia/faq-manager/broker.php",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "WINDOWS-1250",
"text": "<?php\r\nfunction conectar_bd ()\r\n{\r\n\tinclude 'config.php';\r\n\t$id = mysql_connect($server, $login, $password)\r\n\t\t\tOR die (\"Imposible conectar al servidor MySQL\");\r\n\t$descriptor = mysql_select_db ($database, $id)\r\n\t\t\t\t\tOR die (\"&estado=error&mensaje=Imposible abrir el esquema de la base de datos\");\r\n\treturn $id;\r\n}\r\n\r\nfunction ejecutar_consulta($sql, $id)\r\n{\r\n\t$result = mysql_query ($sql, $id)\r\n\t\t\tOR die (\"&estado=error&mensaje=Error al realizar la consulta: \" . mysql_error());\r\n\treturn $result;\r\n}\r\n\r\nfunction cerrar_bd ($id)\r\n{\r\n\tmysql_close ($id);\r\n}\r\n\r\n\r\n\r\n/*\r\ninclude 'broker.php':\r\n$id = conectar ();\r\n$sql = \"la consulta que queramos realizar\";\r\n$usuario_consulta = mysql_query ($sql, $id)\r\n\t\t\tOR die (Header (\"Location: index.php?mod=error&error=1\")\r\n\t\t\tAND exit);\r\n\t\t\t\r\n-----------------------\r\nCerrar la conexión a la base de datos con:\r\nmysql_close($id);\r\n*/\r\n?>"
},
{
"alpha_fraction": 0.6564195156097412,
"alphanum_fraction": 0.690777599811554,
"avg_line_length": 16.433332443237305,
"blob_id": "d989cf308ed7e0277de06114309521a66df85013",
"content_id": "65b546bea170bdadce82e59e9e741e6cc99e2794",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/EventosCanales/MensajeRolEvent.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones.EventosCanales;\r\n\r\nimport java.util.EventObject;\r\n\r\nimport dominio.conocimiento.Roles;\r\n\r\npublic class MensajeRolEvent extends EventObject {\r\n\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = 6343783179343926816L;\r\n\r\n\tprivate String nombre;\r\n\tprivate Roles rol;\r\n\r\n\tpublic MensajeRolEvent(Object obj, String nombre, String rol) {\r\n\t\tsuper(obj);\r\n\t\tthis.nombre = nombre;\r\n\t\tthis.rol = Roles.valueOf(rol);\r\n\t}\r\n\r\n\tpublic String getNombre() {\r\n\t\treturn nombre;\r\n\t}\r\n\r\n\tpublic Roles getRol() {\r\n\t\treturn rol;\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.7114731669425964,
"alphanum_fraction": 0.7264086604118347,
"avg_line_length": 26.326923370361328,
"blob_id": "837ae6d9f6e013096428c6e18c26c425bfa93b55",
"content_id": "2e66f49e146e9a237d80ebd50bfc3061a0e25d91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1473,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 52,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/ConsumidorCanalTrazos.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones;\r\n\r\nimport java.io.StreamCorruptedException;\r\n\r\nimport javax.swing.event.EventListenerList;\r\n\r\nimport presentacion.auxiliares.Dialogos;\r\n\r\n\r\nimport com.sun.media.jsdt.ChannelConsumer;\r\nimport com.sun.media.jsdt.Data;\r\nimport comunicaciones.EventosCanales.MensajeTrazoEvent;\r\nimport comunicaciones.EventosCanales.MensajeTrazoListener;\r\n\r\npublic class ConsumidorCanalTrazos implements ChannelConsumer {\r\n\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = 3012373241632069873L;\r\n\tprivate EventListenerList listenerList;\r\n\t\r\n\tpublic ConsumidorCanalTrazos () {\r\n\t\tlistenerList = new EventListenerList();\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic void dataReceived(Data d) {\r\n\t\tObject[] listeners;\r\n\t\tint i;\r\n\t\t\r\n\t\t// Notificamos que se ha recibido un mensaje para dibujar trazos\r\n\t\tlisteners = listenerList.getListenerList();\r\n\t\tfor(i = 0; i < listeners.length; i += 2) {\r\n\t\t\tif(listeners[i] == MensajeTrazoListener.class) {\r\n\t\t\t\ttry {\r\n\t\t\t\t\t((MensajeTrazoListener)listeners[i + 1]).MensajeTrazo(new MensajeTrazoEvent(this, d.getSenderName(), d.getDataAsObject()));\r\n\t\t\t\t} catch (StreamCorruptedException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", e.getMessage());\r\n\t\t\t\t} catch (ClassNotFoundException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", e.getMessage());\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic void addMensajeTrazoListener(MensajeTrazoListener listener) {\r\n\t\tlistenerList.add(MensajeTrazoListener.class, listener);\r\n\t}\r\n\t\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6637930870056152,
"avg_line_length": 14.571428298950195,
"blob_id": "5837673d72b169da0fd36dbf82e242178b107396",
"content_id": "ab47d5d36ebd7259a51b75283f47b7b420ca3aa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 464,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 28,
"path": "/sistemas-colaboracion/plancode/app/src/dominio/conocimiento/Usuario.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package dominio.conocimiento;\r\n\r\nimport java.awt.Color;\r\nimport java.io.Serializable;\r\n\r\n\r\npublic class Usuario implements Serializable {\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = -408343270653594450L;\r\n\tprivate Roles rol;\r\n\tprivate Color color;\r\n\r\n\tpublic Usuario(Roles rol, Color color) {\r\n\t\tthis.rol = rol;\r\n\t\tthis.color = color;\r\n\t}\r\n\r\n\tpublic Roles getRol() {\r\n\t\treturn rol;\r\n\t}\r\n\r\n\tpublic Color getColor() {\r\n\t\treturn color;\r\n\t}\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.6958855390548706,
"avg_line_length": 29.16216278076172,
"blob_id": "4253ce0d169a8e23b1a64b636c26536dfc704c54",
"content_id": "3243002ccb88b923defee64878679eb6256a23c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 1133,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 37,
"path": "/ari/search-engine/readme.txt",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "Software necesario que debe ser instalado en el sistema sobre el que\nse ejecutará la aplicación:\n· python2.5 o superior\n· gtk\n· pygtk\n· mysql\n· python-mysqldb\n· psyco\n\nInicialmente se debe crear la base de datos que utilizará el sistema\ndocumental. En la distribución del software se ofrece un fichero\n\"install\" bajo el directorio raíz, que contiene las\nsentencias necesarias para crear la base de datos y sus tablas\nmediante el intérprete de MySQL. Para acceder al intérprete de\nsu servidor MySQL escriba la siguiente sentencia en un terminal:\nmysql -u root -p\n\nLlegados a este punto ya tiene su sistema listo para ser utilizado. Si\ntiene alguna duda sobre su uso consulte el Manual de Usuario\nsuministrado con la documentación del sistema.\n\n\npyDMS v1.0 (Search Engine + Index Engine)\n-----------------------------------------\n./searchEngineGUI.py\n\n\nIndex Engine (standalone)\n-------------------------\nInterfaz gráfica de usuario\n---------------------------\n./indexEngineGUI.py\n\nInterfaz basada en texto\n------------------------\n./indexEngineCMD.py ([-f | --file] <file_path> | [-d | --directory]\n<directory_path>)\n\n\n"
},
{
"alpha_fraction": 0.7314386963844299,
"alphanum_fraction": 0.7392539978027344,
"avg_line_length": 36.5616455078125,
"blob_id": "bce896f5ce9d8dd52e5603dd1adc035db8ae7da3",
"content_id": "f33fb043470c126ad0c5f57dbd60e1bc0ac13403",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2821,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 73,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/ConsumidorCanalGestionRol.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package comunicaciones;\r\n\r\nimport javax.swing.event.EventListenerList;\r\n\r\nimport presentacion.auxiliares.Dialogos;\r\n\r\nimport com.sun.media.jsdt.ChannelConsumer;\r\nimport com.sun.media.jsdt.ConnectionException;\r\nimport com.sun.media.jsdt.Data;\r\nimport com.sun.media.jsdt.InvalidClientException;\r\nimport com.sun.media.jsdt.NoSuchChannelException;\r\nimport com.sun.media.jsdt.NoSuchClientException;\r\nimport com.sun.media.jsdt.NoSuchConsumerException;\r\nimport com.sun.media.jsdt.NoSuchSessionException;\r\nimport com.sun.media.jsdt.PermissionDeniedException;\r\nimport com.sun.media.jsdt.TimedOutException;\r\n\r\nimport comunicaciones.EventosCanales.MensajeRolEvent;\r\nimport comunicaciones.EventosCanales.MensajeRolListener;\r\nimport excepciones.NoSlotsDisponiblesException;\r\n\r\n\r\npublic class ConsumidorCanalGestionRol implements ChannelConsumer {\r\n\t\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = -2302586415061386684L;\r\n\tprivate EventListenerList listenerList;\r\n\t\r\n\tpublic ConsumidorCanalGestionRol () {\r\n\t\tlistenerList = new EventListenerList();\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic void dataReceived(Data d) {\r\n\t\tObject[] listeners;\r\n\t\tint i;\r\n\t\t\r\n\t\t// Notificamos que se ha recibido un mensaje de gestión\r\n\t\tlisteners = listenerList.getListenerList();\r\n\t\tfor(i = 0; i < listeners.length; i += 2) {\r\n\t\t\tif(listeners[i] == MensajeRolListener.class) {\r\n\t\t\t\ttry {\r\n\t\t\t\t\t((MensajeRolListener)listeners[i + 1]).MensajeRolRecibido(new MensajeRolEvent(this, d.getSenderName(), d.getDataAsString()));\r\n\t\t\t\t} catch (ConnectionException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No se puede establecer una conexión\");\r\n\t\t\t\t} catch (InvalidClientException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"Cliente de destino inválido\");\r\n\t\t\t\t} catch (NoSuchChannelException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No existe el canal\");\r\n\t\t\t\t} catch (NoSuchClientException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No se encuentra el cliente de destino\");\r\n\t\t\t\t} catch (NoSuchSessionException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No se encuentra la sesión\");\r\n\t\t\t\t} catch (PermissionDeniedException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"Permiso denegado\");\r\n\t\t\t\t} catch (TimedOutException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"Tiempo de espera agotado\");\r\n\t\t\t\t} catch (NoSlotsDisponiblesException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No se puede iniciar sesión porque el sistema ha alcanzado su capacidad máxima\");\r\n\t\t\t\t} catch (NoSuchConsumerException e) {\r\n\t\t\t\t\tDialogos.mostrarDialogoError(null, \"Error\", \"No existe el consumidor\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic void addMensajeRolRecibidoListener(MensajeRolListener listener) {\r\n\t\tlistenerList.add(MensajeRolListener.class, listener);\r\n\t}\r\n\t\r\n}\r\n"
},
{
"alpha_fraction": 0.6166139841079712,
"alphanum_fraction": 0.6234632730484009,
"avg_line_length": 28.126983642578125,
"blob_id": "649c33168b06bbdd42a58c1be67c9257a5803650",
"content_id": "507674c71c17ca595750611873b2fc91689a26fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 5708,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 189,
"path": "/sistemas-colaboracion/plancode/app/src/presentacion/CanvasPaint.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package presentacion;\r\n\r\n/**\r\n * REFERENCIA : http://www.chuidiang.com/java/codigo_descargable/appletpaint.php\r\n */\r\n\r\nimport java.awt.BasicStroke;\r\nimport java.awt.Color;\r\nimport java.awt.Graphics;\r\nimport java.awt.Graphics2D;\r\nimport java.awt.geom.Point2D;\r\nimport java.util.LinkedList;\r\nimport presentacion.auxiliares.GestorTrazos;\r\nimport presentacion.auxiliares.ListenerArrastre;\r\nimport presentacion.auxiliares.ListenerPinchar;\r\n\r\nimport javax.swing.JPanel;\r\n\r\nimport dominio.conocimiento.InfoTrazo;\r\nimport dominio.conocimiento.Trazo;\r\nimport dominio.control.ControladorPrincipal;\r\n\r\n\r\n/**\r\n * Clase que hereda de JPanel y que permite dibujar trazos, actuando como un canvas\r\n * \r\n */\r\npublic class CanvasPaint extends JPanel\r\n{\r\n\r\n private static final long serialVersionUID = 3978706198935583032L;\r\n\r\n /** Objeto para gestionar lo relacionado con trazos (dibujar/eliminar) */\r\n private GestorTrazos gestorT = null;\r\n\r\n /** Objeto manejador de los arrastres de ratón (para dibujar el trazo) */\r\n private ListenerArrastre listenerA = null;\r\n \r\n /** Objeto manejador para detectar donde se pincha con el ratón (para eliminar el trazo) */\r\n private ListenerPinchar listenerP = null;\r\n\r\n /** Lista de trazos dibujados */\r\n private LinkedList<Trazo> trazos = new LinkedList<Trazo>();\r\n\r\n private boolean modoEliminar = false;\r\n private boolean listenersEstablecidos = false;\r\n \r\n \r\n // Se inicializa el gestor de trazos y los listener para arrastrar el ratón y pinchar con el ratón\r\n public CanvasPaint(ControladorPrincipal c)\r\n {\r\n gestorT = new GestorTrazos(trazos, this, c);\r\n listenerA = new ListenerArrastre(gestorT);\r\n listenerP = new ListenerPinchar(gestorT);\r\n // Se pasa el propio canvas, para poder cambiar la imagen del cursor cuando éste entra en ese área\r\n listenerP.setComponent(this);\r\n }\r\n \r\n public void noAction() {\r\n \t// Se eliminan los listeners del canvas\r\n removeMouseMotionListener(listenerA);\r\n removeMouseListener(listenerP);\r\n listenersEstablecidos = false;\r\n }\r\n /** \r\n * Pone el modo de dibujo de trazos.\r\n * En este caso, el listener de \"pinchar\" no tiene ninguna acción. \r\n */\r\n public void modoPintarTrazo()\r\n {\r\n \tif (!listenersEstablecidos) {\r\n \t\t // Se añaden los listeners al canvas\r\n addMouseMotionListener(listenerA);\r\n addMouseListener(listenerP);\r\n listenersEstablecidos = true;\r\n \t}\r\n \t// El listener de arrastre recibe el gestor de trazos para ir dibujando los trazos\r\n listenerA.setAccion(gestorT);\r\n listenerP.setAccion(null);\r\n\r\n }\r\n \r\n /**\r\n * Pone el modo para eliminar trazos. En este caso, el listener de arrastre no tiene acción pero\r\n * sí tiene el listener de \"pinchar\", ya que se elimina un trazo al pinchar cerca de él. 
\r\n */\r\n public void modoEliminarTrazo() {\r\n \tif (!listenersEstablecidos) {\r\n \t\t // Se añaden los listeners al canvas\r\n addMouseMotionListener(listenerA);\r\n addMouseListener(listenerP);\r\n listenersEstablecidos = true;\r\n \t}\r\n \tlistenerA.setAccion(null);\r\n \tlistenerP.setAccion(gestorT);\r\n \tmodoEliminar = true;\r\n }\r\n\r\n public void update(Graphics g)\r\n {\r\n \t// Si es el modo eliminar trazos, se llama al padre para borrar el canvas completo.\r\n \t// Luego se dibujan los trazos que sean necesarios\r\n \tif (modoEliminar) { \t\r\n \t\tsuper.update(g);\r\n \t}\r\n \tpaint(g);\r\n }\r\n\r\n /**\r\n * Dibuja los trazos en este componente\r\n */\r\n public void paint(Graphics g)\r\n {\r\n \tsuper.paint(g);\r\n for (int i = 0; i < trazos.size(); i++)\r\n {\r\n dibujaTrazo(trazos.get(i), g);\r\n }\r\n }\r\n\r\n public void clear() {\r\n \ttrazos.clear();\r\n \tthis.repaint();\r\n }\r\n \r\n /**\r\n * Dibuja un trazo en este componente.\r\n */\r\n private void dibujaTrazo(Trazo trazo, Graphics g)\r\n { \t\r\n g.setColor(trazo.getColor());\r\n // Aumentamos el grosor del trazo, para que se vea mejor sobre el mapa\r\n ((Graphics2D)g).setStroke( new BasicStroke(4) ); \r\n Point2D p0 = trazo.getPunto(0);\r\n for (int i = 0; i < trazo.getNumeroPuntos() - 1; i++)\r\n {\r\n Point2D p1 = trazo.getPunto(i + 1);\r\n g.drawLine((int) p0.getX(), (int) p0.getY(), (int) p1.getX(),\r\n (int) p1.getY());\r\n p0 = p1;\r\n }\r\n }\r\n\r\n /**\r\n * Cambia el color de dibujo del trazo.\r\n */\r\n public void setColorActual(Color colorActual)\r\n {\r\n gestorT.setColorActual(colorActual);\r\n }\r\n\r\n\tpublic LinkedList<Trazo> getTrazos() {\r\n\t\treturn trazos;\r\n\t}\r\n\r\n\tpublic void setTrazos(InfoTrazo info) {\r\n\t\t\r\n\t\t// Se limpia el canvas si se recibe el mensaje correspondiente\r\n\t\tif (info.isClear()) {\r\n\t\t\tthis.clear();\r\n\t\t}\r\n\t\t\r\n\t\t// Si se acaba de conectar, se establecen los trazos del resto de clientes\r\n\t\telse if (info.isRecienConectado()) {\r\n\t\t\tthis.trazos = (LinkedList<Trazo>)info.getTrazos().clone();\r\n\t\t\tgestorT.setTrazos(trazos);\r\n\t\t}\r\n\t\t\r\n\t\t// Si se está dibujando, se añade el trazo recibido y se borran aquellos trazos no definitivos\r\n\t\telse if (info.isDibujando()) {\r\n\t\t\t\t// Copia auxiliar de trazos\r\n\t\t\t\tLinkedList<Trazo> aux = new LinkedList<Trazo>();\r\n\t\t\t\taux = (LinkedList<Trazo>)this.trazos.clone();\r\n\t\t\t\tfor (int i=0; i<aux.size(); i++) {\r\n\t\t\t\t\tif (!aux.get(i).isTerminado()) {\r\n\t\t\t\t\t\tthis.trazos.remove(aux.get(i));\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\tthis.trazos.add(info.getTrazo());\r\n\t\t\t\r\n\t\t}\r\n\t\t// Se elimina el trazo recibido\r\n\t\telse {\r\n\t\t\tthis.trazos.remove(info.getTrazo());\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.6526671648025513,
"alphanum_fraction": 0.6688389778137207,
"avg_line_length": 45.022220611572266,
"blob_id": "248c854504c193499646b838469fad85c2f30f5e",
"content_id": "53fcd81caabb9def2a6b584ce932a62d45b3b7b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4143,
"license_type": "no_license",
"max_line_length": 225,
"num_lines": 90,
"path": "/mya-ia/kohonen/kohonen.py",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "import psyco\nfrom abalone import *\npsyco.full()\n\nDIM = 4\nINFINITO = 999999\nFACTOR_APRENDIZAJE_INICIAL = 0.9\nFACTOR_ESCALA = 510\n\nclass Kohonen:\n\n def __init__(self):\n self.indiceWinner = None\n self.neuronas = []\n\t\t\n\t# \"datos\" son las lineas del fichero de texto, donde cada linea representa los datos de cada neurona (los 10 atributos)\n def inicializarNeuronas(self, datos):\n for i in datos:\n # Separamos los atributos por el espacio, para poder tomar los numeros decimales\n valoresAtributos = i.split()\n\t\t\t# Se van tomando los atributos de cada linea del fichero. Entre un atributo y otro del fichero hay un espacio\n neurona = Abalone(float(valoresAtributos[0]), float(valoresAtributos[1]), float(valoresAtributos[2]), float(valoresAtributos[3]), float(valoresAtributos[4]), float(valoresAtributos[5]), float(valoresAtributos[6]))\n self.neuronas.append(neurona)\n\t\t\n\t\t\n\t# \"abalone\" representa cada uno de los vectores de entrada al mapa de kohonen\n def evaluarSimilitud(self, abalone):\n\t distancia = INFINITO\n\t distanciaAux = 0\n\t for i in self.neuronas:\n\t distanciaAux = abalone.obtenerSimilitud(i)\n\t if distancia > distanciaAux:\n\t distancia = distanciaAux\n\t self.indiceWinner = self.neuronas.index(i)\n\t \n\t \n def variarGradoVecindad(self, iteracion):\n\t # Empezamos con un radio alto y terminamos con un radio bajo\n radio = 0\n if iteracion < 1000: radio = 3\n elif iteracion > 1000 and iteracion < 2000: radio = 2\n elif iteracion > 2000 and iteracion < 3000: radio = 1\n elif iteracion > 3000: radio = 0\n return radio\n\t \n\t \n def calcularVecindad (self, radio):\n vecinas = []\n if radio == 0:\n vecinas.append(self.neuronas[self.indiceWinner])\n elif radio == DIM-1:\n # Con radio DIM - 1, las vecinas son todas las neuronas. 
Se hace una copia de la lista para evitar problemas de referencias\n vecinas = self.neuronas[:]\n else:\n fila = self.indiceWinner / DIM\n columna = self.indiceWinner % DIM\n limiteIzquierdo = columna - radio\n if limiteIzquierdo < 0: limiteIzquierdo = 0\n limiteDerecho = columna + radio\n if limiteDerecho > DIM-1: limiteDerecho = DIM-1 \n limiteSuperior = fila - radio\n if limiteSuperior < 0: limiteSuperior = 0\n limiteInferior = fila+radio\n if limiteInferior > DIM-1: limiteInferior = DIM-1\n for i in range(limiteSuperior, limiteInferior+1):\n vecinas.extend(self.neuronas[limiteIzquierdo+i*DIM:limiteDerecho+i*DIM])\n return vecinas\n\t\n\t\n\t# El factor de aprendizaje decrece mas rapido al principio y se estabiliza al final, quedando entre 0 y 0.1\n def variarFactorAprendizaje(self, iteracion):\n\t return FACTOR_APRENDIZAJE_INICIAL/(1+iteracion/FACTOR_ESCALA)\n\t\n\n\t# Se varia el peso de las neuronas de la vecindad (segun el radio), aplicando la formula de clase\n\t# \"abalone\" corresponde al vector de entrada\n def variarPesoNeuronas(self, radio, abalone, factorAprendizaje):\n vecinas = self.calcularVecindad(radio)\n for neurona in vecinas:\n neurona.length = neurona.length + (abalone.length - neurona.length)*factorAprendizaje\n neurona.diameter = neurona.diameter + (abalone.diameter - neurona.diameter)*factorAprendizaje\n neurona.height = neurona.height + (abalone.height - neurona.height)*factorAprendizaje\n neurona.whole_weight = neurona.whole_weight + (abalone.whole_weight - neurona.whole_weight)*factorAprendizaje\n neurona.shucked_weight = neurona.shucked_weight + (abalone.shucked_weight - neurona.shucked_weight)*factorAprendizaje\n neurona.viscera_weight = neurona.viscera_weight + (abalone.viscera_weight - neurona.viscera_weight)*factorAprendizaje\n neurona.shell_weight = neurona.shell_weight + (abalone.shell_weight - neurona.shell_weight)*factorAprendizaje\n\t \n\t \n def getNeuronaGanadora(self):\n return self.indiceWinner\n\n"
},
{
"alpha_fraction": 0.578646719455719,
"alphanum_fraction": 0.5878734588623047,
"avg_line_length": 31.985074996948242,
"blob_id": "e5ba7cfe4fb0e253251d7ec30244d9c6170d8922",
"content_id": "c20eb921293cfbd929ece3125143006e2bbdb597",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 4554,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 134,
"path": "/multimedia/faq-manager/faq-dao.php",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "<?php\r\n// Comprobamos que esta definido el tipo de operacion (add, del, edit, view)\r\nif (isset($_GET['op'])) {\r\n\tinclude 'broker.php';\r\n\t$operacion = $_GET['op'];\r\n\tif ($operacion == \"add\") {\r\n\t\tif (isset($_GET['pregunta']) && isset($_GET['respuesta']) && isset($_GET['categoria'])) {\r\n\t\t\t$id = conectar_bd();\r\n\t\t\t$categoria = $_GET['categoria'];\r\n\t\t\t$sql = \"SELECT id FROM H10_12_categoria_faq WHERE categoria='$categoria'\";\r\n\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t$fila = mysql_fetch_array ($res);\r\n\t\t\t$idCategoria = $fila[\"id\"];\r\n\t\t\t$sql = \"INSERT INTO H10_12_faq (pregunta, respuesta, idCategoria) VALUES ('\".$_GET['pregunta'].\"','\".$_GET['respuesta'].\"',\".$idCategoria.\")\";\r\n\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\tif ($res) {\r\n\t\t\t\techo \"&estado=ok\";\r\n\t\t\t\techo \"&mensaje=La pregunta se ha creado con exito.\";\r\n\t\t\t}\r\n\t\t\telse {\r\n\t\t\t\techo \"&estado=error\";\r\n\t\t\t\techo \"&mensaje=No se ha podido crear la pregunta. \" . mysql_error();\r\n\t\t\t}\r\n\t\t\tcerrar_bd ($id);\r\n\t\t}\r\n\t\telse {\r\n\t\t\techo \"&estado=error\";\r\n\t\t\techo \"&mensaje=Debe especificar la pregunta, la respuesta y la categoria de la misma.\";\r\n\t\t}\r\n\t}\r\n\telse if ($operacion == \"view\") {\r\n\t\t$id = conectar_bd();\r\n\t\tif (isset($_GET['pregunta'])) {\r\n\t\t\t$pregunta = $_GET['pregunta'];\r\n\t\t\tif ($pregunta!=\"\") {\r\n\t\t\t\t// Dada una pregunta -> devolver respuesta y categoria\r\n\t\t\t\t$sql = \"SELECT respuesta,categoria FROM H10_12_faq f, H10_12_categoria_faq c WHERE f.idCategoria=c.id AND f.pregunta='$pregunta'\";\r\n\t\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t\t$fila = mysql_fetch_array ($res);\r\n\t\t\t\t$respuesta = $fila[\"respuesta\"];\r\n\t\t\t\t$categoria = $fila[\"categoria\"];\r\n\t\t\t\techo \"&respuesta=$respuesta&categoria=$categoria\";\r\n\t\t\t\techo \"&estado=ok\";\r\n\t\t\t}\r\n\t\t}\r\n\t\telse if (isset($_GET['categoria'])) {\r\n\t\t\t$categoria = $_GET['categoria'];\r\n\t\t\tif ($categoria!=\"\") {\r\n\t\t\t\t// Dada una categoria -> devolver todas sus preguntas y respuestas\r\n\t\t\t\t$sql = \"SELECT pregunta,respuesta FROM H10_12_faq f, H10_12_categoria_faq c WHERE f.idCategoria=c.id AND c.categoria='$categoria' ORDER BY categoria\";\r\n\t\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t\t$contador = 0;\r\n\t\t\t\twhile ($fila = mysql_fetch_array ($res)) {\r\n\t\t\t\t\t$pregunta = $fila[\"pregunta\"];\r\n\t\t\t\t\t$respuesta = $fila[\"respuesta\"];\r\n\t\t\t\t\techo \"&pregunta$contador=$pregunta&respuesta$contador=$respuesta\";\r\n\t\t\t\t\t$contador++;\r\n\t\t\t\t}\r\n\t\t\t\techo \"&estado=ok\";\r\n\t\t\t\techo \"&contador=$contador\";\r\n\t\t\t}\r\n\t\t}\r\n\t\telse {\r\n\t\t\t// Si no se indica nada -> devolver todos los títulos\r\n\t\t\t$sql = \"SELECT pregunta FROM H10_12_faq\";\r\n\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t$contador = 0;\r\n\t\t\twhile ($fila = mysql_fetch_array ($res)) {\r\n\t\t\t\t$pregunta = $fila[\"pregunta\"];\r\n\t\t\t\techo \"&pregunta$contador=$pregunta\";\r\n\t\t\t\t$contador++;\r\n\t\t\t}\r\n\t\t\techo \"&estado=ok\";\r\n\t\t\techo \"&contador=$contador\";\r\n\t\t}\r\n\t\tcerrar_bd ($id);\r\n\t}\r\n\telse if ($operacion == \"modify\") {\r\n\t\t$id = conectar_bd();\r\n\t\tif (isset($_GET['pregunta']) && isset($_GET['categoria']) && isset($_GET['respuesta'])) {\r\n\t\t\t$pregunta = $_GET['pregunta'];\r\n\t\t\t$categoria= $_GET['categoria'];\r\n\t\t\t$respuesta = 
$_GET['respuesta'];\r\n\t\t\tif ($pregunta!=\"\") {\r\n\t\t\t\t$sql = \"UPDATE H10_12_faq SET idCategoria=(SELECT id FROM H10_12_categoria_faq WHERE categoria='$categoria'), respuesta='$respuesta' WHERE pregunta='$pregunta'\";\r\n\t\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t\tif ($res) {\r\n\t\t\t\t\techo \"&estado=ok\";\r\n\t\t\t\t\techo \"&mensaje=La pregunta ha sido modificada correctamente.\";\r\n\t\t\t\t}\r\n\t\t\t\telse {\r\n\t\t\t\t\techo \"&estado=error\";\r\n\t\t\t\t\techo \"&mensaje=No se ha podido modificar la pregunta. \" . mysql_error();\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\telse {\r\n\t\t\techo \"&estado=error\";\r\n\t\t\techo \"&mensaje=Debe especificar la pregunta que desea eliminar junto con su categoría y respuesta.\";\r\n\t\t}\r\n\t\tcerrar_bd ($id);\r\n\t}\r\n\telse if ($operacion == \"del\") {\r\n\t\t$id = conectar_bd();\r\n\t\tif (isset($_GET['pregunta'])) {\r\n\t\t\t$pregunta = $_GET['pregunta'];\r\n\t\t\tif ($pregunta!=\"\") {\r\n\t\t\t\t$sql = \"DELETE FROM H10_12_faq WHERE pregunta='$pregunta'\";\r\n\t\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\t\tif ($res) {\r\n\t\t\t\t\techo \"&estado=ok\";\r\n\t\t\t\t\techo \"&mensaje=La pregunta ha sido eliminada.\";\r\n\t\t\t\t}\r\n\t\t\t\telse {\r\n\t\t\t\t\techo \"&estado=error\";\r\n\t\t\t\t\techo \"&mensaje=No se ha podido eliminar la pregunta. \" . mysql_error();\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\telse {\r\n\t\t\techo \"&estado=error\";\r\n\t\t\techo \"&mensaje=Debe especificar la pregunta que desea eliminar.\";\r\n\t\t}\r\n\t\tcerrar_bd ($id);\r\n\t}\r\n\telse {\r\n\t\techo \"&estado=error\";\r\n\t\techo \"&mensaje=Operacion no definida: $operacion\";\r\n\t}\r\n} else {\r\n\techo \"&estado=error\";\r\n\techo \"&mensaje=No se ha definido un tipo de operacion\";\r\n}\r\n?>"
},
{
"alpha_fraction": 0.6944220066070557,
"alphanum_fraction": 0.6952304244041443,
"avg_line_length": 23.77083396911621,
"blob_id": "0bcfa596f3f848097538cfcd812722e8b9794e78",
"content_id": "65d5fc26f4a92a422b6539b5f5ce5dafea6fafa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1237,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 48,
"path": "/sistemas-colaboracion/plancode/app/src/dominio/control/GestorColores.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package dominio.control;\r\n\r\nimport java.awt.Color;\r\nimport java.util.Vector;\r\n\r\nimport excepciones.NoSlotsDisponiblesException;\r\n\r\npublic class GestorColores {\r\n\r\n\tprivate static Vector<Color> colores = new Vector<Color>();\r\n\tprivate static Vector<Color> coloresOcupados = new Vector<Color>();\r\n\r\n\tpublic static void inicializaColores() {\r\n\t\tcolores.add(Color.darkGray);\r\n\t\tcolores.add(Color.blue);\r\n\t\tcolores.add(Color.magenta);\t\t\r\n\t\tcolores.add(Color.green);\r\n\t\tcolores.add(Color.orange);\r\n\t\t\r\n\t}\r\n\r\n\t/*\r\n\t * Se busca un color libre para esta sesion y se marca como ocupado\r\n\t */\r\n\tpublic static Color getColorLibre() throws NoSlotsDisponiblesException {\r\n\t\tColor colorElegido = null;\r\n\t\tfor (int i = 0; colorElegido == null && i < colores.size(); i++) {\r\n\t\t\tif (!coloresOcupados.contains(colores.get(i))) {\r\n\t\t\t\tcolorElegido = colores.get(i);\r\n\t\t\t\tcoloresOcupados.add(colorElegido);\r\n\t\t\t}\r\n\t\t}\r\n\t\tif (colorElegido == null) throw new NoSlotsDisponiblesException();\r\n\t\treturn colorElegido;\r\n\t}\r\n\t\r\n\tpublic static void liberarColor(Color c) {\r\n\t\tcoloresOcupados.remove(c);\r\n\t}\r\n\r\n\tpublic static Vector<Color> getColores() {\r\n\t\treturn colores;\r\n\t}\r\n\r\n\tpublic static Vector<Color> getColoresOcupados() {\r\n\t\treturn coloresOcupados;\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.5339806079864502,
"alphanum_fraction": 0.5339806079864502,
"avg_line_length": 15.5,
"blob_id": "b9e9c6d997076441140189d0f5b4586d73290ac4",
"content_id": "3793d4ec6cdbf620655886b4f10c9920b6420d0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 103,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 6,
"path": "/multimedia/faq-manager/config.php",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "<?php\r\n\t$server = \"localhost\";\r\n\t$database = \"mhrv\";\r\n\t$login = \"mhrv\";\r\n\t$password = \"mhrvpass\";\r\n?>"
},
{
"alpha_fraction": 0.8063063025474548,
"alphanum_fraction": 0.8198198080062866,
"avg_line_length": 54.5,
"blob_id": "24e262468d12b6d5d05d952daa85476033fb40ea",
"content_id": "c4cba51f18cf9068e25d480a3bac1a0427654020",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 227,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 4,
"path": "/README.md",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "# isi-practicas-5\nAutomatically exported from code.google.com/p/isi-practicas-5\n\nPrácticas por parejas de asignaturas de 5º de la Ingeniería Superior Informática de la Escuela Superior de Informática de Ciudad Real (UCLM)\n"
},
{
"alpha_fraction": 0.5791420340538025,
"alphanum_fraction": 0.5857987999916077,
"avg_line_length": 26.20833396911621,
"blob_id": "e03299ae80f57230fbbea27441e19eb3aae715b7",
"content_id": "fc1062dc214e7e24a22c4cad0ee5aa5ea6a001cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "PHP",
"length_bytes": 1352,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 48,
"path": "/multimedia/faq-manager/categoria-dao.php",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "<?php\r\n// Comprobamos que esta definido el tipo de operacion (add, del, edit, view)\r\nif (isset($_GET['op'])) {\r\n\tinclude 'broker.php';\r\n\t$operacion = $_GET['op'];\r\n\tif ($operacion == \"add\") {\r\n\t\tif (isset($_GET['nombre'])) {\r\n\t\t\t$id = conectar_bd();\r\n\t\t\t$sql = \"INSERT INTO H10_12_categoria_faq (categoria) VALUES ('\".$_GET['nombre'].\"')\";\r\n\t\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t\tif ($res) {\r\n\t\t\t\techo \"&estado=ok\";\r\n\t\t\t\techo \"&mensaje=La categoria ha sido creada con exito.\";\r\n\t\t\t}\r\n\t\t\telse {\r\n\t\t\t\techo \"&estado=error\";\r\n\t\t\t\techo \"&mensaje=No se ha podido crear la categoria. \" . mysql_error();\r\n\t\t\t}\r\n\t\t\tcerrar_bd ($id);\r\n\t\t}\r\n\t\telse {\r\n\t\t\techo \"&estado=error\";\r\n\t\t\techo \"&mensaje=Debe especificar el nombre de la categoria.\";\r\n\t\t}\r\n\t}\r\n\telse if ($operacion == \"view\") {\r\n\t\t$id = conectar_bd();\r\n\t\t$sql = \"SELECT categoria FROM H10_12_categoria_faq ORDER BY categoria\";\r\n\t\t$res = ejecutar_consulta ($sql, $id);\r\n\t\t$contador = 0;\r\n\t\twhile ($fila = mysql_fetch_array ($res)) {\r\n\t\t\t$categoria = $fila[\"categoria\"];\r\n\t\t\techo \"&categoria$contador=$categoria\";\r\n\t\t\t$contador++;\r\n\t\t}\r\n\t\techo \"&estado=ok\";\r\n\t\techo \"&contador=$contador\";\r\n\t\tcerrar_bd ($id);\r\n\t}\r\n\telse {\r\n\t\techo \"&estado=error\";\r\n\t\techo \"&mensaje=Operacion no definida: $operacion\";\r\n\t}\r\n} else {\r\n\techo \"&estado=error\";\r\n\techo \"&mensaje=No se ha definido un tipo de operacion\";\r\n}\r\n?>"
},
{
"alpha_fraction": 0.5815181732177734,
"alphanum_fraction": 0.5867986679077148,
"avg_line_length": 29.1200008392334,
"blob_id": "8b45699e908665c5bbe48c6df186564ae5866c5f",
"content_id": "a8c8980ae9fb4268494140a05bbd7ad761ea6615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1515,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 50,
"path": "/mya-ia/kohonen/fileHandler.py",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# This file is part of pyDMS v1.0: Yet Another Document Management System\n# Copyright (C) 2009, Jose Domingo Lopez Lopez & Juan Andrada Romero\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport psyco\npsyco.full()\n\nclass File_Handler:\n def __init__(self):\n pass\n\n def read_text_file (self, filename):\n text = \"\"\n try: \n f = open (filename, \"r\")\n for line in f.xreadlines():\n text = text + line\n f.close()\n except:\n raise\n return text\n\n def read_lines_file (self, filename):\n try:\n f = open (filename, \"r\")\n return f.readlines()\n except:\n raise\n\n def write_text_file (self, filename, lines):\n try:\n f = open(filename, \"w\")\n f.writelines(lines)\n f.close()\n except:\n raise\n \n"
},
{
"alpha_fraction": 0.6939163208007812,
"alphanum_fraction": 0.730038046836853,
"avg_line_length": 19.040000915527344,
"blob_id": "70b78cc71dff483180c9b643ae2873fb3e3ba567",
"content_id": "005ac0d6f95b456e3e1dc629e5ef2e078e5cf42d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 526,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 25,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/EventosCanales/MensajeChatRecibidoEvent.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones.EventosCanales;\r\n\r\nimport java.util.EventObject;\r\n\r\npublic class MensajeChatRecibidoEvent extends EventObject {\r\n\r\n\tprivate static final long serialVersionUID = -8922139367513391169L;\r\n\tprivate String nombre;\r\n\tprivate String mensaje;\r\n\r\n\tpublic MensajeChatRecibidoEvent(Object obj, String nombre, String mensaje) {\r\n\t\tsuper(obj);\r\n\t\tthis.nombre = nombre;\r\n\t\tthis.mensaje = mensaje;\r\n\t}\r\n\r\n\tpublic String getNombre() {\r\n\t\treturn nombre;\r\n\t}\r\n\r\n\tpublic String getMensaje() {\r\n\t\treturn mensaje;\r\n\t}\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.6890459656715393,
"alphanum_fraction": 0.6972909569740295,
"avg_line_length": 34.375,
"blob_id": "429c98d7971d3a4322b9d8a843d19accbfea0f52",
"content_id": "fd97ec212dd956479071fcb8336bfc22f52d64c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 849,
"license_type": "no_license",
"max_line_length": 221,
"num_lines": 24,
"path": "/mya-ia/kohonen/testing.py",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "import psyco\npsyco.full()\n\nfrom kohonen import *\nfrom fileHandler import *\nfrom abalone import *\n\n\nif __name__==\"__main__\":\n mapa = Kohonen()\n fileHandler = File_Handler()\n # Leer neuronas obtenidas en el algoritmo\n res = fileHandler.read_lines_file(\"./resultados.txt\")\n mapa.inicializarNeuronas(res) \n\n # Leemos el conjunto de test\n datosAbalone = fileHandler.read_lines_file(\"./test.txt\")\n for d in datosAbalone:\n valoresAtributos = d.split()\n abalone = Abalone(float(valoresAtributos[0]), float(valoresAtributos[1]), float(valoresAtributos[2]), float(valoresAtributos[3]), float(valoresAtributos[4]), float(valoresAtributos[5]), float(valoresAtributos[6]))\n # Se evalua la similitud, obteniendo una neurona ganadora\n mapa.evaluarSimilitud(abalone)\n\n print (mapa.getNeuronaGanadora())\n"
},
{
"alpha_fraction": 0.676066517829895,
"alphanum_fraction": 0.6898047924041748,
"avg_line_length": 18.04347801208496,
"blob_id": "1f1ac3fa4f63906f98d849dd787f4d1a70ad0b9c",
"content_id": "481c41dc93415a1cf460d271dfb9c94f5e685469",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1384,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 69,
"path": "/sistemas-colaboracion/plancode/app/src/dominio/conocimiento/InfoTrazo.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-10",
"text": "package dominio.conocimiento;\r\n\r\nimport java.io.Serializable;\r\nimport java.util.LinkedList;\r\n\r\n\r\npublic class InfoTrazo implements Serializable {\r\n\r\n\t/**\r\n\t * \r\n\t */\r\n\tprivate static final long serialVersionUID = 1732709130048456235L;\r\n\t\r\n\tprivate Trazo trazo;\r\n\tprivate LinkedList<Trazo> trazos;\r\n\tprivate boolean dibujando;\r\n\tprivate boolean clear;\r\n\tprivate boolean recienConectado;\r\n\r\n\r\n\t// Constructor para el caso de dibujar sin aņadir puntos (o para eliminar, si dibujando = false)\r\n\tpublic InfoTrazo(boolean dibujando, Trazo trazo) {\r\n\t\tthis.trazo = trazo;\r\n\t\tthis.dibujando = dibujando;\r\n\t\tclear = false;\r\n\t\trecienConectado = false;\r\n\t}\r\n\t\r\n\t// Constructor para pasar todos los trazos al conectarse\r\n\tpublic InfoTrazo (LinkedList<Trazo> trazos) {\r\n\t\tthis.trazos = trazos;\r\n\t\tclear = false;\r\n\t\trecienConectado = true;\r\n\t}\r\n\t\r\n\t// El construcotr por defecto significa limpiar el canvas completo\r\n\tpublic InfoTrazo() {\r\n\t\tthis.clear = true;\r\n\t\trecienConectado = false;\r\n\t\tdibujando = false;\r\n\t}\r\n\r\n\r\n\tpublic Trazo getTrazo() {\r\n\t\treturn trazo;\r\n\t}\r\n\r\n\r\n\tpublic boolean isDibujando() {\r\n\t\treturn dibujando;\r\n\t}\r\n\t\r\n\tpublic boolean isTerminado() {\r\n\t\treturn trazo.isTerminado();\r\n\t}\r\n\r\n\tpublic LinkedList<Trazo> getTrazos() {\r\n\t\treturn trazos;\r\n\t}\r\n\r\n\tpublic boolean isClear() {\r\n\t\treturn clear;\r\n\t}\r\n\r\n\tpublic boolean isRecienConectado() {\r\n\t\treturn recienConectado;\r\n\t}\r\n\t\r\n}\r\n"
},
{
"alpha_fraction": 0.717756986618042,
"alphanum_fraction": 0.7532710433006287,
"avg_line_length": 22.31818199157715,
"blob_id": "12ce199871279f6964d8e7c3f2071db4368cf57d",
"content_id": "977bac605163e18d6ab96ae0e4fe271996cf7b46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 535,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 22,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/EventosCanales/MensajeListaUsuariosEvent.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones.EventosCanales;\r\n\r\nimport java.util.EventObject;\r\nimport java.util.Hashtable;\r\n\r\nimport dominio.conocimiento.Usuario;\r\n\r\npublic class MensajeListaUsuariosEvent extends EventObject {\r\n\r\n\tprivate static final long serialVersionUID = -8922139367513391169L;\r\n\tprivate Hashtable<String,Usuario> lista;\r\n\t\r\n\tpublic MensajeListaUsuariosEvent(Object obj, Object lista) {\r\n\t\tsuper(obj);\r\n\t\tthis.lista = (Hashtable<String,Usuario>)lista;\t\t\r\n\t}\r\n\r\n\tpublic Hashtable<String, Usuario> getLista() {\r\n\t\treturn lista;\r\n\t}\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.6246174573898315,
"alphanum_fraction": 0.650799036026001,
"avg_line_length": 31.804597854614258,
"blob_id": "e65de47eb77f9c1fb53c00afbb57fc6fedca1883",
"content_id": "db94be8e757e7212f4ba05b58ae000de0428a684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2950,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 87,
"path": "/sistemas-colaboracion/plancode/app/src/presentacion/auxiliares/Validacion.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package presentacion.auxiliares;\r\n\r\nimport java.util.regex.Pattern;\r\n\r\n\r\nimport excepciones.CadenaIncorrectaException;\r\nimport excepciones.CadenaVaciaException;\r\nimport excepciones.IPInvalidaException;\r\nimport excepciones.PuertoInvalidoException;\r\n\r\n\r\n/**\r\n * Clase estática que contiene métodos para comprobar la validez\r\n * de los campos de las ventanas.\r\n */\r\npublic class Validacion {\r\n\r\n\tpublic static final int MAX_LONGITUD_CAMPOS = 255;\r\n\tpublic static final int PUERTO_MINIMO = 1025;\r\n\tpublic static final int PUERTO_MAXIMO = 65535;\r\n\t\r\n\r\n\t// Una cadena es válida si todos sus caracteres son alfabéticos, espacios o guiones\r\n\tpublic static void comprobarCadena(String cadena) throws CadenaIncorrectaException, CadenaVaciaException {\r\n\t\tboolean bCorrecto = false;\r\n\t\tboolean bAux = true;\r\n\r\n\t\tif(cadena.length() > MAX_LONGITUD_CAMPOS) {\r\n\t\t\tthrow new CadenaIncorrectaException();\r\n\t\t}\r\n\t\t\r\n\t\t// El primer caracter debe ser una letra\r\n\t\tif(cadena.length() > 0) {\r\n\t\t\tif(Character.isLetter(cadena.charAt(0))) {\r\n\t\t\t\tbAux = !cadena.contains(\"--\");\r\n\t\t\t\t// El resto de caracteres pueden ser letra, espacio o guion (no se permiten guiones juntos, como --)\r\n\t\t\t\tfor(int i = 1; i < cadena.length() && bAux; i++) {\r\n\t\t\t\t\tbAux = Character.isLetter(cadena.charAt(i)) || Character.isWhitespace(cadena.charAt(i)) || cadena.charAt(i) == '-';\r\n\t\t\t\t}\r\n\t\t\t\tbCorrecto = bAux;\r\n\t\t\t\t\r\n\t\t\t\t// No se puede terminar la cadena con un guión ni con espacios\r\n\t\t\t\tif ((bCorrecto && cadena.charAt(cadena.length()-1) == '-') || bCorrecto && Character.isWhitespace(cadena.charAt(cadena.length()-1)))\r\n\t\t\t\t\tbCorrecto = false;\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\tif(!bCorrecto) {\r\n\t\t\t\tthrow new CadenaIncorrectaException();\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\tthrow new CadenaVaciaException();\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic static void comprobarDireccionIP(String ip) throws IPInvalidaException {\r\n\t\tPattern patronIP;\r\n\t\t\r\n\t\t// Creamos un patrón que define las IPs válidas\r\n\t\tpatronIP = Pattern.compile(\"\\\\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\.\" + \r\n\t\t\t\t\"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\.\" +\r\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\.\" +\r\n \"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\\\b\");\r\n\t\t\r\n\t\tif(ip.equals(\"\")) {\r\n\t\t\tthrow new IPInvalidaException(\"La dirección IP no puede ser nula.\");\r\n\t\t} else if(!patronIP.matcher(ip).matches()) {\r\n\t\t\tthrow new IPInvalidaException();\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic static void comprobarPuerto(String puerto) throws PuertoInvalidoException {\r\n\t\tint numPuerto;\r\n\t\r\n\t\tif(puerto.equals(\"\")) {\r\n\t\t\tthrow new PuertoInvalidoException(\"El puerto no puede ser nulo.\");\r\n\t\t} else {\r\n\t\t\ttry {\r\n\t\t\t\tnumPuerto = Integer.parseInt(puerto);\r\n\t\t\t\tif(numPuerto < PUERTO_MINIMO || numPuerto > PUERTO_MAXIMO) {\r\n\t\t\t\t\tthrow new PuertoInvalidoException(\"El puerto debe ser un número comprendido entre \" + PUERTO_MINIMO + \" y \" + PUERTO_MAXIMO + \".\");\r\n\t\t\t\t}\r\n\t\t\t} catch(NumberFormatException ex) {\r\n\t\t\t\tthrow new PuertoInvalidoException();\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.7774274945259094,
"alphanum_fraction": 0.7774274945259094,
"avg_line_length": 42.05555725097656,
"blob_id": "a29235563be5492744bf1661f30af485013d80ea",
"content_id": "303c79adfe3a46dfc820bcf6e95270505f3f0903",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1592,
"license_type": "no_license",
"max_line_length": 254,
"num_lines": 36,
"path": "/sistemas-colaboracion/plancode/app/src/presentacion/auxiliares/InterfaceArrastrarRaton.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package presentacion.auxiliares;\r\n\r\nimport com.sun.media.jsdt.ConnectionException;\r\nimport com.sun.media.jsdt.InvalidClientException;\r\nimport com.sun.media.jsdt.NoSuchChannelException;\r\nimport com.sun.media.jsdt.NoSuchClientException;\r\nimport com.sun.media.jsdt.NoSuchSessionException;\r\nimport com.sun.media.jsdt.PermissionDeniedException;\r\nimport com.sun.media.jsdt.TimedOutException;\r\n\r\n/**\r\n * REFERENCIA : http://www.chuidiang.com/java/codigo_descargable/appletpaint.php\r\n */\r\n\r\n/**\r\n * Interface para las clases encargadas de hacer algo cuando se arrastre el\r\n * ratón.\r\n */\r\npublic interface InterfaceArrastrarRaton\r\n{\r\n /**\r\n * Crea un trazo nuevo y le pone como primer punto x,y.\r\n */ \r\n public void comienzaDibujarTrazo(int x, int y) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException;\r\n\r\n /**\r\n * Añade nuevos puntos al trazo mientras se arrastra el ratón \r\n */\r\n public void añadirPuntosTrazo(int xAntigua, int yAntigua, int xNueva, int yNueva) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException;\r\n\r\n /**\r\n * Se llama a este método cuando se termina de arrastrar el ratón.\r\n \r\n */\r\n public void finalizaArrastra(int x, int y) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException;\r\n}\r\n"
},
{
"alpha_fraction": 0.7227138876914978,
"alphanum_fraction": 0.7227138876914978,
"avg_line_length": 16.842105865478516,
"blob_id": "0f8a8f26f06dd1b3312957cde5d7c773a8f56219",
"content_id": "bac9e2ca16ed2ca983a3da6eb91bbf656d45d25c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/sistemas-aprendizaje/aplicacionMineria/src/utilities/FiltroDeXLS.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package utilities;\nimport java.io.File;\n\nimport javax.swing.filechooser.FileFilter;\n\npublic class FiltroDeXLS extends FileFilter\n{\n\tpublic boolean accept (File fichero)\n\t{\n\t\tif (fichero.toString().toLowerCase().endsWith(\".xls\"))\n\t\t\treturn true;\n\t\telse\n\t\t\treturn false;\n\t}\n\tpublic String getDescription()\n\t{\n\t\treturn (\"Archivos XLS\");\n\t}\n}\n"
},
{
"alpha_fraction": 0.6511628031730652,
"alphanum_fraction": 0.6518357992172241,
"avg_line_length": 24.53769874572754,
"blob_id": "d5b560c01eafcb7017d37d3cffeaeebe32f65899",
"content_id": "87a705120cda05804ed6c06e0b16fc83363ea1a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 13373,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 504,
"path": "/sistemas-colaboracion/p2p-jxta/apps/JXTA-AdvertismentOS/src/AdvertisementTutorial.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "import net.jxta.document.*;\r\nimport net.jxta.id.ID;\r\nimport net.jxta.id.IDFactory;\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.io.Serializable;\r\nimport java.net.InetAddress;\r\nimport java.net.URI;\r\nimport java.net.URISyntaxException;\r\nimport java.net.UnknownHostException;\r\nimport java.util.Enumeration;\r\nimport java.util.logging.Logger;\r\n\r\n/**\r\n * Simple Advertisement Tutorial creates a advertisment describing a system <p/>\r\n * \r\n * <pre>\r\n * <?xml version="1.0"?>\r\n * <!DOCTYPE jxta:System>\r\n * <jxta:System xmlns:jxta="http://jxta.org">\r\n * <id>id</id>\r\n * <name>Device Name</name>\r\n * <ip>ip address</ip>\r\n * <hwarch>x86</hwarch>\r\n * <hwvendor>Sun MicroSystems</hwvendor>\r\n * <OSName></OSName>\r\n * <OSVer></OSVer>\r\n * <osarch></osarch>\r\n * <sw></sw>\r\n * </jxta:System>\r\n * </pre>\r\n */\r\npublic class AdvertisementTutorial extends Advertisement implements Comparable,\r\n\t\tCloneable, Serializable {\r\n\tprivate String hwarch;\r\n\tprivate String hwvendor;\r\n\tprivate ID id = ID.nullID;\r\n\tprivate String ip;\r\n\tprivate String name;\r\n\tprivate String osname;\r\n\tprivate String osversion;\r\n\tprivate String osarch;\r\n\tprivate String inventory;\r\n\tprivate final static Logger LOG = Logger\r\n\t\t\t.getLogger(AdvertisementTutorial.class.getName());\r\n\tprivate final static String OSNameTag = \"OSName\";\r\n\tprivate final static String OSVersionTag = \"OSVer\";\r\n\tprivate final static String OSarchTag = \"osarch\";\r\n\tprivate final static String hwarchTag = \"hwarch\";\r\n\tprivate final static String hwvendorTag = \"hwvendor\";\r\n\tprivate final static String idTag = \"ID\";\r\n\tprivate final static String ipTag = \"ip\";\r\n\tprivate final static String nameTag = \"name\";\r\n\tprivate final static String swTag = \"sw\";\r\n\t/**\r\n\t * Indexable fields. 
Advertisements must define the indexables, in order to\r\n\t * properly index and retrieve these advertisements locally and on the\r\n\t * network\r\n\t */\r\n\tprivate final static String[] fields = { idTag, nameTag, hwarchTag };\r\n\r\n\t/**\r\n\t * Default Constructor\r\n\t */\r\n\tpublic AdvertisementTutorial() {\r\n\t}\r\n\r\n\t/**\r\n\t * Construct from a StructuredDocument\r\n\t * \r\n\t * @param root\r\n\t * Root element\r\n\t */\r\n\tpublic AdvertisementTutorial(Element root) {\r\n\t\tTextElement doc = (TextElement) root;\r\n\t\tif (!getAdvertisementType().equals(doc.getName())) {\r\n\t\t\tthrow new IllegalArgumentException(\"Could not construct : \"\r\n\t\t\t\t\t+ getClass().getName() + \"from doc containing a \"\r\n\t\t\t\t\t+ doc.getName());\r\n\t\t}\r\n\t\tinitialize(doc);\r\n\t}\r\n\r\n\t/**\r\n\t * Construct a doc from InputStream\r\n\t * \r\n\t * @param stream\r\n\t * the underlying input stream.\r\n\t * @throws IOException\r\n\t * if an I/O error occurs.\r\n\t */\r\n\tpublic AdvertisementTutorial(InputStream stream) throws IOException {\r\n\t\tStructuredTextDocument doc = (StructuredTextDocument) StructuredDocumentFactory\r\n\t\t\t\t.newStructuredDocument(MimeMediaType.XMLUTF8, stream);\r\n\t\tinitialize(doc);\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the hWArch attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param hwarch\r\n\t * The new hWArch value\r\n\t */\r\n\tpublic void setHWArch(String hwarch) {\r\n\t\tthis.hwarch = hwarch;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the OSArch attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param osarch\r\n\t * The new hWArch value\r\n\t */\r\n\tpublic void setOSArch(String osarch) {\r\n\t\tthis.osarch = osarch;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the hWVendor attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param hwvendor\r\n\t * The new hWVendor value\r\n\t */\r\n\tpublic void setHWVendor(String hwvendor) {\r\n\t\tthis.hwvendor = hwvendor;\r\n\t}\r\n\r\n\t/**\r\n\t * sets the unique id\r\n\t * \r\n\t * @param id\r\n\t * The id\r\n\t */\r\n\tpublic void setID(ID id) {\r\n\t\tthis.id = (id == null ? 
null : id);\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the iP attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param ip\r\n\t * The new iP value\r\n\t */\r\n\tpublic void setIP(String ip) {\r\n\t\tthis.ip = ip;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the name attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param name\r\n\t * The new name value\r\n\t */\r\n\tpublic void setName(String name) {\r\n\t\tthis.name = name;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the oSName attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param osname\r\n\t * The new oSName value\r\n\t */\r\n\tpublic void setOSName(String osname) {\r\n\t\tthis.osname = osname;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the oSVersion attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param osversion\r\n\t * The new oSVersion value\r\n\t */\r\n\tpublic void setOSVersion(String osversion) {\r\n\t\tthis.osversion = osversion;\r\n\t}\r\n\r\n\t/**\r\n\t * Sets the SWInventory attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @param inventory\r\n\t * the software inventory of the system\r\n\t */\r\n\tpublic void setSWInventory(String inventory) {\r\n\t\tthis.inventory = inventory;\r\n\t}\r\n\r\n\t/**\r\n\t * {@inheritDoc}\r\n\t * \r\n\t * @param asMimeType\r\n\t * Document encoding\r\n\t * @return The document value\r\n\t */\r\n\t@Override\r\n\tpublic Document getDocument(MimeMediaType asMimeType) {\r\n\t\tStructuredDocument adv = StructuredDocumentFactory\r\n\t\t\t\t.newStructuredDocument(asMimeType, getAdvertisementType());\r\n\t\tif (adv instanceof Attributable) {\r\n\t\t\t((Attributable) adv).addAttribute(\"xmlns:jxta\", \"http://jxta.org\");\r\n\t\t}\r\n\t\tElement e;\r\n\t\te = adv.createElement(idTag, getID().toString());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(nameTag, getName().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(OSNameTag, getOSName().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(OSVersionTag, getOSVersion().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(OSarchTag, getOSArch().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(ipTag, getIP().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(hwarchTag, getHWArch().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(hwvendorTag, getHWVendor().trim());\r\n\t\tadv.appendChild(e);\r\n\t\te = adv.createElement(swTag, getSWInventory().trim());\r\n\t\tadv.appendChild(e);\r\n\t\treturn adv;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the hWArch attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The hWArch value\r\n\t */\r\n\tpublic String getHWArch() {\r\n\t\treturn hwarch;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the OSArch attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The OSArch value\r\n\t */\r\n\tpublic String getOSArch() {\r\n\t\treturn osarch;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the hWVendor attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The hWVendor value\r\n\t */\r\n\tpublic String getHWVendor() {\r\n\t\treturn hwvendor;\r\n\t}\r\n\r\n\t/**\r\n\t * returns the id of the device\r\n\t * \r\n\t * @return ID the device id\r\n\t */\r\n\t@Override\r\n\tpublic ID getID() {\r\n\t\treturn (id == null ? 
null : id);\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the IP attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The IP value\r\n\t */\r\n\tpublic String getIP() {\r\n\t\treturn ip;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the name attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The name value\r\n\t */\r\n\tpublic String getName() {\r\n\t\treturn name;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the OSName attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The OSName value\r\n\t */\r\n\tpublic String getOSName() {\r\n\t\treturn osname;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the Software Inventory text element\r\n\t * \r\n\t * @return The Inventory value\r\n\t */\r\n\tpublic String getSWInventory() {\r\n\t\tif (inventory == null) {\r\n\t\t\tinventory = \"\";\r\n\t\t}\r\n\t\treturn inventory;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the OSVersion attribute of the AdvertisementTutorial object\r\n\t * \r\n\t * @return The OSVersion value\r\n\t */\r\n\tpublic String getOSVersion() {\r\n\t\treturn osversion;\r\n\t}\r\n\r\n\t/**\r\n\t * Process an individual element from the document.\r\n\t * \r\n\t * @param elem\r\n\t * the element to be processed.\r\n\t * @return true if the element was recognized, otherwise false.\r\n\t */\r\n\tprotected boolean handleElement(TextElement elem) {\r\n\t\tif (elem.getName().equals(idTag)) {\r\n\t\t\ttry {\r\n\t\t\t\tURI id = new URI(elem.getTextValue());\r\n\t\t\t\tsetID(IDFactory.fromURI(id));\r\n\t\t\t} catch (URISyntaxException badID) {\r\n\t\t\t\tthrow new IllegalArgumentException(\r\n\t\t\t\t\t\t\"unknown ID format in advertisement: \"\r\n\t\t\t\t\t\t\t\t+ elem.getTextValue());\r\n\t\t\t} catch (ClassCastException badID) {\r\n\t\t\t\tthrow new IllegalArgumentException(\r\n\t\t\t\t\t\t\"Id is not a known id type: \" + elem.getTextValue());\r\n\t\t\t}\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(nameTag)) {\r\n\t\t\tsetName(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(OSNameTag)) {\r\n\t\t\tsetOSName(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(OSVersionTag)) {\r\n\t\t\tsetOSVersion(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(OSarchTag)) {\r\n\t\t\tsetOSArch(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(ipTag)) {\r\n\t\t\tsetIP(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(hwarchTag)) {\r\n\t\t\tsetHWArch(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(hwvendorTag)) {\r\n\t\t\tsetHWVendor(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (elem.getName().equals(swTag)) {\r\n\t\t\tsetSWInventory(elem.getTextValue());\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\t// element was not handled\r\n\t\treturn false;\r\n\t}\r\n\r\n\t/**\r\n\t * Intialize a System advertisement from a portion of a structured document.\r\n\t * \r\n\t * @param root\r\n\t * document root\r\n\t */\r\n\tprotected void initialize(Element root) {\r\n\t\tif (!TextElement.class.isInstance(root)) {\r\n\t\t\tthrow new IllegalArgumentException(getClass().getName()\r\n\t\t\t\t\t+ \" only supports TextElement\");\r\n\t\t}\r\n\t\tTextElement doc = (TextElement) root;\r\n\t\tif (!doc.getName().equals(getAdvertisementType())) {\r\n\t\t\tthrow new IllegalArgumentException(\"Could not construct : \"\r\n\t\t\t\t\t+ getClass().getName() + \"from doc containing a \"\r\n\t\t\t\t\t+ doc.getName());\r\n\t\t}\r\n\t\tEnumeration 
elements = doc.getChildren();\r\n\t\twhile (elements.hasMoreElements()) {\r\n\t\t\tTextElement elem = (TextElement) elements.nextElement();\r\n\t\t\tif (!handleElement(elem)) {\r\n\t\t\t\tLOG.warning(\"Unhandled element \\'\" + elem.getName()\r\n\t\t\t\t\t\t+ \"\\' in \" + doc.getName());\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * {@inheritDoc}\r\n\t */\r\n\t@Override\r\n\tpublic final String[] getIndexFields() {\r\n\t\treturn fields;\r\n\t}\r\n\r\n\t/**\r\n\t * {@inheritDoc}\r\n\t */\r\n\t@Override\r\n\tpublic boolean equals(Object obj) {\r\n\t\tif (this == obj) {\r\n\t\t\treturn true;\r\n\t\t}\r\n\t\tif (obj instanceof AdvertisementTutorial) {\r\n\t\t\tAdvertisementTutorial adv = (AdvertisementTutorial) obj;\r\n\t\t\treturn getID().equals(adv.getID());\r\n\t\t}\r\n\t\treturn false;\r\n\t}\r\n\r\n\t/**\r\n\t * {@inheritDoc}\r\n\t */\r\n\tpublic int compareTo(Object other) {\r\n\t\treturn getID().toString().compareTo(other.toString());\r\n\t}\r\n\r\n\t/**\r\n\t * All messages have a type (in xml this is !doctype) which identifies\r\n\t * the message\r\n\t * \r\n\t * @return String \"jxta:AdvertisementTutorial\"\r\n\t */\r\n\tpublic static String getAdvertisementType() {\r\n\t\treturn \"jxta:AdvertisementTutorial\";\r\n\t}\r\n\r\n\t/**\r\n\t * Instantiator\r\n\t */\r\n\tpublic static class Instantiator implements\r\n\t\t\tAdvertisementFactory.Instantiator {\r\n\t\t/**\r\n\t\t * Returns the identifying type of this Advertisement.\r\n\t\t * \r\n\t\t * @return String the type of advertisement\r\n\t\t */\r\n\t\tpublic String getAdvertisementType() {\r\n\t\t\treturn AdvertisementTutorial.getAdvertisementType();\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Constructs an instance of <CODE>Advertisement</CODE> matching the\r\n\t\t * type specified by the <CODE>advertisementType</CODE> parameter.\r\n\t\t * \r\n\t\t * @return The instance of <CODE>Advertisement</CODE> or null if it\r\n\t\t * could not be created.\r\n\t\t */\r\n\t\tpublic Advertisement newInstance() {\r\n\t\t\treturn new AdvertisementTutorial();\r\n\t\t}\r\n\r\n\t\t/**\r\n\t\t * Constructs an instance of <CODE>Advertisement</CODE> matching the\r\n\t\t * type specified by the <CODE>advertisementType</CODE> parameter.\r\n\t\t * \r\n\t\t * @param root\r\n\t\t * Specifies a portion of a StructuredDocument which will be\r\n\t\t * converted into an Advertisement.\r\n\t\t * @return The instance of <CODE>Advertisement</CODE> or null if it\r\n\t\t * could not be created.\r\n\t\t */\r\n\t\tpublic Advertisement newInstance(net.jxta.document.Element root) {\r\n\t\t\treturn new AdvertisementTutorial(root);\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Main method\r\n\t * \r\n\t * @param args\r\n\t * command line arguments.
None defined\r\n\t */\r\n\tpublic static void main(String args[]) {\r\n\t\t// The following step is required and only needs to be done once,\r\n\t\t// without this step the AdvertisementFactory has no means of\r\n\t\t// associating an advertisement name space with the proper object\r\n\t\t// in this case the AdvertisementTutorial\r\n\t\tAdvertisementFactory.registerAdvertisementInstance(\r\n\t\t\t\tAdvertisementTutorial.getAdvertisementType(),\r\n\t\t\t\tnew AdvertisementTutorial.Instantiator());\r\n\t\tAdvertisementTutorial advTutorial = new AdvertisementTutorial();\r\n\t\tadvTutorial.setID(ID.nullID);\r\n\t\tadvTutorial.setName(\"AdvertisementTutorial\");\r\n\t\ttry {\r\n\t\t\tadvTutorial.setIP(InetAddress.getLocalHost().getHostAddress());\r\n\t\t} catch (UnknownHostException ignored) {\r\n\t\t\t//ignored\r\n\t\t}\r\n\t\tadvTutorial.setOSName(System.getProperty(\"os.name\"));\r\n\t\tadvTutorial.setOSVersion(System.getProperty(\"os.version\"));\r\n\t\tadvTutorial.setOSArch(System.getProperty(\"os.arch\"));\r\n\t\tadvTutorial.setHWArch(System.getProperty(\"HOSTTYPE\", System\r\n\t\t\t\t.getProperty(\"os.arch\")));\r\n\t\tadvTutorial.setHWVendor(System.getProperty(\"java.vm.vendor\"));\r\n\t\tSystem.out.println(advTutorial.toString());\r\n\t}\r\n}"
},
{
"alpha_fraction": 0.7831987738609314,
"alphanum_fraction": 0.7840339541435242,
"avg_line_length": 44.0512809753418,
"blob_id": "08f30e500fde381ba31d5fd2451cfbc464357864",
"content_id": "0dcf028080d6b60d69b767f36d6ca1b4482c17f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 14393,
"license_type": "no_license",
"max_line_length": 452,
"num_lines": 312,
"path": "/sistemas-colaboracion/plancode/app/src/dominio/control/ControladorPrincipal.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package dominio.control;\r\n\r\nimport java.awt.Color;\r\nimport java.util.Hashtable;\r\nimport java.util.LinkedList;\r\n\r\nimport javax.swing.ImageIcon;\r\n\r\nimport presentacion.JFLogin;\r\nimport presentacion.JFPrincipal;\r\n\r\nimport com.sun.media.jsdt.Channel;\r\nimport com.sun.media.jsdt.ConnectionException;\r\nimport com.sun.media.jsdt.Data;\r\nimport com.sun.media.jsdt.InvalidClientException;\r\nimport com.sun.media.jsdt.InvalidURLException;\r\nimport com.sun.media.jsdt.NameInUseException;\r\nimport com.sun.media.jsdt.NoRegistryException;\r\nimport com.sun.media.jsdt.NoSuchByteArrayException;\r\nimport com.sun.media.jsdt.NoSuchChannelException;\r\nimport com.sun.media.jsdt.NoSuchClientException;\r\nimport com.sun.media.jsdt.NoSuchConsumerException;\r\nimport com.sun.media.jsdt.NoSuchHostException;\r\nimport com.sun.media.jsdt.NoSuchSessionException;\r\nimport com.sun.media.jsdt.NoSuchTokenException;\r\nimport com.sun.media.jsdt.NotBoundException;\r\nimport com.sun.media.jsdt.PermissionDeniedException;\r\nimport com.sun.media.jsdt.PortInUseException;\r\nimport com.sun.media.jsdt.RegistryExistsException;\r\nimport com.sun.media.jsdt.RegistryFactory;\r\nimport com.sun.media.jsdt.Session;\r\nimport com.sun.media.jsdt.SessionFactory;\r\nimport com.sun.media.jsdt.TimedOutException;\r\nimport com.sun.media.jsdt.URLString;\r\nimport com.sun.media.jsdt.event.ChannelEvent;\r\nimport com.sun.media.jsdt.event.ChannelListener;\r\nimport comunicaciones.ClienteJSDT;\r\nimport comunicaciones.ConsumidorCanalChat;\r\nimport comunicaciones.ConsumidorCanalGestionListaUsuarios;\r\nimport comunicaciones.ConsumidorCanalGestionRol;\r\nimport comunicaciones.ConsumidorCanalMapa;\r\nimport comunicaciones.ConsumidorCanalTrazos;\r\nimport comunicaciones.DatosConexion;\r\nimport comunicaciones.ICanales;\r\nimport comunicaciones.ISesion;\r\nimport comunicaciones.EventosCanales.MensajeRolEvent;\r\nimport comunicaciones.EventosCanales.MensajeRolListener;\r\n\r\nimport dominio.conocimiento.InfoTrazo;\r\nimport dominio.conocimiento.Roles;\r\nimport dominio.conocimiento.Trazo;\r\nimport dominio.conocimiento.Usuario;\r\nimport excepciones.NoSlotsDisponiblesException;\r\n\r\npublic class ControladorPrincipal implements ICanales, ISesion {\r\n\tprivate JFLogin ventanaLogin;\r\n\tprivate JFPrincipal ventanaPrincipal;\r\n\t\r\n\tprivate boolean esServidor = false;\r\n\tprivate Hashtable<String,Usuario> listaUsuarios;\r\n\t\r\n\tprivate DatosConexion con;\r\n\tprivate URLString url;\r\n\tprivate Session sesion;\r\n\tprivate ClienteJSDT cliente;\r\n\tprivate Channel canalChat;\r\n\tprivate Channel canalDibujo;\r\n\t/* Estos canales se utilizan para intercambiar datos entre clientes para gestionar sus roles y el panel de sesiones\r\n\t * Se usan estos canales para no mostrar estos datos por el chat ni mezclarlos con otros canales\r\n\t * Ademas, se necesitan usar dos, porque cada canl debe tener su listener, ya que si no, no se muestran bien los datos\r\n\t */\t\r\n\tprivate Channel canalGestionRol;\r\n\tprivate Channel canalGestionListaUsuarios;\r\n\tprivate Channel canalMapa;\r\n\tprivate ConsumidorCanalChat consumidorChat;\r\n\tprivate ConsumidorCanalGestionRol consumidorGestionRol;\r\n\tprivate ConsumidorCanalGestionListaUsuarios consumidorGestionListaUsuarios;\r\n\tprivate ConsumidorCanalTrazos consumidorTrazos;\r\n\tprivate ConsumidorCanalMapa consumidorMapa;\r\n\t\r\n\tpublic ControladorPrincipal () {\r\n\t\tventanaLogin = new JFLogin(this);\r\n\t\tlistaUsuarios = new 
Hashtable<String,Usuario>();\r\n\t}\r\n\t\r\n\tpublic void mostrarVentanaLogin() {\r\n\t\tventanaLogin.mostrarVentana();\r\n\t}\r\n\r\n\tpublic void mostrarVentanaPrincipal() {\r\n\t\tventanaPrincipal.mostrarVentana();\r\n\t}\r\n\r\n\tpublic void iniciarSesion(String host, int puerto, String nick, Roles rol, boolean sesionExistente) throws NoRegistryException, RegistryExistsException, ConnectionException, InvalidClientException, InvalidURLException, NameInUseException, NoSuchClientException, NoSuchHostException, NoSuchSessionException, PermissionDeniedException, PortInUseException, TimedOutException, NoSuchChannelException, NoSuchConsumerException, NoSlotsDisponiblesException {\r\n\t\tcon = new DatosConexion (host, puerto);\r\n\t\t// 1. Si no está el Registry funcionando, ponerlo en funcionamiento\r\n\t\tif (RegistryFactory.registryExists(TIPO_SESION) == false) {\t\r\n\t\t\tRegistryFactory.startRegistry(TIPO_SESION);\r\n\t\t\tesServidor = true;\r\n\t\t}\r\n\t\t// 2. Crear un cliente\r\n\t\tcliente = new ClienteJSDT(nick, rol);\r\n\t\t// Como este cliente actua como servidor, se auto-asigna un color, ademas de inicializar los colores disponibles\r\n\t\tif (esServidor) {\r\n\t\t\tGestorColores.inicializaColores();\r\n\t\t\tColor c = GestorColores.getColorLibre();\r\n\t\t\t// Se añade en la lista de usuarios conectados\r\n\t\t\tlistaUsuarios.put(cliente.getName(), new Usuario(rol, c));\r\n\t\t}\r\n\t\t\r\n\t\t// 3. Crear la sesión\r\n\t\tcrearSesion ();\r\n\t\t// 4. Crear los canales y poner el cliente como consumidor\r\n\t\tcrearCanales ();\r\n\t\tponerConsumidores();\r\n\t\t\r\n\t\t// Cerramos la ventana de login y abrimos la ventana principal\r\n\t\tventanaLogin.cerrarVentana();\r\n\t\tventanaPrincipal = new JFPrincipal (this);\r\n\t\tventanaPrincipal.mostrarVentana();\r\n\t\t\r\n\t\t// Tras poner a los clientes como consumidores, se añaden los eventos al canal del chat\r\n\t\tcanalChat.addChannelListener(new ChannelListener() {\r\n\t\t\t// Pasamos a la interfaz gráfica el nick del cliente que se acaba de unir al canal del chat\r\n\t\t\tpublic void channelJoined(ChannelEvent e) {\r\n\t\t\t\tventanaPrincipal.iniciarSesion(e.getClientName());\r\n\t\t\t}\r\n\r\n\t\t\tpublic void channelConsumerAdded(ChannelEvent arg0) {\t\t\t\t\r\n\t\t\t}\r\n\r\n\t\t\tpublic void channelConsumerRemoved(ChannelEvent arg0) {\t\t\t\t\r\n\t\t\t}\r\n\r\n\t\t\tpublic void channelExpelled(ChannelEvent arg0) {\t\t\t\t\r\n\t\t\t}\r\n\r\n\t\t\tpublic void channelInvited(ChannelEvent arg0) {\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\t// Pasamos a la interfaz gráfica el nick del cliente que acaba de dejar el canal del chat\r\n\t\t\t// Liberamos también su color\r\n\t\t\tpublic void channelLeft(ChannelEvent e) {\r\n\t\t\t\tif (isServidor()) {\r\n\t\t\t\t\tGestorColores.liberarColor(listaUsuarios.get(e.getClientName()).getColor());\r\n\t\t\t\t\tlistaUsuarios.remove(e.getClientName());\r\n\t\t\t\t}\r\n\t\t\t\tif (!isServidor() || (isServidor() && listaUsuarios.size() > 0 )) {\r\n\t\t\t\t\tventanaPrincipal.notificarLogout(e.getClientName());\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t});\r\n\t\t\r\n\t\t// Añadimos el evento para poder recibir el mensaje de rol de otros clientes que se conectan\r\n\t\tif (esServidor) {\r\n\t\t\tconsumidorGestionRol.addMensajeRolRecibidoListener(new MensajeRolListener() {\r\n\t\t\t\tpublic void MensajeRolRecibido(MensajeRolEvent evt) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException,
NoSlotsDisponiblesException, NoSuchConsumerException {\r\n\t\t\t\t\t/* El mensaje del rol lo gestiona el servidor para saber que se ha conectado un\r\n\t\t\t\t\t * nuevo cliente y enviar la lista de usuarios al resto de clientes conectados.\r\n\t\t\t\t\t * Además, le asigna un color.\r\n\t\t\t\t\t */\r\n\t\t\t\t\tColor c = GestorColores.getColorLibre();\r\n\t\t\t\t\tlistaUsuarios.put(evt.getNombre(), new Usuario(evt.getRol(), c));\r\n\t\t\t\t\tcanalGestionListaUsuarios.sendToAll(cliente, new Data(listaUsuarios));\r\n\t\t\t\t\t// Se envía también el mapa cargado (si lo hay)\r\n\t\t\t\t\tif (ventanaPrincipal.getMapa()!=null) {\r\n\t\t\t\t\t\tenviarMapaRecienConectado(evt.getNombre(), ventanaPrincipal.getMapa());\r\n\t\t\t\t\t}\r\n\t\t\t\t\t// Se espera un pequeño tiempo para que reciba el mapa y sobre él se carguen los trazos (si los hay)\r\n\t\t\t\t\ttry {\r\n\t\t\t\t\t\tThread.sleep(600);\r\n\t\t\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t\t}\r\n\t\t\t\t\t// Envía los trazos ya dibujados al nuevo cliente\r\n\t\t\t\t\tif (!ventanaPrincipal.getCanvas().getTrazos().isEmpty()) {\r\n\t\t\t\t\t\tenviarTrazosRecienConectado(evt.getNombre(), ventanaPrincipal.getCanvas().getTrazos());\r\n\t\t\t\t\t}\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t\t}\r\n\t\t\r\n\t\t/* Al conectarse a la aplicación, el cliente envía al resto su rol (si no es el servidor)\r\n\t\t * El servidor tomará ese mensaje y lo incluirá en la lista de usuarios conectados\r\n\t\t * Hay que enviar el mensaje a todos porque el cliente que se conecta no conoce el \r\n\t\t * nombre del servidor\r\n\t\t */\r\n\t\tif (!esServidor) {\r\n\t\t\tcanalGestionRol.sendToOthers(cliente, new Data(cliente.getRol().name()));\r\n\t\t}\r\n\t\t\r\n\t}\r\n\t\r\n\tprivate void crearSesion () throws ConnectionException, InvalidClientException, InvalidURLException, NameInUseException, NoRegistryException, NoSuchClientException, NoSuchHostException, NoSuchSessionException, PermissionDeniedException, PortInUseException, TimedOutException {\r\n\t\turl = URLString.createSessionURL(con.getIp(), con.getPuerto(), TIPO_SESION, SESION);\r\n\t\tsesion = SessionFactory.createSession(cliente, url, true);\r\n\t}\r\n\t\r\n\tprivate void crearCanales () throws ConnectionException, InvalidClientException, NameInUseException, NoSuchSessionException, NoSuchClientException, NoSuchHostException, PermissionDeniedException, TimedOutException, NoSuchChannelException {\r\n\t\t// El último parámetro indica un join implícito\r\n\t\tcanalChat = sesion.createChannel(cliente, CANAL_CHAT, true, true, true);\r\n\t\tcanalDibujo = sesion.createChannel(cliente, CANAL_DIBUJO, true, true, true);\r\n\t\tcanalGestionRol = sesion.createChannel(cliente, CANAL_GESTION_ROL, true, true, true);\r\n\t\tcanalGestionListaUsuarios = sesion.createChannel(cliente, CANAL_GESTION_LISTA, true, true, true);\r\n\t\tcanalMapa = sesion.createChannel(cliente, CANAL_MAPA, true, true, true);\r\n\t}\r\n\t\r\n\tprivate void ponerConsumidores () throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchConsumerException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\t\t\t\r\n\t\tconsumidorGestionRol = new ConsumidorCanalGestionRol ();\r\n\t\tcanalGestionRol.addConsumer(cliente, consumidorGestionRol);\r\n\t\tconsumidorGestionListaUsuarios = new ConsumidorCanalGestionListaUsuarios();\r\n\t\tcanalGestionListaUsuarios.addConsumer(cliente, consumidorGestionListaUsuarios);\r\n\t\tconsumidorTrazos = new 
ConsumidorCanalTrazos();\r\n\t\tcanalDibujo.addConsumer(cliente, consumidorTrazos);\r\n\t\tconsumidorMapa = new ConsumidorCanalMapa();\r\n\t\tcanalMapa.addConsumer(cliente, consumidorMapa);\r\n\t\tconsumidorChat = new ConsumidorCanalChat ();\r\n\t\tcanalChat.addConsumer(cliente, consumidorChat);\t\r\n\t}\r\n\t\r\n\tprivate void quitarConsumidoresYCanales () throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchConsumerException, NoSuchSessionException, PermissionDeniedException, TimedOutException, NoSuchByteArrayException, NoSuchTokenException {\r\n\t\tcanalChat.removeConsumer(cliente, consumidorChat);\r\n\t\tcanalChat.destroy(cliente);\r\n\t\tcanalGestionRol.removeConsumer(cliente, consumidorGestionRol);\r\n\t\tcanalGestionRol.destroy(cliente);\r\n\t\tcanalGestionListaUsuarios.removeConsumer(cliente, consumidorGestionListaUsuarios);\r\n\t\tcanalGestionListaUsuarios.destroy(cliente);\r\n\t\tcanalDibujo.removeConsumer(cliente, consumidorTrazos);\r\n\t\tcanalDibujo.destroy(cliente);\r\n\t\tcanalMapa.removeConsumer(cliente, consumidorMapa);\r\n\t\tcanalMapa.destroy(cliente);\r\n\t}\r\n\t\r\n\t\r\n\tpublic void enviarMensajeChat (String mensaje) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tcanalChat.sendToAll(cliente, new Data (mensaje));\r\n\t}\r\n\t\r\n\tpublic void enviarTrazo (InfoTrazo info) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tcanalDibujo.sendToOthers(cliente, new Data(info));\r\n\t\t\r\n\t}\r\n\t\r\n\tpublic void ponerMensajeLogLocal(InfoTrazo info) {\r\n\t\tthis.ventanaPrincipal.ponerMensajeLog(cliente.getName(), info);\r\n\t}\r\n\t\r\n\t// Este método es para que el servidor envíe los trazos ya dibujados al cliente que se acaba de conectar \r\n\tpublic void enviarTrazosRecienConectado(String clienteDestino, LinkedList<Trazo> trazos) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchConsumerException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tInfoTrazo info = new InfoTrazo((LinkedList<Trazo>)trazos.clone());\r\n\t\tcanalDibujo.sendToClient(cliente, clienteDestino, new Data(info));\r\n\t}\r\n\t\r\n\tpublic void enviarTrazosClean() throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tInfoTrazo info = new InfoTrazo();\r\n\t\tcanalDibujo.sendToOthers(cliente, new Data(info));\r\n\t}\r\n\t\r\n\tpublic void enviarMapa(ImageIcon mapa) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tcanalMapa.sendToOthers(cliente, new Data(mapa));\r\n\t}\r\n\t\r\n\t// Este método es para que el servidor envíe el mapa al cliente que se acaba de conectar \r\n\tpublic void enviarMapaRecienConectado(String clienteDestino, ImageIcon mapa) throws ConnectionException, InvalidClientException, NoSuchChannelException, NoSuchClientException, NoSuchConsumerException, NoSuchSessionException, PermissionDeniedException, TimedOutException {\r\n\t\tcanalMapa.sendToClient(cliente, clienteDestino, new Data(mapa));\r\n\t}\r\n\t\r\n\tpublic Hashtable<String,Usuario> 
getListaUsuarios() {\r\n\t\treturn listaUsuarios;\r\n\t}\r\n\r\n\tpublic ConsumidorCanalChat getConsumidorCanalChat() {\r\n\t\treturn consumidorChat;\r\n\t}\r\n\t\r\n\tpublic ConsumidorCanalGestionRol getConsumidorGestionRol() {\r\n\t\treturn consumidorGestionRol;\r\n\t}\r\n\t\r\n\tpublic ConsumidorCanalGestionListaUsuarios getConsumidorGestionListaUsuarios() {\r\n\t\treturn consumidorGestionListaUsuarios;\r\n\t}\r\n\r\n\tpublic boolean isServidor() {\r\n\t\treturn esServidor;\r\n\t}\r\n\t\r\n\tpublic String getNombreCliente () {\r\n\t\treturn cliente.getName();\r\n\t}\r\n\r\n\tpublic ConsumidorCanalTrazos getConsumidorTrazos() {\r\n\t\treturn consumidorTrazos;\r\n\t}\r\n\r\n\tpublic ConsumidorCanalMapa getConsumidorMapa() {\r\n\t\treturn consumidorMapa;\r\n\t}\r\n\r\n\tpublic Channel getCanalGestionListaUsuarios() {\r\n\t\treturn canalGestionListaUsuarios;\r\n\t}\r\n\r\n\tpublic ClienteJSDT getCliente() {\r\n\t\treturn cliente;\r\n\t}\r\n\r\n\tpublic void forzarCierre() throws NoRegistryException, ConnectionException, InvalidClientException, InvalidURLException, NoSuchClientException, NoSuchHostException, NoSuchSessionException, NotBoundException, PermissionDeniedException, TimedOutException, NoSuchChannelException, NoSuchConsumerException, NoSuchByteArrayException, NoSuchTokenException {\r\n\t\tquitarConsumidoresYCanales();\r\n\t\tSessionFactory.destroySession(cliente, url);\r\n\t}\r\n}\r\n"
},
{
"alpha_fraction": 0.636623740196228,
"alphanum_fraction": 0.6416308879852295,
"avg_line_length": 39.117645263671875,
"blob_id": "bd30b604ab6b9c96b1eb8d511a4cc544cfae62ea",
"content_id": "76b18b8895a127d53d25567708685142c30b8084",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1398,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 34,
"path": "/sistemas-colaboracion/p2p-jxta/doc/HelloWorld.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "import java.text.MessageFormat;\r\n\r\nimport net.jxta.peergroup.PeerGroup;\r\nimport net.jxta.platform.NetworkManager;\r\nimport net.jxta.platform.NetworkManager.ConfigMode;\r\n\r\npublic class HelloWorld {\r\n \r\n public static void main(String args[]) throws Exception {\r\n System.out.println(\"Starting JXTA ....\");\r\n \r\n // Create, and Start the default jxta NetPeerGroup\r\n NetworkManager manager = new NetworkManager(ConfigMode.EDGE, \"HelloWorld\");\r\n \r\n // Connect to network (default group)\r\n manager.startNetwork();\r\n\r\n // Some info about the default group and the created peer\r\n PeerGroup peerGroup = manager.getNetPeerGroup();\r\n System.out.println(\"Hello from JXTA group \" + peerGroup.getPeerGroupName());\r\n System.out.println(\" Group ID = \" + peerGroup.getPeerGroupID().toString());\r\n System.out.println(\" Peer name = \" + peerGroup.getPeerName());\r\n System.out.println(\" Peer ID = \" + peerGroup.getPeerID().toString());\r\n \r\n System.out.println(\"Waiting for a rendezvous connection\");\r\n boolean connected = manager.waitForRendezvousConnection(120000);\r\n System.out.println(MessageFormat.format(\"Connected :{0}\", connected));\r\n \r\n System.out.println(\"Stopping JXTA\");\r\n manager.stopNetwork();\r\n \r\n System.out.println(\"Good Bye ....\");\r\n }\r\n}\r\n"
},
{
"alpha_fraction": 0.791208803653717,
"alphanum_fraction": 0.791208803653717,
"avg_line_length": 16.200000762939453,
"blob_id": "8c862c826d8c8e1dd7bc5a301cfa156ce5685e0b",
"content_id": "6255efb7d1feb87f66cdd1d6613a06cda003d096",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 5,
"path": "/sistemas-colaboracion/plancode/app/src/excepciones/NoSlotsDisponiblesException.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package excepciones;\r\n\r\npublic class NoSlotsDisponiblesException extends Exception {\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.7832512259483337,
"alphanum_fraction": 0.7832512259483337,
"avg_line_length": 18.299999237060547,
"blob_id": "d5ca06f44e396630c2bfa2c72d47fd56025c96a1",
"content_id": "994210f2bb857f9dd3900f5450c40fc73ecc9a00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 203,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 10,
"path": "/sistemas-colaboracion/plancode/app/src/comunicaciones/EventosCanales/MensajeTrazoListener.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "UTF-8",
"text": "package comunicaciones.EventosCanales;\r\n\r\nimport java.util.EventListener;\r\n\r\npublic interface MensajeTrazoListener extends EventListener {\r\n\t\t\r\n\t\tpublic void MensajeTrazo(MensajeTrazoEvent evt);\r\n\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.6968724727630615,
"alphanum_fraction": 0.7103906273841858,
"avg_line_length": 35.88309860229492,
"blob_id": "33ceadad87db51f6f6d0a04a39ebfbcc22ea96dd",
"content_id": "618bf371c4d2871b5612beec3e44d953c4a51684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 26220,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 710,
"path": "/sistemas-colaboracion/plancode/app/src/presentacion/JFPrincipal.java",
"repo_name": "txedo/isi-practicas-5",
"src_encoding": "ISO-8859-1",
"text": "package presentacion;\n\nimport java.awt.Color;\nimport java.awt.GridLayout;\nimport java.awt.event.ActionEvent;\nimport java.awt.event.ActionListener;\nimport java.awt.event.WindowAdapter;\nimport java.awt.event.WindowEvent;\nimport java.io.File;\nimport java.net.MalformedURLException;\nimport java.net.URL;\nimport java.util.Enumeration;\nimport java.util.Hashtable;\n\nimport javax.swing.BorderFactory;\nimport javax.swing.ImageIcon;\nimport javax.swing.JButton;\nimport javax.swing.JFileChooser;\nimport javax.swing.JLabel;\nimport javax.swing.JMenu;\nimport javax.swing.JMenuBar;\nimport javax.swing.JMenuItem;\nimport javax.swing.JPanel;\nimport javax.swing.JScrollPane;\nimport javax.swing.JSeparator;\nimport javax.swing.JTabbedPane;\nimport javax.swing.JTextArea;\nimport javax.swing.JTextField;\nimport javax.swing.JToolBar;\nimport javax.swing.WindowConstants;\n\nimport presentacion.auxiliares.Dialogos;\nimport presentacion.auxiliares.handlerImagenFondoPanel;\nimport presentacion.auxiliares.panelConImagenFondo;\n\nimport com.cloudgarden.layout.AnchorConstraint;\nimport com.sun.media.jsdt.ConnectionException;\nimport com.sun.media.jsdt.Data;\nimport com.sun.media.jsdt.InvalidClientException;\nimport com.sun.media.jsdt.InvalidURLException;\nimport com.sun.media.jsdt.NoRegistryException;\nimport com.sun.media.jsdt.NoSuchByteArrayException;\nimport com.sun.media.jsdt.NoSuchChannelException;\nimport com.sun.media.jsdt.NoSuchClientException;\nimport com.sun.media.jsdt.NoSuchConsumerException;\nimport com.sun.media.jsdt.NoSuchHostException;\nimport com.sun.media.jsdt.NoSuchSessionException;\nimport com.sun.media.jsdt.NoSuchTokenException;\nimport com.sun.media.jsdt.NotBoundException;\nimport com.sun.media.jsdt.PermissionDeniedException;\nimport com.sun.media.jsdt.TimedOutException;\nimport comunicaciones.EventosCanales.MensajeChatRecibidoEvent;\nimport comunicaciones.EventosCanales.MensajeChatRecibidoListener;\nimport comunicaciones.EventosCanales.MensajeListaUsuariosEvent;\nimport comunicaciones.EventosCanales.MensajeListaUsuariosListener;\nimport comunicaciones.EventosCanales.MensajeMapaEvent;\nimport comunicaciones.EventosCanales.MensajeMapaListener;\nimport comunicaciones.EventosCanales.MensajeTrazoEvent;\nimport comunicaciones.EventosCanales.MensajeTrazoListener;\n\nimport dominio.conocimiento.InfoTrazo;\nimport dominio.conocimiento.Roles;\nimport dominio.conocimiento.Usuario;\nimport dominio.control.ControladorPrincipal;\nimport info.clearthought.layout.TableLayout;\n\n/**\n* This code was edited or generated using CloudGarden's Jigloo\n* SWT/Swing GUI Builder, which is free for non-commercial\n* use. 
If Jigloo is being used commercially (ie, by a corporation,\n* company or business for any purpose whatever) then you\n* should purchase a license for each developer using Jigloo.\n* Please visit www.cloudgarden.com for details.\n* Use of Jigloo implies acceptance of these licensing terms.\n* A COMMERCIAL LICENSE HAS NOT BEEN PURCHASED FOR\n* THIS MACHINE, SO JIGLOO OR THIS CODE CANNOT BE USED\n* LEGALLY FOR ANY CORPORATE OR COMMERCIAL PURPOSE.\n*/\npublic class JFPrincipal extends javax.swing.JFrame {\n\n\t{\n\t\t//Set Look & Feel\n\t\ttry {\n\t\t\tjavax.swing.UIManager.setLookAndFeel(\"com.sun.java.swing.plaf.windows.WindowsLookAndFeel\");\n\t\t} catch(Exception e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getMessage());\n\t\t}\n\t}\n\n\n\tprivate static final long serialVersionUID = -3368293739950370869L;\n\tprivate ControladorPrincipal controlador;\n\tprivate JPanel jPnlToolBoox;\n\tprivate JPanel jPnlUsuarios;\n\tprivate JPanel panelPaint;\t\n\tprivate JButton jbtnClear;\n\tprivate JScrollPane jScrollPane3;\n\tprivate panelConImagenFondo jPanelFondo;\n\tprivate JButton btnCargarMapa;\n\tprivate JButton btnEnviar;\n\tprivate JTextField txtMensaje;\n\tprivate JMenuItem jmiAcercaDe;\n\tprivate JMenu jMenu2;\n\tprivate JTextArea taChat;\n\tprivate JTextArea taLog;\n\tprivate JScrollPane jScrollPane1;\n\tprivate JScrollPane jScrollPane2;\n\tprivate JPanel jpLog;\n\tprivate JPanel jpChat;\n\tprivate JTabbedPane jTabbedPane;\n\tprivate JLabel lblStatusBar;\n\tprivate JToolBar jToolBar;\n\tprivate JMenuItem jmiExit;\n\tprivate JSeparator jSeparator1;\n\tprivate JMenuItem jmiOpenImage;\n\tprivate JMenu jMenu1;\n\tprivate JMenuBar jMenuBar;\n\tprivate JPanel jPanel1;\n\tprivate JButton jButton1;\n\tprivate JButton Dibujar;\n\tprivate CanvasPaint canvasPaint;\n\tprivate JFileChooser fc;\n\t\n\tprivate Color colorCliente = null;\n\tprivate ImageIcon mapa = null;\n\n\tpublic JFPrincipal(ControladorPrincipal c) {\n\t\tsuper();\n\t\tcontrolador = c;\n\t\tinitGUI();\n\t\t\n\t\tlblStatusBar.setText(\"Sesión iniciada como \" + controlador.getNombreCliente() + \"@\" + controlador.getCliente().getRol());\n\t\t// Para el caso del servidor, como es el primer cliente que se conecta y no recibe el evento del canal\n\t\t// la primera vez, se solicita al controlador la lista de usuarios (que sólo lo contendrá a él)\n\t\tif (c.isServidor()) {\n\t\t\t// Establecemos el color\n\t\t\tsetColorActual(controlador.getListaUsuarios());\n\t\t\tactualizarListaUsuarios(c.getListaUsuarios());\n\t\t}\n\t\t\n\t\t// Ponemos el listener a los consumidores del chat para poder recibir los mensajes de chat\n\t\tc.getConsumidorCanalChat().addMensajeChatRecibidoListener(new MensajeChatRecibidoListener() {\n\t\t\tpublic void MensajeChatRecibido(MensajeChatRecibidoEvent evt) {\n\t\t\t\t// Ponemos el mensaje en el chat, coloreandolo con el color del usuario que lo envia\n\t\t\t\tponerMensajeChat(evt);\n\t\t\t}\n\t\t});\n\t\t\n\t\t// Ponemos el listener a los consumidores del canal de gestión para poder recibir la lista de usuarios conectados\n\t\tc.getConsumidorGestionListaUsuarios().addMensajeListaUsuariosListener(new MensajeListaUsuariosListener() {\n\t\t\tpublic void MensajeListaUsuarios(MensajeListaUsuariosEvent evt) {\n\t\t\t\tactualizarListaUsuarios(evt.getLista());\n\t\t\t\tsetColorActual(evt.getLista());\n\t\t\t}\n\t\t});\n\t\t\n\t\tc.getConsumidorTrazos().addMensajeTrazoListener(new MensajeTrazoListener() {\n\t\t\tpublic void MensajeTrazo(MensajeTrazoEvent evt)
{\n\t\t\t\tcanvasPaint.setTrazos(evt.getInfo());\n\t\t\t\tcanvasPaint.revalidate();\n\t\t\t\tcanvasPaint.repaint();\n\t\t\t\tponerMensajeLog(evt.getNombreCliente(), evt.getInfo());\n\t\t\t}\n\t\t});\n\t\t\n\t\tc.getConsumidorMapa().addMensajeMapaListener(new MensajeMapaListener() {\n\t\t\tpublic void MensajeMapa(MensajeMapaEvent evt) {\n\t\t\t\tsetMapaRecibido(evt.getMapa());\t\n\t\t \t// Se actualiza el log\n\t\t \ttaLog.append(evt.getNombreCliente() + \" ha cargado una imagen\\n\");\n\t\t \ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t\t\t}\n\n\t\t});\n\t}\n\t\n\tprivate void setMapaRecibido(ImageIcon mapa) {\n\n\t\t// Se crea un panel para poner el mapa recibido de fondo\n\t\tjPanelFondo = new panelConImagenFondo();\n\t\tjPanelFondo.setMapaFondoRecibido(mapa);\n\t\t// Se añade ese panel y el canvas al área de trabajo\n\t\tinitAreaTrabajo();\n\t}\n\t\n\tprivate void setMapaLocal(URL urlMapa) {\n\t\t// Se crea un panel para poner el mapa recibido de fondo\n\t\tjPanelFondo = new panelConImagenFondo();\n\t\tjPanelFondo.setMapaFondoLocal(urlMapa);\n\t\t// Se añade ese panel y el canvas al área de trabajo\n\t\tinitAreaTrabajo();\n\t}\n\t\n\tprivate void initAreaTrabajo() {\n\t\t// Al establecer un nuevo mapa, se elimina el panel con el mapa que hubiese cargado (si había alguno)\n\t\tpanelPaint.removeAll();\n\t\tjPanelFondo.setLayout(null);\n\t\tjPanelFondo.setBounds(6, 20, 591, 302);\n\t\t// Se limpia el canvas al cambiar el mapa, porque solo existe una instancia de él\n\t\tcanvasPaint.clear();\n \tpanelPaint.add(canvasPaint);\n \tpanelPaint.add(jPanelFondo);\n\t\tjPanelFondo.repaint();\n \tpanelPaint.revalidate();\n \t// Se actualiza el log\n \ttaLog.append(controlador.getNombreCliente() + \" ha cargado una imagen\\n\");\n \ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t}\n\t\n\tprivate void ponerMensajeChat(MensajeChatRecibidoEvent evt) {\n\t\ttaChat.append(evt.getNombre() + \"> \" + evt.getMensaje() + \"\\n\");\n\t\ttaChat.setCaretPosition(taChat.getDocument().getLength());\n\t}\n\t\n\tpublic void ponerMensajeLog(String nombreCliente, InfoTrazo info) {\n\t\tif (info.isClear()) {\n\t\t\ttaLog.append(nombreCliente + \" ha limpiado todos los trazos\\n\");\n\t\t\ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t\t}\n\t\tif (info.isDibujando()) {\n\t\t\tif (!info.isTerminado()) {\n\t\t\t\ttaLog.append(nombreCliente + \" está dibujando un trazo\\n\");\n\t\t\t\ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t\t\t}\n\t\t\telse {\n\t\t\t\ttaLog.append(nombreCliente + \" ha terminado de dibujar un trazo\\n\");\n\t\t\t\ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\ttaLog.append(nombreCliente + \" ha eliminado un trazo\\n\");\n\t\t\ttaLog.setCaretPosition(taLog.getDocument().getLength());\n\t\t}\n\t}\n\t\n\tprivate void initGUI() {\n\t\ttry {\n\t\t\tsetDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);\n\t\t\tthis.setMinimumSize(new java.awt.Dimension(875, 608));\n\t\t\tthis.setResizable(false);\n\t\t\tsetLocationRelativeTo(null);\n\t\t\tGridLayout thisLayout = new GridLayout(1, 1);\n\t\t\tthisLayout.setHgap(5);\n\t\t\tthisLayout.setVgap(5);\n\t\t\tthisLayout.setColumns(1);\n\t\t\tgetContentPane().setLayout(thisLayout);\n\t\t\tthis.setTitle(\"PlanCoDE\");\n\t\t\tthis.addWindowListener(new WindowAdapter() {\n\t\t\t\tpublic void windowClosing(WindowEvent evt) {\n\t\t\t\t\tthisWindowClosing(evt);\n\t\t\t\t}\n\t\t\t});\n\t\t\t{\n\t\t\t\tjPanel1 = new 
JPanel();\n\t\t\t\tgetContentPane().add(jPanel1);\n\t\t\t\tjPanel1.setLayout(null);\n\t\t\t\tjPanel1.setMaximumSize(new java.awt.Dimension(2123123892, 231236100));\n\t\t\t\tjPanel1.setMinimumSize(new java.awt.Dimension(10, 10));\n\t\t\t\tjPanel1.setPreferredSize(new java.awt.Dimension(867, 555));\n\t\t\t\t{\n\t\t\t\t\tjPnlToolBoox = new JPanel();\n\t\t\t\t\tGridLayout jPnlToolBooxLayout = new GridLayout(6, 1);\n\t\t\t\t\tjPnlToolBooxLayout.setHgap(5);\n\t\t\t\t\tjPnlToolBooxLayout.setVgap(5);\n\t\t\t\t\tjPnlToolBooxLayout.setColumns(1);\n\t\t\t\t\tjPnlToolBooxLayout.setRows(6);\n\t\t\t\t\tjPnlToolBoox.setLayout(jPnlToolBooxLayout);\n\t\t\t\t\tjPanel1.add(jPnlToolBoox, new AnchorConstraint(25, 222, 634, 6, AnchorConstraint.ANCHOR_ABS, AnchorConstraint.ANCHOR_REL, AnchorConstraint.ANCHOR_REL, AnchorConstraint.ANCHOR_ABS));\n\t\t\t\t\tjPnlToolBoox.setBorder(BorderFactory.createTitledBorder(\"Toolbox\"));\n\t\t\t\t\tjPnlToolBoox.setBounds(10, 49, 72, 329);\n\t\t\t\t\t{\n\t\t\t\t\t\tjButton1 = new JButton();\n\t\t\t\t\t\tjPnlToolBoox.add(jButton1);\n\t\t\t\t\t\tjButton1.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/eraser.png\")));\n\t\t\t\t\t\tjButton1.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tjButton1ActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\t{\n\t\t\t\t\t\tDibujar = new JButton();\n\t\t\t\t\t\tjPnlToolBoox.add(Dibujar);\n\t\t\t\t\t\tDibujar.setBounds(16, 52, 42, 39);\n\t\t\t\t\t\tDibujar.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/pencil.gif\")));\n\t\t\t\t\t\tDibujar.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tDibujarActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tjScrollPane3 = new JScrollPane();\n\t\t\t\t\tjPanel1.add(jScrollPane3, new AnchorConstraint(25, 17, 632, 757, AnchorConstraint.ANCHOR_ABS, AnchorConstraint.ANCHOR_ABS, AnchorConstraint.ANCHOR_REL, AnchorConstraint.ANCHOR_REL));\n\t\t\t\t\tjScrollPane3.setBounds(697, 49, 171, 329);\n\t\t\t\t\tjScrollPane3.setBorder(BorderFactory.createTitledBorder(\"Usuarios\"));\n\t\t\t\t\t{\n\t\t\t\t\t\tjPnlUsuarios = new JPanel();\n\t\t\t\t\t\tjScrollPane3.setViewportView(jPnlUsuarios);\n\t\t\t\t\t\tTableLayout jPnlUsuariosLayout = new TableLayout(new double[][] {{0.4,0.02,0.7}, {0.2,0.02,0.2,0.02,0.2,0.02,0.2,0.02,0.2,0.02,0.2,0.02,0.2}});\n\t\t\t\t\t\tjPnlUsuariosLayout.setHGap(10);\n\t\t\t\t\t\tjPnlUsuariosLayout.setVGap(10);\n\t\t\t\t\t\tjPnlUsuarios.setLayout(jPnlUsuariosLayout);\n\t\t\t\t\t\tjPnlUsuarios.setBounds(697, 49, 160, 329);\n\t\t\t\t\t\t//jPnlUsuarios.setPreferredSize(new java.awt.Dimension(0, 0));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\t\n\t\t\t\t\tpanelPaint = new JPanel();\n\t\t\t\t\tjPanel1.add(panelPaint, new AnchorConstraint(25, 744, 634, 229, AnchorConstraint.ANCHOR_ABS, AnchorConstraint.ANCHOR_REL, AnchorConstraint.ANCHOR_REL, AnchorConstraint.ANCHOR_REL));\n\t\t\t\t\tpanelPaint.setBorder(BorderFactory.createTitledBorder(\"Área de trabajo\"));\n\t\t\t\t\tpanelPaint.setLayout(null);\n\t\t\t\t\tpanelPaint.setBounds(92, 49, 605, 329);\n\t\t\t\t\t{\n\t\t\t\t\t\tcanvasPaint = new CanvasPaint(controlador);\n\t\t\t\t\t\tcanvasPaint.setOpaque(false);\n\t\t\t\t\t\tpanelPaint.add(canvasPaint);\n\t\t\t\t\t\tcanvasPaint.setBounds(4, 17, 593, 305);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tjToolBar = new 
JToolBar();\n\t\t\t\t\tjPanel1.add(jToolBar);\n\t\t\t\t\tjToolBar.setBounds(10, 6, 847, 43);\n\t\t\t\t\tjToolBar.setFloatable(false);\n\t\t\t\t\t{\n\t\t\t\t\t\tbtnCargarMapa = new JButton();\n\t\t\t\t\t\tjToolBar.add(btnCargarMapa);\n\t\t\t\t\t\tbtnCargarMapa.setBounds(1, 109, 66, 23);\n\t\t\t\t\t\tbtnCargarMapa.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/open.png\")));\n\t\t\t\t\t\tbtnCargarMapa.setPreferredSize(new java.awt.Dimension(45, 38));\n\t\t\t\t\t\tbtnCargarMapa.setText(\"Cargar mapa\");\n\t\t\t\t\t\tbtnCargarMapa.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tbtnCargarMapaActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\t{\n\t\t\t\t\t\tjbtnClear = new JButton();\n\t\t\t\t\t\tjToolBar.add(jbtnClear);\n\t\t\t\t\t\tjbtnClear.setText(\"Limpiar trazos\");\n\t\t\t\t\t\tjbtnClear.setPreferredSize(new java.awt.Dimension(80, 29));\n\t\t\t\t\t\tjbtnClear.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tjbtnClearActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tlblStatusBar = new JLabel();\n\t\t\t\t\tjPanel1.add(lblStatusBar);\n\t\t\t\t\tlblStatusBar.setBounds(10, 539, 848, 16);\n\t\t\t\t\tlblStatusBar.setText(\"texto de prueba...\");\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tjTabbedPane = new JTabbedPane();\n\t\t\t\t\tjPanel1.add(jTabbedPane);\n\t\t\t\t\tjTabbedPane.setBounds(10, 384, 847, 149);\n\t\t\t\t\t{\n\t\t\t\t\t\tjpChat = new JPanel();\n\t\t\t\t\t\tjTabbedPane.addTab(\"Chat\", null, jpChat, null);\n\t\t\t\t\t\tjpChat.setLayout(null);\n\t\t\t\t\t\tjpChat.setPreferredSize(new java.awt.Dimension(825, 88));\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tjScrollPane1 = new JScrollPane();\n\t\t\t\t\t\t\tjpChat.add(jScrollPane1);\n\t\t\t\t\t\t\tjScrollPane1.setBounds(10, 11, 821, 74);\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\ttaChat = new JTextArea();\n\t\t\t\t\t\t\t\tjScrollPane1.setViewportView(taChat);\n\t\t\t\t\t\t\t\ttaChat.setEditable(false);\n\t\t\t\t\t\t\t\ttaChat.setFocusable(false);\n\t\t\t\t\t\t\t\ttaChat.setForeground(colorCliente);\n\t\t\t\t\t\t\t\t//taChat.setPreferredSize(new java.awt.Dimension(280, 76));\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttxtMensaje = new JTextField();\n\t\t\t\t\t\t\tjpChat.add(txtMensaje);\n\t\t\t\t\t\t\ttxtMensaje.setBounds(10, 97, 283, 20);\n\t\t\t\t\t\t}\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tbtnEnviar = new JButton();\n\t\t\t\t\t\t\tjpChat.add(btnEnviar);\n\t\t\t\t\t\t\tbtnEnviar.setText(\"Enviar\");\n\t\t\t\t\t\t\tbtnEnviar.setBounds(303, 96, 65, 23);\n\t\t\t\t\t\t\tbtnEnviar.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\t\tbtnEnviarActionPerformed(evt);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t{\n\t\t\t\t\t\tjpLog = new JPanel();\n\t\t\t\t\t\tjTabbedPane.addTab(\"Log\", null, jpLog, null);\n\t\t\t\t\t\tjpLog.setLayout(null);\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tjScrollPane2 = new JScrollPane();\n\t\t\t\t\t\t\tjpLog.add(jScrollPane2);\n\t\t\t\t\t\t\tjScrollPane2.setBounds(10, 11, 821, 104);\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\ttaLog = new JTextArea();\n\t\t\t\t\t\t\t\tjScrollPane2.setViewportView(taLog);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t{\n\t\t\t\tjMenuBar = new JMenuBar();\n\t\t\t\tsetJMenuBar(jMenuBar);\n\t\t\t\t{\n\t\t\t\t\tjMenu1 = new 
JMenu();\n\t\t\t\t\tjMenuBar.add(jMenu1);\n\t\t\t\t\tjMenu1.setText(\"Archivo\");\n\t\t\t\t\t{\n\t\t\t\t\t\tjmiOpenImage = new JMenuItem();\n\t\t\t\t\t\tjMenu1.add(jmiOpenImage);\n\t\t\t\t\t\tjmiOpenImage.setText(\"Cargar imagen local...\");\n\t\t\t\t\t\tjmiOpenImage.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tjmiOpenImageActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\t{\n\t\t\t\t\t\tjSeparator1 = new JSeparator();\n\t\t\t\t\t\tjMenu1.add(jSeparator1);\n\t\t\t\t\t}\n\t\t\t\t\t{\n\t\t\t\t\t\tjmiExit = new JMenuItem();\n\t\t\t\t\t\tjMenu1.add(jmiExit);\n\t\t\t\t\t\tjmiExit.setText(\"Salir\");\n\t\t\t\t\t\tjmiExit.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tjmiExitActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t{\n\t\t\t\t\tjMenu2 = new JMenu();\n\t\t\t\t\tjMenuBar.add(jMenu2);\n\t\t\t\t\tjMenu2.setText(\"Ayuda\");\n\t\t\t\t\t{\n\t\t\t\t\t\tjmiAcercaDe = new JMenuItem();\n\t\t\t\t\t\tjMenu2.add(jmiAcercaDe);\n\t\t\t\t\t\tjmiAcercaDe.setText(\"Acerca de...\");\n\t\t\t\t\t\tjmiAcercaDe.addActionListener(new ActionListener() {\n\t\t\t\t\t\t\tpublic void actionPerformed(ActionEvent evt) {\n\t\t\t\t\t\t\t\tjmiAcercaDeActionPerformed(evt);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\t\t\n\t\t\tpack();\n\t\t\tbtnEnviar.setDefaultCapable(true);\n\t\t\tgetRootPane().setDefaultButton(btnEnviar);\n\t\t} catch (Exception e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getMessage());\n\t\t}\n\t}\n\t\n\tprivate void jButton1ActionPerformed(ActionEvent evt) {\n\t\tcanvasPaint.modoEliminarTrazo();\n\t}\n\t\n\tprivate void DibujarActionPerformed(ActionEvent evt) {\n\t\tcanvasPaint.modoPintarTrazo();\n\t}\n\n\tpublic void cerrarVentana() {\n\t\tthis.dispose();\n\t}\n\n\tpublic void mostrarVentana() {\n\t\tthis.setVisible(true);\n\t}\n\n\tprivate void actualizarListaUsuarios(Hashtable<String, Usuario> lista) {\n\t\tEnumeration<String> clientesConectados = lista.keys();\n\t\tString cliente;\n\t\tint contador = 0;\n\t\t// Borramos los elementos del panel de sesiones y los volvemos a dibujar\n\t\tjPnlUsuarios.removeAll();\n\t\tjPnlUsuarios.revalidate();\n\t\tjPnlUsuarios.repaint();\n\t\twhile (clientesConectados.hasMoreElements()) {\n\t\t\tJLabel icon = new JLabel();\n\t\t\tJLabel nombre = new JLabel();\n\t\t\tcliente = clientesConectados.nextElement();\n\t\t\t// Segun el rol, cargamos una u otra imagen\n\t\t\tif (lista.get(cliente).getRol().equals(Roles.Policia))\n\t\t\t\t\ticon.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/police.png\")));\n\t\t\telse if (lista.get(cliente).getRol().equals(Roles.Sanidad))\n\t\t\t\ticon.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/doctor.png\")));\n\t\t\telse\n\t\t\t\ticon.setIcon(new ImageIcon(getClass().getClassLoader().getResource(\"icons/fireman.png\")));\n\t\t\t// Colocamos el rol en el icono, junto al nombre\n\t\t\tnombre.setText(cliente);\n\t\t\tnombre.setForeground(lista.get(cliente).getColor());\n\t\t\tjPnlUsuarios.add(icon, \"0, \" + String.valueOf(contador));\n\t\t\tjPnlUsuarios.add(nombre, \"2, \" + String.valueOf(contador));\n\t\t\tcontador += 2;\n\t\t}\n\t\t// Esta llamada es necesaria para que los clientes que ya tenian la interfaz inicializada, refresquen sus paneles de
sesiones\n\t\tjPnlUsuarios.revalidate();\n\t\tjPnlUsuarios.repaint();\n\t\t\n\t}\n\n\tprivate void btnEnviarActionPerformed(ActionEvent evt) {\n\t\ttry {\n\t\t\tthis.controlador.enviarMensajeChat(txtMensaje.getText());\n\t\t} catch (ConnectionException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se puede establecer una conexión\");\n\t\t} catch (InvalidClientException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Cliente de destino inválido\");\n\t\t} catch (NoSuchChannelException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el canal\");\n\t\t} catch (NoSuchClientException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra el cliente de destino\");\n\t\t} catch (NoSuchSessionException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra la sesión\");\n\t\t} catch (PermissionDeniedException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Permiso denegado\");\n\t\t} catch (TimedOutException e) {\n\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Tiempo de espera agotado\");\n\t\t}\n\t\ttxtMensaje.setText(\"\");\n\t}\n\t\n\n\tpublic void iniciarSesion(String login) {\n\t\t// Cuando un cliente se conecta, se notifica su entrada en el chat al resto de clientes.\n\t\ttaChat.append(login + \" ha iniciado sesión.\\n\");\n\t\ttaChat.setCaretPosition(taChat.getDocument().getLength());\n\t}\n\t\n\tpublic void notificarLogout(String login) {\n\t\ttaChat.append(login + \" ha dejado el chat.\\n\");\n\t\ttaChat.setCaretPosition(taChat.getDocument().getLength());\t\n\t\tif (controlador.isServidor()) {\n\t\t \ttry {\n\t \t\tcontrolador.getCanalGestionListaUsuarios().sendToAll(controlador.getCliente(), new Data(controlador.getListaUsuarios()));\n\t \t\tactualizarListaUsuarios(controlador.getListaUsuarios());\n\t\t } catch (ConnectionException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se puede establecer una conexión\");\n \t\t} catch (InvalidClientException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Cliente de destino inválido\");\n \t\t} catch (NoSuchChannelException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el canal\");\n \t\t} catch (NoSuchClientException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra el cliente de destino\");\n \t\t} catch (NoSuchSessionException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra la sesión\");\n \t\t} catch (PermissionDeniedException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Permiso denegado\");\n \t\t} catch (TimedOutException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Tiempo de espera agotado\");\n \t\t}\n\t\t}\n\t}\n\t\n public void setColorActual(Hashtable<String, Usuario> lista)\n {\n\t\t// Ponemos el color a este cliente\n\t\tcolorCliente = lista.get(controlador.getNombreCliente()).getColor();\n canvasPaint.setColorActual(colorCliente);\n }\n \n private void btnCargarMapaActionPerformed(ActionEvent evt) {\n \tcargarMapa();\n }\n \n private void cargarMapa() {\n \tfc = new JFileChooser();\n \t// Se establece el filtro para mostrar sólo imagenes png, jpg, jpeg y gif\n \tfc.addChoosableFileFilter(new presentacion.auxiliares.ImageFilter());\n \tfc.setAcceptAllFileFilterUsed(false);\n \t// Se pone un mensaje personalizado tanto al botón del fileChooser como a su título\n \tint valor = fc.showDialog(this, \"Cargar imagen\");\n \tif (valor == fc.APPROVE_OPTION) {\n \t File image = 
fc.getSelectedFile();\t \t\n\t \ttry {\n\t \t\t// Se toma la ruta del mapa local\n\t\t \tURL urlFondo = image.toURL();\n\t\t\t\t// Se dibuja el panel\n\t\t\t\tsetMapaLocal(urlFondo);\n\t\t\t\t// Se toma el objeto ImageIcon de este mapa\n\t\t\t\tmapa = handlerImagenFondoPanel.getMapaCargado();\n\t\t\t\t// Se envia este mapa al resto de clientes conectados\n\t\t\t\tcontrolador.enviarMapa(mapa);\n\t\t\t} catch (MalformedURLException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getMessage());\n\t\t\t} catch (ConnectionException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se puede establecer una conexión\");\n\t\t\t} catch (InvalidClientException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Cliente de destino inválido\");\n\t\t\t} catch (NoSuchChannelException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el canal\");\n\t\t\t} catch (NoSuchClientException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra el cliente de destino\");\n\t\t\t} catch (NoSuchSessionException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra la sesión\");\n\t\t\t} catch (PermissionDeniedException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Permiso denegado\");\n\t\t\t} catch (TimedOutException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Tiempo de espera agotado\");\n\t\t\t}\n \t}\t\n }\n \n private void thisWindowClosing(WindowEvent evt) {\n \tsalir();\n }\n \n private void jmiExitActionPerformed(ActionEvent evt) {\n \tsalir();\n }\n \n private void salir () {\n \tif (controlador.isServidor()) {\n \t\ttry {\n\t\t\t\tcontrolador.forzarCierre();\n\t\t\t} catch (NoRegistryException e) {\n\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getMessage());\n\t\t\t } catch (ConnectionException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se puede establecer una conexión\");\n\t \t\t} catch (InvalidClientException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Cliente de destino inválido\");\n\t \t\t} catch (NoSuchChannelException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el canal\");\n\t \t\t} catch (NoSuchClientException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra el cliente de destino\");\n\t \t\t} catch (NoSuchSessionException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra la sesión\");\n\t \t\t} catch (PermissionDeniedException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Permiso denegado\");\n\t \t\t} catch (TimedOutException ex) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Tiempo de espera agotado\");\n\t \t\t} catch (InvalidURLException e) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"URL de destino inválida\");\n\t \t\t} catch (NoSuchHostException e) {\n\t \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el host de destino\");\n\t\t\t\t} catch (NotBoundException e) {\n\t\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getLocalizedMessage());\n\t\t\t\t} catch (NoSuchConsumerException e) {\n\t\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el consumidor\");\n\t\t\t\t} catch (NoSuchByteArrayException e) {\n\t\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getLocalizedMessage());\n\t\t\t\t} catch (NoSuchTokenException e) {\n\t\t\t\t\tDialogos.mostrarDialogoError(this, \"Error\", e.getLocalizedMessage());\n\t\t\t\t}\n 
\t}\n \tthis.dispose();\n \tSystem.gc();\n }\n \n private void jmiOpenImageActionPerformed(ActionEvent evt) {\n \tcargarMapa();\n }\n \n private void jbtnClearActionPerformed(ActionEvent evt) {\n \t// Se envia el evento al resto de clientes\n \ttry {\n\t\t\tcontrolador.enviarTrazosClean();\n \t } catch (ConnectionException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se puede establecer una conexión\");\n \t\t} catch (InvalidClientException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Cliente de destino inválido\");\n \t\t} catch (NoSuchChannelException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No existe el canal\");\n \t\t} catch (NoSuchClientException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra el cliente de destino\");\n \t\t} catch (NoSuchSessionException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"No se encuentra la sesión\");\n \t\t} catch (PermissionDeniedException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Permiso denegado\");\n \t\t} catch (TimedOutException ex) {\n \t\t\tDialogos.mostrarDialogoError(this, \"Error\", \"Tiempo de espera agotado\");\n \t\t}\n \tthis.canvasPaint.clear();\n \tponerMensajeLog(controlador.getNombreCliente(), new InfoTrazo());\n\n }\n\n\tpublic CanvasPaint getCanvas() {\n\t\treturn canvasPaint;\n\t}\n\n\tpublic ImageIcon getMapa() {\n\t\treturn mapa;\n\t}\n\t\n\tprivate void jmiAcercaDeActionPerformed(ActionEvent evt) {\n\t\tJDAcercaDe acercaDe = new JDAcercaDe();\n\t\tacercaDe.setLocation(this.getLocation());\n\t\tacercaDe.setModal(true);\n\t\tacercaDe.show();\n\t}\n\n}\n"
}
] | 34 |